1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* Copyright (c) 2015 - 2021 Intel Corporation */ 3 #include "main.h" 4 5 /** 6 * irdma_query_device - get device attributes 7 * @ibdev: device pointer from stack 8 * @props: returning device attributes 9 * @udata: user data 10 */ 11 static int irdma_query_device(struct ib_device *ibdev, 12 struct ib_device_attr *props, 13 struct ib_udata *udata) 14 { 15 struct irdma_device *iwdev = to_iwdev(ibdev); 16 struct irdma_pci_f *rf = iwdev->rf; 17 struct pci_dev *pcidev = iwdev->rf->pcidev; 18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; 19 20 if (udata->inlen || udata->outlen) 21 return -EINVAL; 22 23 memset(props, 0, sizeof(*props)); 24 addrconf_addr_eui48((u8 *)&props->sys_image_guid, 25 iwdev->netdev->dev_addr); 26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | 27 irdma_fw_minor_ver(&rf->sc_dev); 28 props->device_cap_flags = IB_DEVICE_MEM_WINDOW | 29 IB_DEVICE_MEM_MGT_EXTENSIONS; 30 if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3) 31 props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 32 props->vendor_id = pcidev->vendor; 33 props->vendor_part_id = pcidev->device; 34 35 props->hw_ver = rf->pcidev->revision; 36 props->page_size_cap = hw_attrs->page_size_cap; 37 props->max_mr_size = hw_attrs->max_mr_size; 38 props->max_qp = rf->max_qp - rf->used_qps; 39 props->max_qp_wr = hw_attrs->max_qp_wr; 40 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; 41 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; 42 props->max_cq = rf->max_cq - rf->used_cqs; 43 props->max_cqe = rf->max_cqe - 1; 44 props->max_mr = rf->max_mr - rf->used_mrs; 45 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) 46 props->max_mw = props->max_mr; 47 props->max_pd = rf->max_pd - rf->used_pds; 48 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; 49 props->max_qp_rd_atom = hw_attrs->max_hw_ird; 50 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; 51 if (rdma_protocol_roce(ibdev, 1)) { 52 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; 53 props->max_pkeys = IRDMA_PKEY_TBL_SZ; 54 } 55 56 props->max_ah = rf->max_ah; 57 props->max_mcast_grp = rf->max_mcg; 58 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; 59 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX; 60 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR; 61 props->max_srq = rf->max_srq - rf->used_srqs; 62 props->max_srq_wr = IRDMA_MAX_SRQ_WRS; 63 props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags; 64 if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS) 65 props->atomic_cap = IB_ATOMIC_HCA; 66 else 67 props->atomic_cap = IB_ATOMIC_NONE; 68 props->masked_atomic_cap = props->atomic_cap; 69 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) { 70 #define HCA_CORE_CLOCK_KHZ 1000000UL 71 props->timestamp_mask = GENMASK(31, 0); 72 props->hca_core_clock = HCA_CORE_CLOCK_KHZ; 73 } 74 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) 75 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; 76 77 return 0; 78 } 79 80 /** 81 * irdma_query_port - get port attributes 82 * @ibdev: device pointer from stack 83 * @port: port number for query 84 * @props: returning device attributes 85 */ 86 static int irdma_query_port(struct ib_device *ibdev, u32 port, 87 struct ib_port_attr *props) 88 { 89 struct irdma_device *iwdev = to_iwdev(ibdev); 90 struct net_device *netdev = iwdev->netdev; 91 92 /* no need to zero out pros here. 
done by caller */ 93 94 props->max_mtu = IB_MTU_4096; 95 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 96 props->lid = 1; 97 props->lmc = 0; 98 props->sm_lid = 0; 99 props->sm_sl = 0; 100 if (netif_carrier_ok(netdev) && netif_running(netdev)) { 101 props->state = IB_PORT_ACTIVE; 102 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; 103 } else { 104 props->state = IB_PORT_DOWN; 105 props->phys_state = IB_PORT_PHYS_STATE_DISABLED; 106 } 107 108 ib_get_eth_speed(ibdev, port, &props->active_speed, 109 &props->active_width); 110 111 if (rdma_protocol_roce(ibdev, 1)) { 112 props->gid_tbl_len = 32; 113 props->ip_gids = true; 114 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ; 115 } else { 116 props->gid_tbl_len = 1; 117 } 118 props->qkey_viol_cntr = 0; 119 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP; 120 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; 121 122 return 0; 123 } 124 125 /** 126 * irdma_disassociate_ucontext - Disassociate user context 127 * @context: ib user context 128 */ 129 static void irdma_disassociate_ucontext(struct ib_ucontext *context) 130 { 131 } 132 133 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext, 134 struct vm_area_struct *vma) 135 { 136 u64 pfn; 137 138 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) 139 return -EINVAL; 140 141 vma->vm_private_data = ucontext; 142 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] + 143 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; 144 145 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, 146 pgprot_noncached(vma->vm_page_prot), NULL); 147 } 148 149 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 150 { 151 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry); 152 153 kfree(entry); 154 } 155 156 static struct rdma_user_mmap_entry* 157 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset, 158 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset) 159 { 160 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 161 int ret; 162 163 if (!entry) 164 return NULL; 165 166 entry->bar_offset = bar_offset; 167 entry->mmap_flag = mmap_flag; 168 169 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, 170 &entry->rdma_entry, PAGE_SIZE); 171 if (ret) { 172 kfree(entry); 173 return NULL; 174 } 175 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); 176 177 return &entry->rdma_entry; 178 } 179 180 /** 181 * irdma_mmap - user memory map 182 * @context: context created during alloc 183 * @vma: kernel info for user memory map 184 */ 185 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 186 { 187 struct rdma_user_mmap_entry *rdma_entry; 188 struct irdma_user_mmap_entry *entry; 189 struct irdma_ucontext *ucontext; 190 u64 pfn; 191 int ret; 192 193 ucontext = to_ucontext(context); 194 195 /* Legacy support for libi40iw with hard-coded mmap key */ 196 if (ucontext->legacy_mode) 197 return irdma_mmap_legacy(ucontext, vma); 198 199 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); 200 if (!rdma_entry) { 201 ibdev_dbg(&ucontext->iwdev->ibdev, 202 "VERBS: pgoff[0x%lx] does not have valid entry\n", 203 vma->vm_pgoff); 204 return -EINVAL; 205 } 206 207 entry = to_irdma_mmap_entry(rdma_entry); 208 ibdev_dbg(&ucontext->iwdev->ibdev, 209 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n", 210 entry->bar_offset, entry->mmap_flag); 211 212 pfn = (entry->bar_offset + 213 
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; 214 215 switch (entry->mmap_flag) { 216 case IRDMA_MMAP_IO_NC: 217 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 218 pgprot_noncached(vma->vm_page_prot), 219 rdma_entry); 220 break; 221 case IRDMA_MMAP_IO_WC: 222 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 223 pgprot_writecombine(vma->vm_page_prot), 224 rdma_entry); 225 break; 226 default: 227 ret = -EINVAL; 228 } 229 230 if (ret) 231 ibdev_dbg(&ucontext->iwdev->ibdev, 232 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", 233 entry->bar_offset, entry->mmap_flag, ret); 234 rdma_user_mmap_entry_put(rdma_entry); 235 236 return ret; 237 } 238 239 /** 240 * irdma_alloc_push_page - allocate a push page for qp 241 * @iwqp: qp pointer 242 */ 243 static void irdma_alloc_push_page(struct irdma_qp *iwqp) 244 { 245 struct irdma_cqp_request *cqp_request; 246 struct cqp_cmds_info *cqp_info; 247 struct irdma_device *iwdev = iwqp->iwdev; 248 struct irdma_sc_qp *qp = &iwqp->sc_qp; 249 int status; 250 251 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 252 if (!cqp_request) 253 return; 254 255 cqp_info = &cqp_request->info; 256 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; 257 cqp_info->post_sq = 1; 258 cqp_info->in.u.manage_push_page.info.push_idx = 0; 259 cqp_info->in.u.manage_push_page.info.qs_handle = 260 qp->vsi->qos[qp->user_pri].qs_handle; 261 cqp_info->in.u.manage_push_page.info.free_page = 0; 262 cqp_info->in.u.manage_push_page.info.push_page_type = 0; 263 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp; 264 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; 265 266 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 267 if (!status && cqp_request->compl_info.op_ret_val < 268 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) { 269 qp->push_idx = cqp_request->compl_info.op_ret_val; 270 qp->push_offset = 0; 271 } 272 273 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 274 } 275 276 /** 277 * irdma_alloc_ucontext - Allocate the user context data structure 278 * @uctx: uverbs context pointer 279 * @udata: user data 280 * 281 * This keeps track of all objects associated with a particular 282 * user-mode client. 
283 */ 284 static int irdma_alloc_ucontext(struct ib_ucontext *uctx, 285 struct ib_udata *udata) 286 { 287 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) 288 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) 289 struct ib_device *ibdev = uctx->device; 290 struct irdma_device *iwdev = to_iwdev(ibdev); 291 struct irdma_alloc_ucontext_req req = {}; 292 struct irdma_alloc_ucontext_resp uresp = {}; 293 struct irdma_ucontext *ucontext = to_ucontext(uctx); 294 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 295 296 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || 297 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) 298 return -EINVAL; 299 300 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) 301 return -EINVAL; 302 303 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) 304 goto ver_error; 305 306 ucontext->iwdev = iwdev; 307 ucontext->abi_ver = req.userspace_ver; 308 309 if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) && 310 uk_attrs->hw_rev >= IRDMA_GEN_3) 311 return -EOPNOTSUPP; 312 313 if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) 314 ucontext->use_raw_attrs = true; 315 316 /* GEN_1 legacy support with libi40iw */ 317 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { 318 if (uk_attrs->hw_rev != IRDMA_GEN_1) 319 return -EOPNOTSUPP; 320 321 ucontext->legacy_mode = true; 322 uresp.max_qps = iwdev->rf->max_qp; 323 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; 324 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; 325 uresp.kernel_ver = req.userspace_ver; 326 if (ib_copy_to_udata(udata, &uresp, 327 min(sizeof(uresp), udata->outlen))) 328 return -EFAULT; 329 } else { 330 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 331 332 ucontext->db_mmap_entry = 333 irdma_user_mmap_entry_insert(ucontext, bar_off, 334 IRDMA_MMAP_IO_NC, 335 &uresp.db_mmap_key); 336 if (!ucontext->db_mmap_entry) 337 return -ENOMEM; 338 339 uresp.kernel_ver = IRDMA_ABI_VER; 340 uresp.feature_flags = uk_attrs->feature_flags; 341 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; 342 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; 343 uresp.max_hw_inline = uk_attrs->max_hw_inline; 344 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; 345 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; 346 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; 347 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; 348 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; 349 uresp.hw_rev = uk_attrs->hw_rev; 350 uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; 351 uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; 352 uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE; 353 uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; 354 uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA; 355 if (ib_copy_to_udata(udata, &uresp, 356 min(sizeof(uresp), udata->outlen))) { 357 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 358 return -EFAULT; 359 } 360 } 361 362 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); 363 spin_lock_init(&ucontext->cq_reg_mem_list_lock); 364 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); 365 spin_lock_init(&ucontext->qp_reg_mem_list_lock); 366 INIT_LIST_HEAD(&ucontext->srq_reg_mem_list); 367 spin_lock_init(&ucontext->srq_reg_mem_list_lock); 368 369 return 0; 370 371 ver_error: 372 ibdev_err(&iwdev->ibdev, 373 "Invalid userspace driver version detected. 
Detected version %d, should be %d\n", 374 req.userspace_ver, IRDMA_ABI_VER); 375 return -EINVAL; 376 } 377 378 /** 379 * irdma_dealloc_ucontext - deallocate the user context data structure 380 * @context: user context created during alloc 381 */ 382 static void irdma_dealloc_ucontext(struct ib_ucontext *context) 383 { 384 struct irdma_ucontext *ucontext = to_ucontext(context); 385 386 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 387 } 388 389 /** 390 * irdma_alloc_pd - allocate protection domain 391 * @pd: PD pointer 392 * @udata: user data 393 */ 394 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) 395 { 396 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) 397 struct irdma_pd *iwpd = to_iwpd(pd); 398 struct irdma_device *iwdev = to_iwdev(pd->device); 399 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 400 struct irdma_pci_f *rf = iwdev->rf; 401 struct irdma_alloc_pd_resp uresp = {}; 402 struct irdma_sc_pd *sc_pd; 403 u32 pd_id = 0; 404 int err; 405 406 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) 407 return -EINVAL; 408 409 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, 410 &rf->next_pd); 411 if (err) 412 return err; 413 414 sc_pd = &iwpd->sc_pd; 415 if (udata) { 416 struct irdma_ucontext *ucontext = 417 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 418 ibucontext); 419 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); 420 uresp.pd_id = pd_id; 421 if (ib_copy_to_udata(udata, &uresp, 422 min(sizeof(uresp), udata->outlen))) { 423 err = -EFAULT; 424 goto error; 425 } 426 } else { 427 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); 428 } 429 430 return 0; 431 error: 432 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); 433 434 return err; 435 } 436 437 /** 438 * irdma_dealloc_pd - deallocate pd 439 * @ibpd: ptr of pd to be deallocated 440 * @udata: user data 441 */ 442 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 443 { 444 struct irdma_pd *iwpd = to_iwpd(ibpd); 445 struct irdma_device *iwdev = to_iwdev(ibpd->device); 446 447 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); 448 449 return 0; 450 } 451 452 /** 453 * irdma_get_pbl - Retrieve pbl from a list given a virtual 454 * address 455 * @va: user virtual address 456 * @pbl_list: pbl list to search in (QP's or CQ's) 457 */ 458 static struct irdma_pbl *irdma_get_pbl(unsigned long va, 459 struct list_head *pbl_list) 460 { 461 struct irdma_pbl *iwpbl; 462 463 list_for_each_entry (iwpbl, pbl_list, list) { 464 if (iwpbl->user_base == va) { 465 list_del(&iwpbl->list); 466 iwpbl->on_list = false; 467 return iwpbl; 468 } 469 } 470 471 return NULL; 472 } 473 474 /** 475 * irdma_clean_cqes - clean cq entries for qp 476 * @iwqp: qp ptr (user or kernel) 477 * @iwcq: cq ptr 478 */ 479 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq) 480 { 481 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; 482 unsigned long flags; 483 484 spin_lock_irqsave(&iwcq->lock, flags); 485 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); 486 spin_unlock_irqrestore(&iwcq->lock, flags); 487 } 488 489 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp) 490 { 491 if (iwqp->push_db_mmap_entry) { 492 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry); 493 iwqp->push_db_mmap_entry = NULL; 494 } 495 if (iwqp->push_wqe_mmap_entry) { 496 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 497 iwqp->push_wqe_mmap_entry = NULL; 498 } 499 } 500 501 static int 
irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext, 502 struct irdma_qp *iwqp, 503 u64 *push_wqe_mmap_key, 504 u64 *push_db_mmap_key) 505 { 506 struct irdma_device *iwdev = ucontext->iwdev; 507 u64 rsvd, bar_off; 508 509 rsvd = IRDMA_PF_BAR_RSVD; 510 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 511 /* skip over db page */ 512 bar_off += IRDMA_HW_PAGE_SIZE; 513 /* push wqe page */ 514 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE; 515 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 516 bar_off, IRDMA_MMAP_IO_WC, 517 push_wqe_mmap_key); 518 if (!iwqp->push_wqe_mmap_entry) 519 return -ENOMEM; 520 521 /* push doorbell page */ 522 bar_off += IRDMA_HW_PAGE_SIZE; 523 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 524 bar_off, IRDMA_MMAP_IO_NC, 525 push_db_mmap_key); 526 if (!iwqp->push_db_mmap_entry) { 527 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 528 return -ENOMEM; 529 } 530 531 return 0; 532 } 533 534 /** 535 * irdma_destroy_qp - destroy qp 536 * @ibqp: qp's ib pointer also to get to device's qp address 537 * @udata: user data 538 */ 539 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 540 { 541 struct irdma_qp *iwqp = to_iwqp(ibqp); 542 struct irdma_device *iwdev = iwqp->iwdev; 543 544 iwqp->sc_qp.qp_uk.destroy_pending = true; 545 546 if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE) 547 irdma_modify_qp_to_err(&iwqp->sc_qp); 548 549 if (!iwqp->user_mode) 550 cancel_delayed_work_sync(&iwqp->dwork_flush); 551 552 if (!iwqp->user_mode) { 553 if (iwqp->iwscq) { 554 irdma_clean_cqes(iwqp, iwqp->iwscq); 555 if (iwqp->iwrcq != iwqp->iwscq) 556 irdma_clean_cqes(iwqp, iwqp->iwrcq); 557 } 558 } 559 560 irdma_qp_rem_ref(&iwqp->ibqp); 561 wait_for_completion(&iwqp->free_qp); 562 irdma_free_lsmm_rsrc(iwqp); 563 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); 564 565 irdma_remove_push_mmap_entries(iwqp); 566 567 if (iwqp->sc_qp.qp_uk.qp_id == 1) 568 iwdev->rf->hwqp1_rsvd = false; 569 irdma_free_qp_rsrc(iwqp); 570 571 return 0; 572 } 573 574 /** 575 * irdma_setup_virt_qp - setup for allocation of virtual qp 576 * @iwdev: irdma device 577 * @iwqp: qp ptr 578 * @init_info: initialize info to return 579 */ 580 static void irdma_setup_virt_qp(struct irdma_device *iwdev, 581 struct irdma_qp *iwqp, 582 struct irdma_qp_init_info *init_info) 583 { 584 struct irdma_pbl *iwpbl = iwqp->iwpbl; 585 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 586 587 iwqp->page = qpmr->sq_page; 588 init_info->shadow_area_pa = qpmr->shadow; 589 if (iwpbl->pbl_allocated) { 590 init_info->virtual_map = true; 591 init_info->sq_pa = qpmr->sq_pbl.idx; 592 /* Need to use contiguous buffer for RQ of QP 593 * in case it is associated with SRQ. 594 */ 595 init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ? 
596 qpmr->rq_pa : qpmr->rq_pbl.idx; 597 } else { 598 init_info->sq_pa = qpmr->sq_pbl.addr; 599 init_info->rq_pa = qpmr->rq_pbl.addr; 600 } 601 } 602 603 /** 604 * irdma_setup_umode_qp - setup sq and rq size in user mode qp 605 * @udata: udata 606 * @iwdev: iwarp device 607 * @iwqp: qp ptr (user or kernel) 608 * @info: initialize info to return 609 * @init_attr: Initial QP create attributes 610 */ 611 static int irdma_setup_umode_qp(struct ib_udata *udata, 612 struct irdma_device *iwdev, 613 struct irdma_qp *iwqp, 614 struct irdma_qp_init_info *info, 615 struct ib_qp_init_attr *init_attr) 616 { 617 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, 618 struct irdma_ucontext, ibucontext); 619 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; 620 struct irdma_create_qp_req req; 621 unsigned long flags; 622 int ret; 623 624 ret = ib_copy_from_udata(&req, udata, 625 min(sizeof(req), udata->inlen)); 626 if (ret) { 627 ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n"); 628 return ret; 629 } 630 631 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; 632 iwqp->user_mode = 1; 633 if (req.user_wqe_bufs) { 634 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode; 635 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 636 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs, 637 &ucontext->qp_reg_mem_list); 638 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 639 640 if (!iwqp->iwpbl) { 641 ret = -ENODATA; 642 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n"); 643 return ret; 644 } 645 } 646 647 if (!ucontext->use_raw_attrs) { 648 /** 649 * Maintain backward compat with older ABI which passes sq and 650 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr. 651 * There is no way to compute the correct value of 652 * iwqp->max_send_wr/max_recv_wr in the kernel. 
653 */ 654 iwqp->max_send_wr = init_attr->cap.max_send_wr; 655 iwqp->max_recv_wr = init_attr->cap.max_recv_wr; 656 ukinfo->sq_size = init_attr->cap.max_send_wr; 657 ukinfo->rq_size = init_attr->cap.max_recv_wr; 658 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, 659 &ukinfo->rq_shift); 660 } else { 661 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, 662 &ukinfo->sq_shift); 663 if (ret) 664 return ret; 665 666 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, 667 &ukinfo->rq_shift); 668 if (ret) 669 return ret; 670 671 iwqp->max_send_wr = 672 (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; 673 iwqp->max_recv_wr = 674 (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; 675 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; 676 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; 677 } 678 679 irdma_setup_virt_qp(iwdev, iwqp, info); 680 681 return 0; 682 } 683 684 /** 685 * irdma_setup_kmode_qp - setup initialization for kernel mode qp 686 * @iwdev: iwarp device 687 * @iwqp: qp ptr (user or kernel) 688 * @info: initialize info to return 689 * @init_attr: Initial QP create attributes 690 */ 691 static int irdma_setup_kmode_qp(struct irdma_device *iwdev, 692 struct irdma_qp *iwqp, 693 struct irdma_qp_init_info *info, 694 struct ib_qp_init_attr *init_attr) 695 { 696 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem; 697 u32 size; 698 int status; 699 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; 700 701 status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, 702 &ukinfo->sq_shift); 703 if (status) 704 return status; 705 706 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, 707 &ukinfo->rq_shift); 708 if (status) 709 return status; 710 711 iwqp->kqp.sq_wrid_mem = 712 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); 713 if (!iwqp->kqp.sq_wrid_mem) 714 return -ENOMEM; 715 716 iwqp->kqp.rq_wrid_mem = 717 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); 718 719 if (!iwqp->kqp.rq_wrid_mem) { 720 kfree(iwqp->kqp.sq_wrid_mem); 721 iwqp->kqp.sq_wrid_mem = NULL; 722 return -ENOMEM; 723 } 724 725 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; 726 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; 727 728 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE; 729 size += (IRDMA_SHADOW_AREA_SIZE << 3); 730 731 mem->size = ALIGN(size, 256); 732 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, 733 &mem->pa, GFP_KERNEL); 734 if (!mem->va) { 735 kfree(iwqp->kqp.sq_wrid_mem); 736 iwqp->kqp.sq_wrid_mem = NULL; 737 kfree(iwqp->kqp.rq_wrid_mem); 738 iwqp->kqp.rq_wrid_mem = NULL; 739 return -ENOMEM; 740 } 741 742 ukinfo->sq = mem->va; 743 info->sq_pa = mem->pa; 744 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth]; 745 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE); 746 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem; 747 info->shadow_area_pa = 748 info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE); 749 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; 750 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; 751 ukinfo->qp_id = info->qp_uk_init_info.qp_id; 752 753 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; 754 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; 755 init_attr->cap.max_send_wr = iwqp->max_send_wr; 756 init_attr->cap.max_recv_wr = iwqp->max_recv_wr; 757 758 return 0; 759 } 760 761 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) 762 { 763 struct 
irdma_pci_f *rf = iwqp->iwdev->rf; 764 struct irdma_cqp_request *cqp_request; 765 struct cqp_cmds_info *cqp_info; 766 struct irdma_create_qp_info *qp_info; 767 int status; 768 769 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 770 if (!cqp_request) 771 return -ENOMEM; 772 773 cqp_info = &cqp_request->info; 774 qp_info = &cqp_request->info.in.u.qp_create.info; 775 qp_info->mac_valid = true; 776 qp_info->cq_num_valid = true; 777 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE; 778 779 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; 780 cqp_info->post_sq = 1; 781 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; 782 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; 783 status = irdma_handle_cqp_op(rf, cqp_request); 784 irdma_put_cqp_request(&rf->cqp, cqp_request); 785 786 return status; 787 } 788 789 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 790 struct irdma_qp_host_ctx_info *ctx_info) 791 { 792 struct irdma_device *iwdev = iwqp->iwdev; 793 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 794 struct irdma_roce_offload_info *roce_info; 795 struct irdma_udp_offload_info *udp_info; 796 797 udp_info = &iwqp->udp_info; 798 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu)); 799 udp_info->cwnd = iwdev->roce_cwnd; 800 udp_info->rexmit_thresh = 2; 801 udp_info->rnr_nak_thresh = 2; 802 udp_info->src_port = 0xc000; 803 udp_info->dst_port = ROCE_V2_UDP_DPORT; 804 roce_info = &iwqp->roce_info; 805 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); 806 807 if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1) 808 roce_info->is_qp1 = true; 809 roce_info->rd_en = true; 810 roce_info->wr_rdresp_en = true; 811 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) 812 roce_info->bind_en = true; 813 roce_info->dcqcn_en = false; 814 roce_info->rtomin = 5; 815 816 roce_info->ack_credits = iwdev->roce_ackcreds; 817 roce_info->ird_size = dev->hw_attrs.max_hw_ird; 818 roce_info->ord_size = dev->hw_attrs.max_hw_ord; 819 820 if (!iwqp->user_mode) { 821 roce_info->priv_mode_en = true; 822 roce_info->fast_reg_en = true; 823 roce_info->udprivcq_en = true; 824 } 825 roce_info->roce_tver = 0; 826 827 ctx_info->roce_info = &iwqp->roce_info; 828 ctx_info->udp_info = &iwqp->udp_info; 829 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 830 } 831 832 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 833 struct irdma_qp_host_ctx_info *ctx_info) 834 { 835 struct irdma_device *iwdev = iwqp->iwdev; 836 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 837 struct irdma_iwarp_offload_info *iwarp_info; 838 839 iwarp_info = &iwqp->iwarp_info; 840 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr); 841 iwarp_info->rd_en = true; 842 iwarp_info->wr_rdresp_en = true; 843 iwarp_info->ecn_en = true; 844 iwarp_info->rtomin = 5; 845 846 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 847 iwarp_info->ib_rd_en = true; 848 if (!iwqp->user_mode) { 849 iwarp_info->priv_mode_en = true; 850 iwarp_info->fast_reg_en = true; 851 } 852 iwarp_info->ddp_ver = 1; 853 iwarp_info->rdmap_ver = 1; 854 855 ctx_info->iwarp_info = &iwqp->iwarp_info; 856 ctx_info->iwarp_info_valid = true; 857 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 858 ctx_info->iwarp_info_valid = false; 859 } 860 861 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, 862 struct irdma_device *iwdev) 863 { 864 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 865 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 866 867 if 
(init_attr->create_flags) 868 return -EOPNOTSUPP; 869 870 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || 871 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || 872 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags || 873 init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta || 874 init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta) 875 return -EINVAL; 876 877 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 878 if (init_attr->qp_type != IB_QPT_RC && 879 init_attr->qp_type != IB_QPT_UD && 880 init_attr->qp_type != IB_QPT_GSI) 881 return -EOPNOTSUPP; 882 } else { 883 if (init_attr->qp_type != IB_QPT_RC) 884 return -EOPNOTSUPP; 885 } 886 887 return 0; 888 } 889 890 static void irdma_flush_worker(struct work_struct *work) 891 { 892 struct delayed_work *dwork = to_delayed_work(work); 893 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush); 894 895 irdma_generate_flush_completions(iwqp); 896 } 897 898 static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num) 899 { 900 struct irdma_device *iwdev = iwqp->iwdev; 901 struct irdma_pci_f *rf = iwdev->rf; 902 unsigned long flags; 903 int ret; 904 905 if (rf->rdma_ver <= IRDMA_GEN_2) { 906 *qp_num = 1; 907 return 0; 908 } 909 910 spin_lock_irqsave(&rf->rsrc_lock, flags); 911 if (!rf->hwqp1_rsvd) { 912 *qp_num = 1; 913 rf->hwqp1_rsvd = true; 914 spin_unlock_irqrestore(&rf->rsrc_lock, flags); 915 } else { 916 spin_unlock_irqrestore(&rf->rsrc_lock, flags); 917 ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, 918 qp_num, &rf->next_qp); 919 if (ret) 920 return ret; 921 } 922 923 ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num, 924 (&iwdev->vsi)->qos); 925 if (ret) { 926 if (*qp_num != 1) { 927 irdma_free_rsrc(rf, rf->allocated_qps, *qp_num); 928 } else { 929 spin_lock_irqsave(&rf->rsrc_lock, flags); 930 rf->hwqp1_rsvd = false; 931 spin_unlock_irqrestore(&rf->rsrc_lock, flags); 932 } 933 return ret; 934 } 935 936 return 0; 937 } 938 939 /** 940 * irdma_create_qp - create qp 941 * @ibqp: ptr of qp 942 * @init_attr: attributes for qp 943 * @udata: user data for create qp 944 */ 945 static int irdma_create_qp(struct ib_qp *ibqp, 946 struct ib_qp_init_attr *init_attr, 947 struct ib_udata *udata) 948 { 949 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx) 950 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd) 951 struct ib_pd *ibpd = ibqp->pd; 952 struct irdma_pd *iwpd = to_iwpd(ibpd); 953 struct irdma_device *iwdev = to_iwdev(ibpd->device); 954 struct irdma_pci_f *rf = iwdev->rf; 955 struct irdma_qp *iwqp = to_iwqp(ibqp); 956 struct irdma_create_qp_resp uresp = {}; 957 u32 qp_num = 0; 958 int err_code; 959 struct irdma_sc_qp *qp; 960 struct irdma_sc_dev *dev = &rf->sc_dev; 961 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 962 struct irdma_qp_init_info init_info = {}; 963 struct irdma_qp_host_ctx_info *ctx_info; 964 struct irdma_srq *iwsrq; 965 bool srq_valid = false; 966 u32 srq_id = 0; 967 968 if (init_attr->srq) { 969 iwsrq = to_iwsrq(init_attr->srq); 970 srq_valid = true; 971 srq_id = iwsrq->srq_num; 972 init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags; 973 init_attr->cap.max_recv_wr = 4; 974 init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk; 975 } 976 977 err_code = irdma_validate_qp_attrs(init_attr, iwdev); 978 if (err_code) 979 return err_code; 980 981 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN || 982 udata->outlen < 
IRDMA_CREATE_QP_MIN_RESP_LEN)) 983 return -EINVAL; 984 985 init_info.vsi = &iwdev->vsi; 986 init_info.qp_uk_init_info.uk_attrs = uk_attrs; 987 init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; 988 init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; 989 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; 990 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; 991 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; 992 993 qp = &iwqp->sc_qp; 994 qp->qp_uk.back_qp = iwqp; 995 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; 996 997 iwqp->iwdev = iwdev; 998 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE, 999 256); 1000 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device, 1001 iwqp->q2_ctx_mem.size, 1002 &iwqp->q2_ctx_mem.pa, 1003 GFP_KERNEL); 1004 if (!iwqp->q2_ctx_mem.va) 1005 return -ENOMEM; 1006 1007 init_info.q2 = iwqp->q2_ctx_mem.va; 1008 init_info.q2_pa = iwqp->q2_ctx_mem.pa; 1009 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE); 1010 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE; 1011 1012 if (init_attr->qp_type == IB_QPT_GSI) { 1013 err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num); 1014 if (err_code) 1015 goto error; 1016 iwqp->ibqp.qp_num = 1; 1017 } else { 1018 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, 1019 &qp_num, &rf->next_qp); 1020 if (err_code) 1021 goto error; 1022 iwqp->ibqp.qp_num = qp_num; 1023 } 1024 1025 iwqp->iwpd = iwpd; 1026 qp = &iwqp->sc_qp; 1027 iwqp->iwscq = to_iwcq(init_attr->send_cq); 1028 iwqp->iwrcq = to_iwcq(init_attr->recv_cq); 1029 iwqp->host_ctx.va = init_info.host_ctx; 1030 iwqp->host_ctx.pa = init_info.host_ctx_pa; 1031 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; 1032 1033 init_info.pd = &iwpd->sc_pd; 1034 init_info.qp_uk_init_info.qp_id = qp_num; 1035 if (!rdma_protocol_roce(&iwdev->ibdev, 1)) 1036 init_info.qp_uk_init_info.first_sq_wq = 1; 1037 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; 1038 init_waitqueue_head(&iwqp->waitq); 1039 init_waitqueue_head(&iwqp->mod_qp_waitq); 1040 1041 if (udata) { 1042 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; 1043 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, 1044 init_attr); 1045 } else { 1046 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); 1047 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; 1048 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); 1049 } 1050 1051 if (err_code) { 1052 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n"); 1053 goto error; 1054 } 1055 1056 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 1057 if (init_attr->qp_type == IB_QPT_RC) { 1058 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC; 1059 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 1060 IRDMA_WRITE_WITH_IMM | 1061 IRDMA_ROCE; 1062 } else { 1063 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD; 1064 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 1065 IRDMA_ROCE; 1066 } 1067 } else { 1068 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP; 1069 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM; 1070 } 1071 1072 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) 1073 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE; 1074 1075 err_code = irdma_sc_qp_init(qp, &init_info); 1076 if (err_code) { 1077 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n"); 1078 goto error; 1079 } 1080 1081 ctx_info = &iwqp->ctx_info; 1082 ctx_info->srq_valid = srq_valid; 1083 ctx_info->srq_id = 
srq_id; 1084 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1085 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1086 1087 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 1088 if (dev->ws_add(&iwdev->vsi, 0)) { 1089 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); 1090 err_code = -EINVAL; 1091 goto error; 1092 } 1093 irdma_qp_add_qos(&iwqp->sc_qp); 1094 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info); 1095 } else { 1096 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info); 1097 } 1098 1099 err_code = irdma_cqp_create_qp_cmd(iwqp); 1100 if (err_code) 1101 goto error; 1102 1103 refcount_set(&iwqp->refcnt, 1); 1104 spin_lock_init(&iwqp->lock); 1105 spin_lock_init(&iwqp->sc_qp.pfpdu.lock); 1106 iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1107 rf->qp_table[qp_num] = iwqp; 1108 1109 if (udata) { 1110 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */ 1111 if (udata->outlen < sizeof(uresp)) { 1112 uresp.lsmm = 1; 1113 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1; 1114 } else { 1115 if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) 1116 uresp.lsmm = 1; 1117 } 1118 uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size; 1119 uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size; 1120 uresp.qp_id = qp_num; 1121 uresp.qp_caps = qp->qp_uk.qp_caps; 1122 1123 err_code = ib_copy_to_udata(udata, &uresp, 1124 min(sizeof(uresp), udata->outlen)); 1125 if (err_code) { 1126 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); 1127 irdma_destroy_qp(&iwqp->ibqp, udata); 1128 return err_code; 1129 } 1130 } 1131 1132 init_completion(&iwqp->free_qp); 1133 return 0; 1134 1135 error: 1136 irdma_free_qp_rsrc(iwqp); 1137 return err_code; 1138 } 1139 1140 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) 1141 { 1142 int acc_flags = 0; 1143 1144 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { 1145 if (iwqp->roce_info.wr_rdresp_en) { 1146 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1147 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1148 } 1149 if (iwqp->roce_info.rd_en) 1150 acc_flags |= IB_ACCESS_REMOTE_READ; 1151 if (iwqp->roce_info.bind_en) 1152 acc_flags |= IB_ACCESS_MW_BIND; 1153 if (iwqp->ctx_info.remote_atomics_en) 1154 acc_flags |= IB_ACCESS_REMOTE_ATOMIC; 1155 } else { 1156 if (iwqp->iwarp_info.wr_rdresp_en) { 1157 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1158 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1159 } 1160 if (iwqp->iwarp_info.rd_en) 1161 acc_flags |= IB_ACCESS_REMOTE_READ; 1162 if (iwqp->ctx_info.remote_atomics_en) 1163 acc_flags |= IB_ACCESS_REMOTE_ATOMIC; 1164 } 1165 return acc_flags; 1166 } 1167 1168 /** 1169 * irdma_query_qp - query qp attributes 1170 * @ibqp: qp pointer 1171 * @attr: attributes pointer 1172 * @attr_mask: Not used 1173 * @init_attr: qp attributes to return 1174 */ 1175 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1176 int attr_mask, struct ib_qp_init_attr *init_attr) 1177 { 1178 struct irdma_qp *iwqp = to_iwqp(ibqp); 1179 struct irdma_sc_qp *qp = &iwqp->sc_qp; 1180 1181 memset(attr, 0, sizeof(*attr)); 1182 memset(init_attr, 0, sizeof(*init_attr)); 1183 1184 attr->qp_state = iwqp->ibqp_state; 1185 attr->cur_qp_state = iwqp->ibqp_state; 1186 attr->cap.max_send_wr = iwqp->max_send_wr; 1187 attr->cap.max_recv_wr = iwqp->max_recv_wr; 1188 attr->cap.max_inline_data = qp->qp_uk.max_inline_data; 1189 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; 1190 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; 1191 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp); 1192 attr->port_num = 1; 1193 if 
(rdma_protocol_roce(ibqp->device, 1)) { 1194 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss); 1195 attr->qkey = iwqp->roce_info.qkey; 1196 attr->rq_psn = iwqp->udp_info.epsn; 1197 attr->sq_psn = iwqp->udp_info.psn_nxt; 1198 attr->dest_qp_num = iwqp->roce_info.dest_qp; 1199 attr->pkey_index = iwqp->roce_info.p_key; 1200 attr->retry_cnt = iwqp->udp_info.rexmit_thresh; 1201 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; 1202 attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer; 1203 attr->max_rd_atomic = iwqp->roce_info.ord_size; 1204 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; 1205 } 1206 1207 init_attr->event_handler = iwqp->ibqp.event_handler; 1208 init_attr->qp_context = iwqp->ibqp.qp_context; 1209 init_attr->send_cq = iwqp->ibqp.send_cq; 1210 init_attr->recv_cq = iwqp->ibqp.recv_cq; 1211 init_attr->srq = iwqp->ibqp.srq; 1212 init_attr->cap = attr->cap; 1213 1214 return 0; 1215 } 1216 1217 /** 1218 * irdma_query_pkey - Query partition key 1219 * @ibdev: device pointer from stack 1220 * @port: port number 1221 * @index: index of pkey 1222 * @pkey: pointer to store the pkey 1223 */ 1224 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 1225 u16 *pkey) 1226 { 1227 if (index >= IRDMA_PKEY_TBL_SZ) 1228 return -EINVAL; 1229 1230 *pkey = IRDMA_DEFAULT_PKEY; 1231 return 0; 1232 } 1233 1234 static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio) 1235 { 1236 struct net_device *ndev; 1237 1238 rcu_read_lock(); 1239 ndev = rcu_dereference(attr->ndev); 1240 if (!ndev) 1241 goto exit; 1242 if (is_vlan_dev(ndev)) { 1243 u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio); 1244 1245 prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1246 } 1247 exit: 1248 rcu_read_unlock(); 1249 return prio; 1250 } 1251 1252 static int irdma_wait_for_suspend(struct irdma_qp *iwqp) 1253 { 1254 if (!wait_event_timeout(iwqp->iwdev->suspend_wq, 1255 !iwqp->suspend_pending, 1256 msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) { 1257 iwqp->suspend_pending = false; 1258 ibdev_warn(&iwqp->iwdev->ibdev, 1259 "modify_qp timed out waiting for suspend. 
qp_id = %d, last_ae = 0x%x\n", 1260 iwqp->ibqp.qp_num, iwqp->last_aeq); 1261 return -EBUSY; 1262 } 1263 1264 return 0; 1265 } 1266 1267 /** 1268 * irdma_modify_qp_roce - modify qp request 1269 * @ibqp: qp's pointer for modify 1270 * @attr: access attributes 1271 * @attr_mask: state mask 1272 * @udata: user data 1273 */ 1274 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1275 int attr_mask, struct ib_udata *udata) 1276 { 1277 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) 1278 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) 1279 struct irdma_pd *iwpd = to_iwpd(ibqp->pd); 1280 struct irdma_qp *iwqp = to_iwqp(ibqp); 1281 struct irdma_device *iwdev = iwqp->iwdev; 1282 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1283 struct irdma_qp_host_ctx_info *ctx_info; 1284 struct irdma_roce_offload_info *roce_info; 1285 struct irdma_udp_offload_info *udp_info; 1286 struct irdma_modify_qp_info info = {}; 1287 struct irdma_modify_qp_resp uresp = {}; 1288 struct irdma_modify_qp_req ureq = {}; 1289 unsigned long flags; 1290 u8 issue_modify_qp = 0; 1291 int ret = 0; 1292 1293 ctx_info = &iwqp->ctx_info; 1294 roce_info = &iwqp->roce_info; 1295 udp_info = &iwqp->udp_info; 1296 1297 if (udata) { 1298 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ 1299 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || 1300 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) 1301 return -EINVAL; 1302 } 1303 1304 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1305 return -EOPNOTSUPP; 1306 1307 if (attr_mask & IB_QP_DEST_QPN) 1308 roce_info->dest_qp = attr->dest_qp_num; 1309 1310 if (attr_mask & IB_QP_PKEY_INDEX) { 1311 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index, 1312 &roce_info->p_key); 1313 if (ret) 1314 return ret; 1315 } 1316 1317 if (attr_mask & IB_QP_QKEY) 1318 roce_info->qkey = attr->qkey; 1319 1320 if (attr_mask & IB_QP_PATH_MTU) 1321 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu); 1322 1323 if (attr_mask & IB_QP_SQ_PSN) { 1324 udp_info->psn_nxt = attr->sq_psn; 1325 udp_info->lsn = 0xffff; 1326 udp_info->psn_una = attr->sq_psn; 1327 udp_info->psn_max = attr->sq_psn; 1328 } 1329 1330 if (attr_mask & IB_QP_RQ_PSN) 1331 udp_info->epsn = attr->rq_psn; 1332 1333 if (attr_mask & IB_QP_RNR_RETRY) 1334 udp_info->rnr_nak_thresh = attr->rnr_retry; 1335 1336 if (attr_mask & IB_QP_MIN_RNR_TIMER && 1337 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) 1338 udp_info->min_rnr_timer = attr->min_rnr_timer; 1339 1340 if (attr_mask & IB_QP_RETRY_CNT) 1341 udp_info->rexmit_thresh = attr->retry_cnt; 1342 1343 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; 1344 1345 if (attr_mask & IB_QP_AV) { 1346 struct irdma_av *av = &iwqp->roce_ah.av; 1347 const struct ib_gid_attr *sgid_attr = 1348 attr->ah_attr.grh.sgid_attr; 1349 u16 vlan_id = VLAN_N_VID; 1350 u32 local_ip[4]; 1351 1352 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah)); 1353 if (attr->ah_attr.ah_flags & IB_AH_GRH) { 1354 udp_info->ttl = attr->ah_attr.grh.hop_limit; 1355 udp_info->flow_label = attr->ah_attr.grh.flow_label; 1356 udp_info->tos = attr->ah_attr.grh.traffic_class; 1357 udp_info->src_port = 1358 rdma_get_udp_sport(udp_info->flow_label, 1359 ibqp->qp_num, 1360 roce_info->dest_qp); 1361 irdma_qp_rem_qos(&iwqp->sc_qp); 1362 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri); 1363 if (iwqp->sc_qp.vsi->dscp_mode) 1364 ctx_info->user_pri = 1365 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)]; 1366 else 1367 
ctx_info->user_pri = rt_tos2priority(udp_info->tos); 1368 } 1369 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, 1370 ctx_info->roce_info->mac_addr); 1371 if (ret) 1372 return ret; 1373 ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr, 1374 ctx_info->user_pri); 1375 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri)) 1376 return -ENOMEM; 1377 iwqp->sc_qp.user_pri = ctx_info->user_pri; 1378 irdma_qp_add_qos(&iwqp->sc_qp); 1379 1380 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) 1381 vlan_id = 0; 1382 if (vlan_id < VLAN_N_VID) { 1383 udp_info->insert_vlan_tag = true; 1384 udp_info->vlan_tag = vlan_id | 1385 ctx_info->user_pri << VLAN_PRIO_SHIFT; 1386 } else { 1387 udp_info->insert_vlan_tag = false; 1388 } 1389 1390 av->attrs = attr->ah_attr; 1391 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); 1392 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid); 1393 av->net_type = rdma_gid_attr_network_type(sgid_attr); 1394 if (av->net_type == RDMA_NETWORK_IPV6) { 1395 __be32 *daddr = 1396 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32; 1397 __be32 *saddr = 1398 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32; 1399 1400 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr); 1401 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr); 1402 1403 udp_info->ipv4 = false; 1404 irdma_copy_ip_ntohl(local_ip, daddr); 1405 1406 } else if (av->net_type == RDMA_NETWORK_IPV4) { 1407 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; 1408 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; 1409 1410 local_ip[0] = ntohl(daddr); 1411 1412 udp_info->ipv4 = true; 1413 udp_info->dest_ip_addr[0] = 0; 1414 udp_info->dest_ip_addr[1] = 0; 1415 udp_info->dest_ip_addr[2] = 0; 1416 udp_info->dest_ip_addr[3] = local_ip[0]; 1417 1418 udp_info->local_ipaddr[0] = 0; 1419 udp_info->local_ipaddr[1] = 0; 1420 udp_info->local_ipaddr[2] = 0; 1421 udp_info->local_ipaddr[3] = ntohl(saddr); 1422 } 1423 udp_info->arp_idx = 1424 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4, 1425 attr->ah_attr.roce.dmac); 1426 } 1427 1428 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 1429 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) { 1430 ibdev_err(&iwdev->ibdev, 1431 "rd_atomic = %d, above max_hw_ord=%d\n", 1432 attr->max_rd_atomic, 1433 dev->hw_attrs.max_hw_ord); 1434 return -EINVAL; 1435 } 1436 if (attr->max_rd_atomic) 1437 roce_info->ord_size = attr->max_rd_atomic; 1438 info.ord_valid = true; 1439 } 1440 1441 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 1442 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { 1443 ibdev_err(&iwdev->ibdev, 1444 "rd_atomic = %d, above max_hw_ird=%d\n", 1445 attr->max_dest_rd_atomic, 1446 dev->hw_attrs.max_hw_ird); 1447 return -EINVAL; 1448 } 1449 if (attr->max_dest_rd_atomic) 1450 roce_info->ird_size = attr->max_dest_rd_atomic; 1451 } 1452 1453 if (attr_mask & IB_QP_ACCESS_FLAGS) { 1454 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) 1455 roce_info->wr_rdresp_en = true; 1456 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 1457 roce_info->wr_rdresp_en = true; 1458 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 1459 roce_info->rd_en = true; 1460 if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS) 1461 if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) 1462 ctx_info->remote_atomics_en = true; 1463 } 1464 1465 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); 1466 1467 ibdev_dbg(&iwdev->ibdev, 1468 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n", 1469 
__builtin_return_address(0), ibqp->qp_num, attr->qp_state, 1470 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask); 1471 1472 spin_lock_irqsave(&iwqp->lock, flags); 1473 if (attr_mask & IB_QP_STATE) { 1474 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, 1475 iwqp->ibqp.qp_type, attr_mask)) { 1476 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n", 1477 iwqp->ibqp.qp_num, iwqp->ibqp_state, 1478 attr->qp_state); 1479 ret = -EINVAL; 1480 goto exit; 1481 } 1482 info.curr_iwarp_state = iwqp->iwarp_state; 1483 1484 switch (attr->qp_state) { 1485 case IB_QPS_INIT: 1486 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1487 ret = -EINVAL; 1488 goto exit; 1489 } 1490 1491 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1492 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1493 issue_modify_qp = 1; 1494 } 1495 break; 1496 case IB_QPS_RTR: 1497 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1498 ret = -EINVAL; 1499 goto exit; 1500 } 1501 info.arp_cache_idx_valid = true; 1502 info.cq_num_valid = true; 1503 info.next_iwarp_state = IRDMA_QP_STATE_RTR; 1504 issue_modify_qp = 1; 1505 break; 1506 case IB_QPS_RTS: 1507 if (iwqp->ibqp_state < IB_QPS_RTR || 1508 iwqp->ibqp_state == IB_QPS_ERR) { 1509 ret = -EINVAL; 1510 goto exit; 1511 } 1512 1513 info.arp_cache_idx_valid = true; 1514 info.cq_num_valid = true; 1515 info.ord_valid = true; 1516 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1517 issue_modify_qp = 1; 1518 if (iwdev->push_mode && udata && 1519 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1520 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1521 spin_unlock_irqrestore(&iwqp->lock, flags); 1522 irdma_alloc_push_page(iwqp); 1523 spin_lock_irqsave(&iwqp->lock, flags); 1524 } 1525 break; 1526 case IB_QPS_SQD: 1527 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD) 1528 goto exit; 1529 1530 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) { 1531 ret = -EINVAL; 1532 goto exit; 1533 } 1534 1535 info.next_iwarp_state = IRDMA_QP_STATE_SQD; 1536 issue_modify_qp = 1; 1537 iwqp->suspend_pending = true; 1538 break; 1539 case IB_QPS_SQE: 1540 case IB_QPS_ERR: 1541 case IB_QPS_RESET: 1542 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1543 spin_unlock_irqrestore(&iwqp->lock, flags); 1544 if (udata && udata->inlen) { 1545 if (ib_copy_from_udata(&ureq, udata, 1546 min(sizeof(ureq), udata->inlen))) 1547 return -EINVAL; 1548 1549 irdma_flush_wqes(iwqp, 1550 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1551 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1552 IRDMA_REFLUSH); 1553 } 1554 return 0; 1555 } 1556 1557 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1558 issue_modify_qp = 1; 1559 break; 1560 default: 1561 ret = -EINVAL; 1562 goto exit; 1563 } 1564 1565 iwqp->ibqp_state = attr->qp_state; 1566 } 1567 1568 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1569 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1570 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1571 spin_unlock_irqrestore(&iwqp->lock, flags); 1572 1573 if (attr_mask & IB_QP_STATE) { 1574 if (issue_modify_qp) { 1575 ctx_info->rem_endpoint_idx = udp_info->arp_idx; 1576 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1577 return -EINVAL; 1578 if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) { 1579 ret = irdma_wait_for_suspend(iwqp); 1580 if (ret) 1581 return ret; 1582 } 1583 spin_lock_irqsave(&iwqp->lock, flags); 1584 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1585 iwqp->iwarp_state = info.next_iwarp_state; 1586 iwqp->ibqp_state = attr->qp_state; 1587 } 1588 if (iwqp->ibqp_state > IB_QPS_RTS && 1589 !iwqp->flush_issued) { 1590 spin_unlock_irqrestore(&iwqp->lock, flags); 1591 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | 1592 IRDMA_FLUSH_RQ | 1593 IRDMA_FLUSH_WAIT); 1594 iwqp->flush_issued = 1; 1595 } else { 1596 spin_unlock_irqrestore(&iwqp->lock, flags); 1597 } 1598 } else { 1599 iwqp->ibqp_state = attr->qp_state; 1600 } 1601 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1602 struct irdma_ucontext *ucontext; 1603 1604 ucontext = rdma_udata_to_drv_context(udata, 1605 struct irdma_ucontext, ibucontext); 1606 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1607 !iwqp->push_wqe_mmap_entry && 1608 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1609 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1610 uresp.push_valid = 1; 1611 uresp.push_offset = iwqp->sc_qp.push_offset; 1612 } 1613 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1614 udata->outlen)); 1615 if (ret) { 1616 irdma_remove_push_mmap_entries(iwqp); 1617 ibdev_dbg(&iwdev->ibdev, 1618 "VERBS: copy_to_udata failed\n"); 1619 return ret; 1620 } 1621 } 1622 } 1623 1624 return 0; 1625 exit: 1626 spin_unlock_irqrestore(&iwqp->lock, flags); 1627 1628 return ret; 1629 } 1630 1631 /** 1632 * irdma_modify_qp - modify qp request 1633 * @ibqp: qp's pointer for modify 1634 * @attr: access attributes 1635 * @attr_mask: state mask 1636 * @udata: user data 1637 */ 1638 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1639 struct ib_udata *udata) 1640 { 1641 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) 1642 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) 1643 struct irdma_qp *iwqp = to_iwqp(ibqp); 1644 struct irdma_device *iwdev = iwqp->iwdev; 1645 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1646 struct irdma_qp_host_ctx_info *ctx_info; 1647 struct irdma_tcp_offload_info *tcp_info; 1648 struct irdma_iwarp_offload_info *offload_info; 1649 struct irdma_modify_qp_info info = {}; 1650 struct irdma_modify_qp_resp uresp = {}; 1651 struct irdma_modify_qp_req ureq = {}; 1652 u8 issue_modify_qp = 0; 1653 u8 dont_wait = 0; 1654 int err; 1655 unsigned long flags; 1656 1657 if (udata) { 1658 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ 1659 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || 1660 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) 1661 return 
-EINVAL; 1662 } 1663 1664 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1665 return -EOPNOTSUPP; 1666 1667 ctx_info = &iwqp->ctx_info; 1668 offload_info = &iwqp->iwarp_info; 1669 tcp_info = &iwqp->tcp_info; 1670 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); 1671 ibdev_dbg(&iwdev->ibdev, 1672 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n", 1673 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, 1674 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq, 1675 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask); 1676 1677 spin_lock_irqsave(&iwqp->lock, flags); 1678 if (attr_mask & IB_QP_STATE) { 1679 info.curr_iwarp_state = iwqp->iwarp_state; 1680 switch (attr->qp_state) { 1681 case IB_QPS_INIT: 1682 case IB_QPS_RTR: 1683 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1684 err = -EINVAL; 1685 goto exit; 1686 } 1687 1688 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1689 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1690 issue_modify_qp = 1; 1691 } 1692 if (iwdev->push_mode && udata && 1693 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1694 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1695 spin_unlock_irqrestore(&iwqp->lock, flags); 1696 irdma_alloc_push_page(iwqp); 1697 spin_lock_irqsave(&iwqp->lock, flags); 1698 } 1699 break; 1700 case IB_QPS_RTS: 1701 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS || 1702 !iwqp->cm_id) { 1703 err = -EINVAL; 1704 goto exit; 1705 } 1706 1707 issue_modify_qp = 1; 1708 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED; 1709 iwqp->hte_added = 1; 1710 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1711 info.tcp_ctx_valid = true; 1712 info.ord_valid = true; 1713 info.arp_cache_idx_valid = true; 1714 info.cq_num_valid = true; 1715 break; 1716 case IB_QPS_SQD: 1717 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) { 1718 err = 0; 1719 goto exit; 1720 } 1721 1722 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING || 1723 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) { 1724 err = 0; 1725 goto exit; 1726 } 1727 1728 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) { 1729 err = -EINVAL; 1730 goto exit; 1731 } 1732 1733 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING; 1734 issue_modify_qp = 1; 1735 break; 1736 case IB_QPS_SQE: 1737 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) { 1738 err = -EINVAL; 1739 goto exit; 1740 } 1741 1742 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE; 1743 issue_modify_qp = 1; 1744 break; 1745 case IB_QPS_ERR: 1746 case IB_QPS_RESET: 1747 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1748 spin_unlock_irqrestore(&iwqp->lock, flags); 1749 if (udata && udata->inlen) { 1750 if (ib_copy_from_udata(&ureq, udata, 1751 min(sizeof(ureq), udata->inlen))) 1752 return -EINVAL; 1753 1754 irdma_flush_wqes(iwqp, 1755 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1756 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1757 IRDMA_REFLUSH); 1758 } 1759 return 0; 1760 } 1761 1762 if (iwqp->sc_qp.term_flags) { 1763 spin_unlock_irqrestore(&iwqp->lock, flags); 1764 irdma_terminate_del_timer(&iwqp->sc_qp); 1765 spin_lock_irqsave(&iwqp->lock, flags); 1766 } 1767 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1768 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED && 1769 iwdev->iw_status && 1770 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT) 1771 info.reset_tcp_conn = true; 1772 else 1773 dont_wait = 1; 1774 1775 issue_modify_qp = 1; 1776 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1777 break; 1778 default: 1779 err = -EINVAL; 1780 goto exit; 1781 } 1782 1783 iwqp->ibqp_state = attr->qp_state; 1784 } 1785 if (attr_mask & IB_QP_ACCESS_FLAGS) { 1786 ctx_info->iwarp_info_valid = true; 1787 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) 1788 offload_info->wr_rdresp_en = true; 1789 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 1790 offload_info->wr_rdresp_en = true; 1791 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 1792 offload_info->rd_en = true; 1793 } 1794 1795 if (ctx_info->iwarp_info_valid) { 1796 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1797 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1798 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1799 } 1800 spin_unlock_irqrestore(&iwqp->lock, flags); 1801 1802 if (attr_mask & IB_QP_STATE) { 1803 if (issue_modify_qp) { 1804 ctx_info->rem_endpoint_idx = tcp_info->arp_idx; 1805 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1806 return -EINVAL; 1807 } 1808 1809 spin_lock_irqsave(&iwqp->lock, flags); 1810 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1811 iwqp->iwarp_state = info.next_iwarp_state; 1812 iwqp->ibqp_state = attr->qp_state; 1813 } 1814 spin_unlock_irqrestore(&iwqp->lock, flags); 1815 } 1816 1817 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { 1818 if (dont_wait) { 1819 if (iwqp->hw_tcp_state) { 1820 spin_lock_irqsave(&iwqp->lock, flags); 1821 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; 1822 iwqp->last_aeq = IRDMA_AE_RESET_SENT; 1823 spin_unlock_irqrestore(&iwqp->lock, flags); 1824 } 1825 irdma_cm_disconn(iwqp); 1826 } else { 1827 int close_timer_started; 1828 1829 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); 1830 1831 if (iwqp->cm_node) { 1832 refcount_inc(&iwqp->cm_node->refcnt); 1833 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1834 close_timer_started = atomic_inc_return(&iwqp->close_timer_started); 1835 if (iwqp->cm_id && close_timer_started == 1) 1836 irdma_schedule_cm_timer(iwqp->cm_node, 1837 (struct irdma_puda_buf *)iwqp, 1838 IRDMA_TIMER_TYPE_CLOSE, 1, 0); 1839 1840 irdma_rem_ref_cm_node(iwqp->cm_node); 1841 } else { 1842 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1843 } 1844 } 1845 } 1846 if (attr_mask & IB_QP_STATE && udata && udata->outlen && 1847 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1848 struct irdma_ucontext *ucontext; 1849 1850 ucontext = rdma_udata_to_drv_context(udata, 1851 struct irdma_ucontext, ibucontext); 1852 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1853 !iwqp->push_wqe_mmap_entry && 1854 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1855 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1856 uresp.push_valid = 1; 1857 uresp.push_offset = iwqp->sc_qp.push_offset; 1858 } 1859 1860 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1861 udata->outlen)); 1862 if (err) { 1863 irdma_remove_push_mmap_entries(iwqp); 1864 ibdev_dbg(&iwdev->ibdev, 1865 "VERBS: copy_to_udata 
failed\n"); 1866 return err; 1867 } 1868 } 1869 1870 return 0; 1871 exit: 1872 spin_unlock_irqrestore(&iwqp->lock, flags); 1873 1874 return err; 1875 } 1876 1877 /** 1878 * irdma_srq_free_rsrc - free up resources for srq 1879 * @rf: RDMA PCI function 1880 * @iwsrq: srq ptr 1881 */ 1882 static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq) 1883 { 1884 struct irdma_sc_srq *srq = &iwsrq->sc_srq; 1885 1886 if (!iwsrq->user_mode) { 1887 dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size, 1888 iwsrq->kmem.va, iwsrq->kmem.pa); 1889 iwsrq->kmem.va = NULL; 1890 } 1891 1892 irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id); 1893 } 1894 1895 /** 1896 * irdma_cq_free_rsrc - free up resources for cq 1897 * @rf: RDMA PCI function 1898 * @iwcq: cq ptr 1899 */ 1900 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq) 1901 { 1902 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1903 1904 if (!iwcq->user_mode) { 1905 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size, 1906 iwcq->kmem.va, iwcq->kmem.pa); 1907 iwcq->kmem.va = NULL; 1908 dma_free_coherent(rf->sc_dev.hw->device, 1909 iwcq->kmem_shadow.size, 1910 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); 1911 iwcq->kmem_shadow.va = NULL; 1912 } 1913 1914 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id); 1915 } 1916 1917 /** 1918 * irdma_free_cqbuf - worker to free a cq buffer 1919 * @work: provides access to the cq buffer to free 1920 */ 1921 static void irdma_free_cqbuf(struct work_struct *work) 1922 { 1923 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work); 1924 1925 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, 1926 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); 1927 cq_buf->kmem_buf.va = NULL; 1928 kfree(cq_buf); 1929 } 1930 1931 /** 1932 * irdma_process_resize_list - remove resized cq buffers from the resize_list 1933 * @iwcq: cq which owns the resize_list 1934 * @iwdev: irdma device 1935 * @lcqe_buf: the buffer where the last cqe is received 1936 */ 1937 static int irdma_process_resize_list(struct irdma_cq *iwcq, 1938 struct irdma_device *iwdev, 1939 struct irdma_cq_buf *lcqe_buf) 1940 { 1941 struct list_head *tmp_node, *list_node; 1942 struct irdma_cq_buf *cq_buf; 1943 int cnt = 0; 1944 1945 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 1946 cq_buf = list_entry(list_node, struct irdma_cq_buf, list); 1947 if (cq_buf == lcqe_buf) 1948 return cnt; 1949 1950 list_del(&cq_buf->list); 1951 queue_work(iwdev->cleanup_wq, &cq_buf->work); 1952 cnt++; 1953 } 1954 1955 return cnt; 1956 } 1957 1958 /** 1959 * irdma_destroy_srq - destroy srq 1960 * @ibsrq: srq pointer 1961 * @udata: user data 1962 */ 1963 static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) 1964 { 1965 struct irdma_device *iwdev = to_iwdev(ibsrq->device); 1966 struct irdma_srq *iwsrq = to_iwsrq(ibsrq); 1967 struct irdma_sc_srq *srq = &iwsrq->sc_srq; 1968 1969 irdma_srq_wq_destroy(iwdev->rf, srq); 1970 irdma_srq_free_rsrc(iwdev->rf, iwsrq); 1971 return 0; 1972 } 1973 1974 /** 1975 * irdma_destroy_cq - destroy cq 1976 * @ib_cq: cq pointer 1977 * @udata: user data 1978 */ 1979 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 1980 { 1981 struct irdma_device *iwdev = to_iwdev(ib_cq->device); 1982 struct irdma_cq *iwcq = to_iwcq(ib_cq); 1983 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1984 struct irdma_sc_dev *dev = cq->dev; 1985 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; 1986 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, 
sc_ceq); 1987 unsigned long flags; 1988 1989 spin_lock_irqsave(&iwcq->lock, flags); 1990 if (!list_empty(&iwcq->cmpl_generated)) 1991 irdma_remove_cmpls_list(iwcq); 1992 if (!list_empty(&iwcq->resize_list)) 1993 irdma_process_resize_list(iwcq, iwdev, NULL); 1994 spin_unlock_irqrestore(&iwcq->lock, flags); 1995 1996 irdma_cq_rem_ref(ib_cq); 1997 wait_for_completion(&iwcq->free_cq); 1998 1999 irdma_cq_wq_destroy(iwdev->rf, cq); 2000 2001 spin_lock_irqsave(&iwceq->ce_lock, flags); 2002 irdma_sc_cleanup_ceqes(cq, ceq); 2003 spin_unlock_irqrestore(&iwceq->ce_lock, flags); 2004 irdma_cq_free_rsrc(iwdev->rf, iwcq); 2005 2006 return 0; 2007 } 2008 2009 /** 2010 * irdma_resize_cq - resize cq 2011 * @ibcq: cq to be resized 2012 * @entries: desired cq size 2013 * @udata: user data 2014 */ 2015 static int irdma_resize_cq(struct ib_cq *ibcq, int entries, 2016 struct ib_udata *udata) 2017 { 2018 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer) 2019 struct irdma_cq *iwcq = to_iwcq(ibcq); 2020 struct irdma_sc_dev *dev = iwcq->sc_cq.dev; 2021 struct irdma_cqp_request *cqp_request; 2022 struct cqp_cmds_info *cqp_info; 2023 struct irdma_modify_cq_info *m_info; 2024 struct irdma_modify_cq_info info = {}; 2025 struct irdma_dma_mem kmem_buf; 2026 struct irdma_cq_mr *cqmr_buf; 2027 struct irdma_pbl *iwpbl_buf; 2028 struct irdma_device *iwdev; 2029 struct irdma_pci_f *rf; 2030 struct irdma_cq_buf *cq_buf = NULL; 2031 unsigned long flags; 2032 u8 cqe_size; 2033 int ret; 2034 2035 iwdev = to_iwdev(ibcq->device); 2036 rf = iwdev->rf; 2037 2038 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 2039 IRDMA_FEATURE_CQ_RESIZE)) 2040 return -EOPNOTSUPP; 2041 2042 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN) 2043 return -EINVAL; 2044 2045 if (entries > rf->max_cqe) 2046 return -EINVAL; 2047 2048 if (!iwcq->user_mode) { 2049 entries += 2; 2050 2051 if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct && 2052 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2053 entries *= 2; 2054 2055 if (entries & 1) 2056 entries += 1; /* cq size must be an even number */ 2057 2058 cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 
64 : 32; 2059 if (entries * cqe_size == IRDMA_HW_PAGE_SIZE) 2060 entries += 2; 2061 } 2062 2063 info.cq_size = max(entries, 4); 2064 2065 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) 2066 return 0; 2067 2068 if (udata) { 2069 struct irdma_resize_cq_req req = {}; 2070 struct irdma_ucontext *ucontext = 2071 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2072 ibucontext); 2073 2074 /* CQ resize not supported with legacy GEN_1 libi40iw */ 2075 if (ucontext->legacy_mode) 2076 return -EOPNOTSUPP; 2077 2078 if (ib_copy_from_udata(&req, udata, 2079 min(sizeof(req), udata->inlen))) 2080 return -EINVAL; 2081 2082 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2083 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer, 2084 &ucontext->cq_reg_mem_list); 2085 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2086 2087 if (!iwpbl_buf) 2088 return -ENOMEM; 2089 2090 cqmr_buf = &iwpbl_buf->cq_mr; 2091 if (iwpbl_buf->pbl_allocated) { 2092 info.virtual_map = true; 2093 info.pbl_chunk_size = 1; 2094 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; 2095 } else { 2096 info.cq_pa = cqmr_buf->cq_pbl.addr; 2097 } 2098 } else { 2099 /* Kmode CQ resize */ 2100 int rsize; 2101 2102 rsize = info.cq_size * sizeof(struct irdma_cqe); 2103 kmem_buf.size = ALIGN(round_up(rsize, 256), 256); 2104 kmem_buf.va = dma_alloc_coherent(dev->hw->device, 2105 kmem_buf.size, &kmem_buf.pa, 2106 GFP_KERNEL); 2107 if (!kmem_buf.va) 2108 return -ENOMEM; 2109 2110 info.cq_base = kmem_buf.va; 2111 info.cq_pa = kmem_buf.pa; 2112 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); 2113 if (!cq_buf) { 2114 ret = -ENOMEM; 2115 goto error; 2116 } 2117 } 2118 2119 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2120 if (!cqp_request) { 2121 ret = -ENOMEM; 2122 goto error; 2123 } 2124 2125 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; 2126 info.cq_resize = true; 2127 2128 cqp_info = &cqp_request->info; 2129 m_info = &cqp_info->in.u.cq_modify.info; 2130 memcpy(m_info, &info, sizeof(*m_info)); 2131 2132 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY; 2133 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; 2134 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; 2135 cqp_info->post_sq = 1; 2136 ret = irdma_handle_cqp_op(rf, cqp_request); 2137 irdma_put_cqp_request(&rf->cqp, cqp_request); 2138 if (ret) 2139 goto error; 2140 2141 spin_lock_irqsave(&iwcq->lock, flags); 2142 if (cq_buf) { 2143 cq_buf->kmem_buf = iwcq->kmem; 2144 cq_buf->hw = dev->hw; 2145 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk)); 2146 INIT_WORK(&cq_buf->work, irdma_free_cqbuf); 2147 list_add_tail(&cq_buf->list, &iwcq->resize_list); 2148 iwcq->kmem = kmem_buf; 2149 } 2150 2151 irdma_sc_cq_resize(&iwcq->sc_cq, &info); 2152 ibcq->cqe = info.cq_size - 1; 2153 spin_unlock_irqrestore(&iwcq->lock, flags); 2154 2155 return 0; 2156 error: 2157 if (!udata) { 2158 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va, 2159 kmem_buf.pa); 2160 kmem_buf.va = NULL; 2161 } 2162 kfree(cq_buf); 2163 2164 return ret; 2165 } 2166 2167 /** 2168 * irdma_srq_event - event notification for srq limit 2169 * @srq: shared srq struct 2170 */ 2171 void irdma_srq_event(struct irdma_sc_srq *srq) 2172 { 2173 struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq); 2174 struct ib_srq *ibsrq = &iwsrq->ibsrq; 2175 struct ib_event event; 2176 2177 srq->srq_limit = 0; 2178 2179 if (!ibsrq->event_handler) 2180 return; 2181 2182 event.device = ibsrq->device; 2183 event.element.port_num = 1; 2184 event.element.srq = 
ibsrq; 2185 event.event = IB_EVENT_SRQ_LIMIT_REACHED; 2186 ibsrq->event_handler(&event, ibsrq->srq_context); 2187 } 2188 2189 /** 2190 * irdma_modify_srq - modify srq request 2191 * @ibsrq: srq's pointer for modify 2192 * @attr: access attributes 2193 * @attr_mask: state mask 2194 * @udata: user data 2195 */ 2196 static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 2197 enum ib_srq_attr_mask attr_mask, 2198 struct ib_udata *udata) 2199 { 2200 struct irdma_device *iwdev = to_iwdev(ibsrq->device); 2201 struct irdma_srq *iwsrq = to_iwsrq(ibsrq); 2202 struct irdma_cqp_request *cqp_request; 2203 struct irdma_pci_f *rf = iwdev->rf; 2204 struct irdma_modify_srq_info *info; 2205 struct cqp_cmds_info *cqp_info; 2206 int status; 2207 2208 if (attr_mask & IB_SRQ_MAX_WR) 2209 return -EINVAL; 2210 2211 if (!(attr_mask & IB_SRQ_LIMIT)) 2212 return 0; 2213 2214 if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size) 2215 return -EINVAL; 2216 2217 /* Execute this cqp op synchronously, so we can update srq_limit 2218 * upon successful completion. 2219 */ 2220 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2221 if (!cqp_request) 2222 return -ENOMEM; 2223 2224 cqp_info = &cqp_request->info; 2225 info = &cqp_info->in.u.srq_modify.info; 2226 info->srq_limit = attr->srq_limit; 2227 if (info->srq_limit > 0xFFF) 2228 info->srq_limit = 0xFFF; 2229 info->arm_limit_event = 1; 2230 2231 cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY; 2232 cqp_info->post_sq = 1; 2233 cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq; 2234 cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request; 2235 status = irdma_handle_cqp_op(rf, cqp_request); 2236 irdma_put_cqp_request(&rf->cqp, cqp_request); 2237 if (status) 2238 return status; 2239 2240 iwsrq->sc_srq.srq_limit = info->srq_limit; 2241 2242 return 0; 2243 } 2244 2245 static int irdma_setup_umode_srq(struct irdma_device *iwdev, 2246 struct irdma_srq *iwsrq, 2247 struct irdma_srq_init_info *info, 2248 struct ib_udata *udata) 2249 { 2250 #define IRDMA_CREATE_SRQ_MIN_REQ_LEN \ 2251 offsetofend(struct irdma_create_srq_req, user_shadow_area) 2252 struct irdma_create_srq_req req = {}; 2253 struct irdma_ucontext *ucontext; 2254 struct irdma_srq_mr *srqmr; 2255 struct irdma_pbl *iwpbl; 2256 unsigned long flags; 2257 2258 iwsrq->user_mode = true; 2259 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2260 ibucontext); 2261 2262 if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN) 2263 return -EINVAL; 2264 2265 if (ib_copy_from_udata(&req, udata, 2266 min(sizeof(req), udata->inlen))) 2267 return -EFAULT; 2268 2269 spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); 2270 iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf, 2271 &ucontext->srq_reg_mem_list); 2272 spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); 2273 if (!iwpbl) 2274 return -EPROTO; 2275 2276 iwsrq->iwpbl = iwpbl; 2277 srqmr = &iwpbl->srq_mr; 2278 2279 if (iwpbl->pbl_allocated) { 2280 info->virtual_map = true; 2281 info->pbl_chunk_size = 1; 2282 info->first_pm_pbl_idx = srqmr->srq_pbl.idx; 2283 info->leaf_pbl_size = 1; 2284 } else { 2285 info->srq_pa = srqmr->srq_pbl.addr; 2286 } 2287 info->shadow_area_pa = srqmr->shadow; 2288 2289 return 0; 2290 } 2291 2292 static int irdma_setup_kmode_srq(struct irdma_device *iwdev, 2293 struct irdma_srq *iwsrq, 2294 struct irdma_srq_init_info *info, u32 depth, 2295 u8 shift) 2296 { 2297 struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info; 2298 struct irdma_dma_mem *mem = &iwsrq->kmem; 2299 u32 size, ring_size; 
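	/* Descriptive note (editor-added): the kernel-mode SRQ buffer built
	 * below is a ring of 'depth' WQEs of IRDMA_QP_WQE_MIN_SIZE bytes each,
	 * followed by the shadow area, carved from one 256-byte-aligned
	 * DMA-coherent allocation, so the shadow area PA sits at
	 * srq_pa + ring_size.
	 */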
2300 2301 ring_size = depth * IRDMA_QP_WQE_MIN_SIZE; 2302 size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3); 2303 2304 mem->size = ALIGN(size, 256); 2305 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, 2306 &mem->pa, GFP_KERNEL); 2307 if (!mem->va) 2308 return -ENOMEM; 2309 2310 ukinfo->srq = mem->va; 2311 ukinfo->srq_size = depth >> shift; 2312 ukinfo->shadow_area = mem->va + ring_size; 2313 2314 info->srq_pa = mem->pa; 2315 info->shadow_area_pa = info->srq_pa + ring_size; 2316 2317 return 0; 2318 } 2319 2320 /** 2321 * irdma_create_srq - create srq 2322 * @ibsrq: ib's srq pointer 2323 * @initattrs: attributes for srq 2324 * @udata: user data for create srq 2325 */ 2326 static int irdma_create_srq(struct ib_srq *ibsrq, 2327 struct ib_srq_init_attr *initattrs, 2328 struct ib_udata *udata) 2329 { 2330 struct irdma_device *iwdev = to_iwdev(ibsrq->device); 2331 struct ib_srq_attr *attr = &initattrs->attr; 2332 struct irdma_pd *iwpd = to_iwpd(ibsrq->pd); 2333 struct irdma_srq *iwsrq = to_iwsrq(ibsrq); 2334 struct irdma_srq_uk_init_info *ukinfo; 2335 struct irdma_cqp_request *cqp_request; 2336 struct irdma_srq_init_info info = {}; 2337 struct irdma_pci_f *rf = iwdev->rf; 2338 struct irdma_uk_attrs *uk_attrs; 2339 struct cqp_cmds_info *cqp_info; 2340 int err_code = 0; 2341 u32 depth; 2342 u8 shift; 2343 2344 uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs; 2345 ukinfo = &info.srq_uk_init_info; 2346 2347 if (initattrs->srq_type != IB_SRQT_BASIC) 2348 return -EOPNOTSUPP; 2349 2350 if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) || 2351 attr->max_sge > uk_attrs->max_hw_wq_frags) 2352 return -EINVAL; 2353 2354 refcount_set(&iwsrq->refcnt, 1); 2355 spin_lock_init(&iwsrq->lock); 2356 err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq, 2357 &iwsrq->srq_num, &rf->next_srq); 2358 if (err_code) 2359 return err_code; 2360 2361 ukinfo->max_srq_frag_cnt = attr->max_sge; 2362 ukinfo->uk_attrs = uk_attrs; 2363 ukinfo->srq_id = iwsrq->srq_num; 2364 2365 irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0, 2366 &shift); 2367 2368 err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr, 2369 shift, &depth); 2370 if (err_code) 2371 return err_code; 2372 2373 /* Actual SRQ size in WRs for ring and HW */ 2374 ukinfo->srq_size = depth >> shift; 2375 2376 /* Max postable WRs to SRQ */ 2377 iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift; 2378 attr->max_wr = iwsrq->max_wr; 2379 2380 if (udata) 2381 err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata); 2382 else 2383 err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth, 2384 shift); 2385 2386 if (err_code) 2387 goto free_rsrc; 2388 2389 info.vsi = &iwdev->vsi; 2390 info.pd = &iwpd->sc_pd; 2391 2392 iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock; 2393 err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info); 2394 if (err_code) 2395 goto free_dmem; 2396 2397 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2398 if (!cqp_request) { 2399 err_code = -ENOMEM; 2400 goto free_dmem; 2401 } 2402 2403 cqp_info = &cqp_request->info; 2404 cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE; 2405 cqp_info->post_sq = 1; 2406 cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq; 2407 cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request; 2408 err_code = irdma_handle_cqp_op(rf, cqp_request); 2409 irdma_put_cqp_request(&rf->cqp, cqp_request); 2410 if (err_code) 2411 goto free_dmem; 2412 2413 if (udata) { 2414 struct irdma_create_srq_resp resp = {}; 2415 2416 resp.srq_id = iwsrq->srq_num; 2417 resp.srq_size = ukinfo->srq_size; 2418 if 
(ib_copy_to_udata(udata, &resp, 2419 min(sizeof(resp), udata->outlen))) { 2420 err_code = -EPROTO; 2421 goto srq_destroy; 2422 } 2423 } 2424 2425 return 0; 2426 2427 srq_destroy: 2428 irdma_srq_wq_destroy(rf, &iwsrq->sc_srq); 2429 2430 free_dmem: 2431 if (!iwsrq->user_mode) 2432 dma_free_coherent(rf->hw.device, iwsrq->kmem.size, 2433 iwsrq->kmem.va, iwsrq->kmem.pa); 2434 free_rsrc: 2435 irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num); 2436 return err_code; 2437 } 2438 2439 /** 2440 * irdma_query_srq - get SRQ attributes 2441 * @ibsrq: the SRQ to query 2442 * @attr: the attributes of the SRQ 2443 */ 2444 static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) 2445 { 2446 struct irdma_srq *iwsrq = to_iwsrq(ibsrq); 2447 2448 attr->max_wr = iwsrq->max_wr; 2449 attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt; 2450 attr->srq_limit = iwsrq->sc_srq.srq_limit; 2451 2452 return 0; 2453 } 2454 2455 static inline int cq_validate_flags(u32 flags, u8 hw_rev) 2456 { 2457 /* GEN1/2 does not support CQ create flags */ 2458 if (hw_rev <= IRDMA_GEN_2) 2459 return flags ? -EOPNOTSUPP : 0; 2460 2461 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0; 2462 } 2463 2464 /** 2465 * irdma_create_cq - create cq 2466 * @ibcq: CQ allocated 2467 * @attr: attributes for cq 2468 * @attrs: uverbs attribute bundle 2469 */ 2470 static int irdma_create_cq(struct ib_cq *ibcq, 2471 const struct ib_cq_init_attr *attr, 2472 struct uverbs_attr_bundle *attrs) 2473 { 2474 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf) 2475 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size) 2476 struct ib_udata *udata = &attrs->driver_udata; 2477 struct ib_device *ibdev = ibcq->device; 2478 struct irdma_device *iwdev = to_iwdev(ibdev); 2479 struct irdma_pci_f *rf = iwdev->rf; 2480 struct irdma_cq *iwcq = to_iwcq(ibcq); 2481 u32 cq_num = 0; 2482 struct irdma_sc_cq *cq; 2483 struct irdma_sc_dev *dev = &rf->sc_dev; 2484 struct irdma_cq_init_info info = {}; 2485 struct irdma_cqp_request *cqp_request; 2486 struct cqp_cmds_info *cqp_info; 2487 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; 2488 unsigned long flags; 2489 int err_code; 2490 int entries = attr->cqe; 2491 bool cqe_64byte_ena; 2492 u8 cqe_size; 2493 2494 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); 2495 if (err_code) 2496 return err_code; 2497 2498 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || 2499 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) 2500 return -EINVAL; 2501 2502 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, 2503 &rf->next_cq); 2504 if (err_code) 2505 return err_code; 2506 2507 cq = &iwcq->sc_cq; 2508 cq->back_cq = iwcq; 2509 refcount_set(&iwcq->refcnt, 1); 2510 spin_lock_init(&iwcq->lock); 2511 INIT_LIST_HEAD(&iwcq->resize_list); 2512 INIT_LIST_HEAD(&iwcq->cmpl_generated); 2513 iwcq->cq_num = cq_num; 2514 info.dev = dev; 2515 ukinfo->cq_size = max(entries, 4); 2516 ukinfo->cq_id = cq_num; 2517 cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? 2518 true : false; 2519 cqe_size = cqe_64byte_ena ? 
64 : 32; 2520 ukinfo->avoid_mem_cflct = cqe_64byte_ena; 2521 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; 2522 if (attr->comp_vector < rf->ceqs_count) 2523 info.ceq_id = attr->comp_vector; 2524 info.ceq_id_valid = true; 2525 info.ceqe_mask = 1; 2526 info.type = IRDMA_CQ_TYPE_IWARP; 2527 info.vsi = &iwdev->vsi; 2528 2529 if (udata) { 2530 struct irdma_ucontext *ucontext; 2531 struct irdma_create_cq_req req = {}; 2532 struct irdma_cq_mr *cqmr; 2533 struct irdma_pbl *iwpbl; 2534 struct irdma_pbl *iwpbl_shadow; 2535 struct irdma_cq_mr *cqmr_shadow; 2536 2537 iwcq->user_mode = true; 2538 ucontext = 2539 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2540 ibucontext); 2541 if (ib_copy_from_udata(&req, udata, 2542 min(sizeof(req), udata->inlen))) { 2543 err_code = -EFAULT; 2544 goto cq_free_rsrc; 2545 } 2546 2547 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2548 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf, 2549 &ucontext->cq_reg_mem_list); 2550 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2551 if (!iwpbl) { 2552 err_code = -EPROTO; 2553 goto cq_free_rsrc; 2554 } 2555 2556 cqmr = &iwpbl->cq_mr; 2557 2558 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 2559 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { 2560 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2561 iwpbl_shadow = irdma_get_pbl( 2562 (unsigned long)req.user_shadow_area, 2563 &ucontext->cq_reg_mem_list); 2564 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2565 2566 if (!iwpbl_shadow) { 2567 err_code = -EPROTO; 2568 goto cq_free_rsrc; 2569 } 2570 cqmr_shadow = &iwpbl_shadow->cq_mr; 2571 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; 2572 cqmr->split = true; 2573 } else { 2574 info.shadow_area_pa = cqmr->shadow; 2575 } 2576 if (iwpbl->pbl_allocated) { 2577 info.virtual_map = true; 2578 info.pbl_chunk_size = 1; 2579 info.first_pm_pbl_idx = cqmr->cq_pbl.idx; 2580 } else { 2581 info.cq_base_pa = cqmr->cq_pbl.addr; 2582 } 2583 } else { 2584 /* Kmode allocations */ 2585 int rsize; 2586 2587 if (entries < 1 || entries > rf->max_cqe) { 2588 err_code = -EINVAL; 2589 goto cq_free_rsrc; 2590 } 2591 2592 entries += 2; 2593 if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2594 entries *= 2; 2595 2596 if (entries & 1) 2597 entries += 1; /* cq size must be an even number */ 2598 2599 if (entries * cqe_size == IRDMA_HW_PAGE_SIZE) 2600 entries += 2; 2601 2602 ukinfo->cq_size = entries; 2603 2604 if (cqe_64byte_ena) 2605 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe); 2606 else 2607 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe); 2608 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256); 2609 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, 2610 iwcq->kmem.size, 2611 &iwcq->kmem.pa, GFP_KERNEL); 2612 if (!iwcq->kmem.va) { 2613 err_code = -ENOMEM; 2614 goto cq_free_rsrc; 2615 } 2616 2617 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3, 2618 64); 2619 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device, 2620 iwcq->kmem_shadow.size, 2621 &iwcq->kmem_shadow.pa, 2622 GFP_KERNEL); 2623 if (!iwcq->kmem_shadow.va) { 2624 err_code = -ENOMEM; 2625 goto cq_free_rsrc; 2626 } 2627 info.shadow_area_pa = iwcq->kmem_shadow.pa; 2628 ukinfo->shadow_area = iwcq->kmem_shadow.va; 2629 ukinfo->cq_base = iwcq->kmem.va; 2630 info.cq_base_pa = iwcq->kmem.pa; 2631 } 2632 2633 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, 2634 (u32)IRDMA_MAX_CQ_READ_THRESH); 2635 2636 if (irdma_sc_cq_init(cq, 
&info)) { 2637 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); 2638 err_code = -EPROTO; 2639 goto cq_free_rsrc; 2640 } 2641 2642 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2643 if (!cqp_request) { 2644 err_code = -ENOMEM; 2645 goto cq_free_rsrc; 2646 } 2647 2648 cqp_info = &cqp_request->info; 2649 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; 2650 cqp_info->post_sq = 1; 2651 cqp_info->in.u.cq_create.cq = cq; 2652 cqp_info->in.u.cq_create.check_overflow = true; 2653 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; 2654 err_code = irdma_handle_cqp_op(rf, cqp_request); 2655 irdma_put_cqp_request(&rf->cqp, cqp_request); 2656 if (err_code) 2657 goto cq_free_rsrc; 2658 2659 if (udata) { 2660 struct irdma_create_cq_resp resp = {}; 2661 2662 resp.cq_id = info.cq_uk_init_info.cq_id; 2663 resp.cq_size = info.cq_uk_init_info.cq_size; 2664 if (ib_copy_to_udata(udata, &resp, 2665 min(sizeof(resp), udata->outlen))) { 2666 ibdev_dbg(&iwdev->ibdev, 2667 "VERBS: copy to user data\n"); 2668 err_code = -EPROTO; 2669 goto cq_destroy; 2670 } 2671 } 2672 2673 init_completion(&iwcq->free_cq); 2674 2675 /* Populate table entry after CQ is fully created. */ 2676 smp_store_release(&rf->cq_table[cq_num], iwcq); 2677 2678 return 0; 2679 cq_destroy: 2680 irdma_cq_wq_destroy(rf, cq); 2681 cq_free_rsrc: 2682 irdma_cq_free_rsrc(rf, iwcq); 2683 2684 return err_code; 2685 } 2686 2687 /** 2688 * irdma_get_mr_access - get hw MR access permissions from IB access flags 2689 * @access: IB access flags 2690 * @hw_rev: Hardware version 2691 */ 2692 static inline u16 irdma_get_mr_access(int access, u8 hw_rev) 2693 { 2694 u16 hw_access = 0; 2695 2696 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? 2697 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0; 2698 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? 2699 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0; 2700 hw_access |= (access & IB_ACCESS_REMOTE_READ) ? 2701 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0; 2702 if (hw_rev >= IRDMA_GEN_3) { 2703 hw_access |= (access & IB_ACCESS_MW_BIND) ? 2704 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0; 2705 } 2706 hw_access |= (access & IB_ZERO_BASED) ? 
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;

	return hw_access;
}

/**
 * irdma_free_stag - free stag resource
 * @iwdev: irdma device
 * @stag: stag to free
 */
static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
}

/**
 * irdma_create_stag - create random stag
 * @iwdev: irdma device
 */
static u32 irdma_create_stag(struct irdma_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->rf->mr_stagmask;
	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
	next_stag_index %= iwdev->rf->max_mr;

	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
			       iwdev->rf->max_mr, &stag_index,
			       &next_stag_index);
	if (ret)
		return stag;
	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
	stag |= driver_key;
	stag += (u32)consumer_key;

	return stag;
}

/**
 * irdma_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if (!(*pinfo) || *idx != (*pinfo)->cnt)
		return ++pbl;
	*idx = 0;
	(*pinfo)++;

	return (*pinfo)->addr;
}

/**
 * irdma_copy_user_pgaddrs - copy user page addresses into the pbles
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: pbl pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
				    enum irdma_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct irdma_pble_info *pinfo;
	struct ib_block_iter biter;
	u32 idx = 0;
	u32 pbl_cnt = 0;

	pinfo = (level == PBLE_LEVEL_1) ?
NULL : palloc->level2.leaf; 2795 2796 if (iwmr->type == IRDMA_MEMREG_TYPE_QP) 2797 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl); 2798 2799 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { 2800 *pbl = rdma_block_iter_dma_address(&biter); 2801 if (++pbl_cnt == palloc->total_cnt) 2802 break; 2803 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx); 2804 } 2805 } 2806 2807 /** 2808 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous 2809 * @arr: lvl1 pbl array 2810 * @npages: page count 2811 * @pg_size: page size 2812 * 2813 */ 2814 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) 2815 { 2816 u32 pg_idx; 2817 2818 for (pg_idx = 0; pg_idx < npages; pg_idx++) { 2819 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) 2820 return false; 2821 } 2822 2823 return true; 2824 } 2825 2826 /** 2827 * irdma_check_mr_contiguous - check if MR is physically contiguous 2828 * @palloc: pbl allocation struct 2829 * @pg_size: page size 2830 */ 2831 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, 2832 u32 pg_size) 2833 { 2834 struct irdma_pble_level2 *lvl2 = &palloc->level2; 2835 struct irdma_pble_info *leaf = lvl2->leaf; 2836 u64 *arr = NULL; 2837 u64 *start_addr = NULL; 2838 int i; 2839 bool ret; 2840 2841 if (palloc->level == PBLE_LEVEL_1) { 2842 arr = palloc->level1.addr; 2843 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt, 2844 pg_size); 2845 return ret; 2846 } 2847 2848 start_addr = leaf->addr; 2849 2850 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { 2851 arr = leaf->addr; 2852 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr) 2853 return false; 2854 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size); 2855 if (!ret) 2856 return false; 2857 } 2858 2859 return true; 2860 } 2861 2862 /** 2863 * irdma_setup_pbles - copy user pg address to pble's 2864 * @rf: RDMA PCI function 2865 * @iwmr: mr pointer for this memory registration 2866 * @lvl: requested pble levels 2867 */ 2868 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, 2869 u8 lvl) 2870 { 2871 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2872 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2873 struct irdma_pble_info *pinfo; 2874 u64 *pbl; 2875 int status; 2876 enum irdma_pble_level level = PBLE_LEVEL_1; 2877 2878 if (lvl) { 2879 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, 2880 lvl); 2881 if (status) 2882 return status; 2883 2884 iwpbl->pbl_allocated = true; 2885 level = palloc->level; 2886 pinfo = (level == PBLE_LEVEL_1) ? 
&palloc->level1 : 2887 palloc->level2.leaf; 2888 pbl = pinfo->addr; 2889 } else { 2890 pbl = iwmr->pgaddrmem; 2891 } 2892 2893 irdma_copy_user_pgaddrs(iwmr, pbl, level); 2894 2895 if (lvl) 2896 iwmr->pgaddrmem[0] = *pbl; 2897 2898 return 0; 2899 } 2900 2901 /** 2902 * irdma_handle_q_mem - handle memory for qp and cq 2903 * @iwdev: irdma device 2904 * @req: information for q memory management 2905 * @iwpbl: pble struct 2906 * @lvl: pble level mask 2907 */ 2908 static int irdma_handle_q_mem(struct irdma_device *iwdev, 2909 struct irdma_mem_reg_req *req, 2910 struct irdma_pbl *iwpbl, u8 lvl) 2911 { 2912 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2913 struct irdma_mr *iwmr = iwpbl->iwmr; 2914 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 2915 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; 2916 struct irdma_srq_mr *srqmr = &iwpbl->srq_mr; 2917 struct irdma_hmc_pble *hmc_p; 2918 u64 *arr = iwmr->pgaddrmem; 2919 u32 pg_size, total; 2920 int err = 0; 2921 bool ret = true; 2922 2923 pg_size = iwmr->page_size; 2924 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 2925 if (err) 2926 return err; 2927 2928 if (lvl) 2929 arr = palloc->level1.addr; 2930 2931 switch (iwmr->type) { 2932 case IRDMA_MEMREG_TYPE_QP: 2933 total = req->sq_pages + req->rq_pages; 2934 hmc_p = &qpmr->sq_pbl; 2935 qpmr->shadow = (dma_addr_t)arr[total]; 2936 /* Need to use physical address for RQ of QP 2937 * in case it is associated with SRQ. 2938 */ 2939 qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages]; 2940 if (lvl) { 2941 ret = irdma_check_mem_contiguous(arr, req->sq_pages, 2942 pg_size); 2943 if (ret) 2944 ret = irdma_check_mem_contiguous(&arr[req->sq_pages], 2945 req->rq_pages, 2946 pg_size); 2947 } 2948 2949 if (!ret) { 2950 hmc_p->idx = palloc->level1.idx; 2951 hmc_p = &qpmr->rq_pbl; 2952 hmc_p->idx = palloc->level1.idx + req->sq_pages; 2953 } else { 2954 hmc_p->addr = arr[0]; 2955 hmc_p = &qpmr->rq_pbl; 2956 hmc_p->addr = arr[req->sq_pages]; 2957 } 2958 break; 2959 case IRDMA_MEMREG_TYPE_SRQ: 2960 hmc_p = &srqmr->srq_pbl; 2961 srqmr->shadow = (dma_addr_t)arr[req->rq_pages]; 2962 if (lvl) 2963 ret = irdma_check_mem_contiguous(arr, req->rq_pages, 2964 pg_size); 2965 2966 if (!ret) 2967 hmc_p->idx = palloc->level1.idx; 2968 else 2969 hmc_p->addr = arr[0]; 2970 break; 2971 case IRDMA_MEMREG_TYPE_CQ: 2972 hmc_p = &cqmr->cq_pbl; 2973 2974 if (!cqmr->split) 2975 cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; 2976 2977 if (lvl) 2978 ret = irdma_check_mem_contiguous(arr, req->cq_pages, 2979 pg_size); 2980 2981 if (!ret) 2982 hmc_p->idx = palloc->level1.idx; 2983 else 2984 hmc_p->addr = arr[0]; 2985 break; 2986 default: 2987 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n"); 2988 err = -EINVAL; 2989 } 2990 2991 if (lvl && ret) { 2992 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2993 iwpbl->pbl_allocated = false; 2994 } 2995 2996 return err; 2997 } 2998 2999 /** 3000 * irdma_hw_alloc_mw - create the hw memory window 3001 * @iwdev: irdma device 3002 * @iwmr: pointer to memory window info 3003 */ 3004 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) 3005 { 3006 struct irdma_mw_alloc_info *info; 3007 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); 3008 struct irdma_cqp_request *cqp_request; 3009 struct cqp_cmds_info *cqp_info; 3010 int status; 3011 3012 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3013 if (!cqp_request) 3014 return -ENOMEM; 3015 3016 cqp_info = &cqp_request->info; 3017 info = &cqp_info->in.u.mw_alloc.info; 3018 memset(info, 0, sizeof(*info)); 3019 if (iwmr->ibmw.type 
== IB_MW_TYPE_1) 3020 info->mw_wide = true; 3021 3022 info->page_size = PAGE_SIZE; 3023 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 3024 info->pd_id = iwpd->sc_pd.pd_id; 3025 info->remote_access = true; 3026 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC; 3027 cqp_info->post_sq = 1; 3028 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; 3029 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; 3030 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 3031 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3032 3033 return status; 3034 } 3035 3036 /** 3037 * irdma_alloc_mw - Allocate memory window 3038 * @ibmw: Memory Window 3039 * @udata: user data pointer 3040 */ 3041 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) 3042 { 3043 struct irdma_device *iwdev = to_iwdev(ibmw->device); 3044 struct irdma_mr *iwmr = to_iwmw(ibmw); 3045 int err_code; 3046 u32 stag; 3047 3048 stag = irdma_create_stag(iwdev); 3049 if (!stag) 3050 return -ENOMEM; 3051 3052 iwmr->stag = stag; 3053 ibmw->rkey = stag; 3054 3055 err_code = irdma_hw_alloc_mw(iwdev, iwmr); 3056 if (err_code) { 3057 irdma_free_stag(iwdev, stag); 3058 return err_code; 3059 } 3060 3061 return 0; 3062 } 3063 3064 /** 3065 * irdma_dealloc_mw - Dealloc memory window 3066 * @ibmw: memory window structure. 3067 */ 3068 static int irdma_dealloc_mw(struct ib_mw *ibmw) 3069 { 3070 struct ib_pd *ibpd = ibmw->pd; 3071 struct irdma_pd *iwpd = to_iwpd(ibpd); 3072 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); 3073 struct irdma_device *iwdev = to_iwdev(ibmw->device); 3074 struct irdma_cqp_request *cqp_request; 3075 struct cqp_cmds_info *cqp_info; 3076 struct irdma_dealloc_stag_info *info; 3077 3078 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3079 if (!cqp_request) 3080 return -ENOMEM; 3081 3082 cqp_info = &cqp_request->info; 3083 info = &cqp_info->in.u.dealloc_stag.info; 3084 memset(info, 0, sizeof(*info)); 3085 info->pd_id = iwpd->sc_pd.pd_id; 3086 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S; 3087 info->mr = false; 3088 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 3089 cqp_info->post_sq = 1; 3090 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 3091 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 3092 irdma_handle_cqp_op(iwdev->rf, cqp_request); 3093 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3094 irdma_free_stag(iwdev, iwmr->stag); 3095 3096 return 0; 3097 } 3098 3099 /** 3100 * irdma_hw_alloc_stag - cqp command to allocate stag 3101 * @iwdev: irdma device 3102 * @iwmr: irdma mr pointer 3103 */ 3104 static int irdma_hw_alloc_stag(struct irdma_device *iwdev, 3105 struct irdma_mr *iwmr) 3106 { 3107 struct irdma_allocate_stag_info *info; 3108 struct ib_pd *pd = iwmr->ibmr.pd; 3109 struct irdma_pd *iwpd = to_iwpd(pd); 3110 int status; 3111 struct irdma_cqp_request *cqp_request; 3112 struct cqp_cmds_info *cqp_info; 3113 3114 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3115 if (!cqp_request) 3116 return -ENOMEM; 3117 3118 cqp_info = &cqp_request->info; 3119 info = &cqp_info->in.u.alloc_stag.info; 3120 info->page_size = PAGE_SIZE; 3121 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 3122 info->pd_id = iwpd->sc_pd.pd_id; 3123 info->total_len = iwmr->len; 3124 info->remote_access = true; 3125 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; 3126 cqp_info->post_sq = 1; 3127 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; 3128 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; 3129 status = irdma_handle_cqp_op(iwdev->rf, 
cqp_request);
	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
	if (status)
		return status;

	iwmr->is_hwreg = true;
	return 0;
}

/**
 * irdma_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory type for stag registration
 * @max_num_sg: max number of pages
 */
static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				    u32 max_num_sg)
{
	struct irdma_device *iwdev = to_iwdev(pd->device);
	struct irdma_pble_alloc *palloc;
	struct irdma_pbl *iwpbl;
	struct irdma_mr *iwmr;
	u32 stag;
	int err_code;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = irdma_create_stag(iwdev);
	if (!stag) {
		err_code = -ENOMEM;
		goto err;
	}

	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
	iwmr->len = max_num_sg * PAGE_SIZE;
	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
				  false);
	if (err_code)
		goto err_get_pble;

	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err_alloc_stag;

	iwpbl->pbl_allocated = true;

	return &iwmr->ibmr;
err_alloc_stag:
	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
err_get_pble:
	irdma_free_stag(iwdev, stag);
err:
	kfree(iwmr);

	return ERR_PTR(err_code);
}

/**
 * irdma_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct irdma_mr *iwmr = to_iwmr(ibmr);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	if (palloc->level == PBLE_LEVEL_2) {
		struct irdma_pble_info *palloc_info =
			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);

		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
	} else {
		pbl = palloc->level1.addr;
		pbl[iwmr->npages] = addr;
	}
	iwmr->npages++;

	return 0;
}

/**
 * irdma_map_mr_sg - map sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list
 * @sg_nents: number of sg entries
 * @sg_offset: scatter gather list offset
 */
static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct irdma_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
}

/**
 * irdma_hwreg_mr - send cqp command for memory registration
 * @iwdev: irdma device
 * @iwmr: irdma mr pointer
 * @access: access for MR
 */
static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
			  u16 access)
{
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_reg_ns_stag_info *stag_info;
	struct ib_pd *pd =
iwmr->ibmr.pd; 3256 struct irdma_pd *iwpd = to_iwpd(pd); 3257 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 3258 struct irdma_cqp_request *cqp_request; 3259 struct cqp_cmds_info *cqp_info; 3260 int ret; 3261 3262 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3263 if (!cqp_request) 3264 return -ENOMEM; 3265 3266 cqp_info = &cqp_request->info; 3267 stag_info = &cqp_info->in.u.mr_reg_non_shared.info; 3268 stag_info->va = iwpbl->user_base; 3269 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 3270 stag_info->stag_key = (u8)iwmr->stag; 3271 stag_info->total_len = iwmr->len; 3272 stag_info->access_rights = irdma_get_mr_access(access, 3273 iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev); 3274 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS) 3275 stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0; 3276 stag_info->pd_id = iwpd->sc_pd.pd_id; 3277 stag_info->all_memory = iwmr->dma_mr; 3278 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) 3279 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; 3280 else 3281 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; 3282 stag_info->page_size = iwmr->page_size; 3283 3284 if (iwpbl->pbl_allocated) { 3285 if (palloc->level == PBLE_LEVEL_1) { 3286 stag_info->first_pm_pbl_index = palloc->level1.idx; 3287 stag_info->chunk_size = 1; 3288 } else { 3289 stag_info->first_pm_pbl_index = palloc->level2.root.idx; 3290 stag_info->chunk_size = 3; 3291 } 3292 } else { 3293 stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; 3294 } 3295 3296 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; 3297 cqp_info->post_sq = 1; 3298 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; 3299 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; 3300 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); 3301 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3302 3303 if (!ret) 3304 iwmr->is_hwreg = true; 3305 3306 return ret; 3307 } 3308 3309 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access, 3310 bool create_stag) 3311 { 3312 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3313 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3314 u32 stag = 0; 3315 u8 lvl; 3316 int err; 3317 3318 lvl = iwmr->page_cnt != 1 ? 
PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; 3319 3320 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 3321 if (err) 3322 return err; 3323 3324 if (lvl) { 3325 err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, 3326 iwmr->page_size); 3327 if (err) { 3328 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 3329 iwpbl->pbl_allocated = false; 3330 } 3331 } 3332 3333 if (create_stag) { 3334 stag = irdma_create_stag(iwdev); 3335 if (!stag) { 3336 err = -ENOMEM; 3337 goto free_pble; 3338 } 3339 3340 iwmr->stag = stag; 3341 iwmr->ibmr.rkey = stag; 3342 iwmr->ibmr.lkey = stag; 3343 } 3344 3345 err = irdma_hwreg_mr(iwdev, iwmr, access); 3346 if (err) 3347 goto err_hwreg; 3348 3349 return 0; 3350 3351 err_hwreg: 3352 if (stag) 3353 irdma_free_stag(iwdev, stag); 3354 3355 free_pble: 3356 if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) 3357 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 3358 3359 return err; 3360 } 3361 3362 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region, 3363 struct ib_pd *pd, u64 virt, 3364 enum irdma_memreg_type reg_type) 3365 { 3366 struct irdma_device *iwdev = to_iwdev(pd->device); 3367 struct irdma_pbl *iwpbl; 3368 struct irdma_mr *iwmr; 3369 unsigned long pgsz_bitmap; 3370 3371 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 3372 if (!iwmr) 3373 return ERR_PTR(-ENOMEM); 3374 3375 iwpbl = &iwmr->iwpbl; 3376 iwpbl->iwmr = iwmr; 3377 iwmr->region = region; 3378 iwmr->ibmr.pd = pd; 3379 iwmr->ibmr.device = pd->device; 3380 iwmr->ibmr.iova = virt; 3381 iwmr->type = reg_type; 3382 3383 pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ? 3384 iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K; 3385 3386 iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt); 3387 if (unlikely(!iwmr->page_size)) { 3388 kfree(iwmr); 3389 return ERR_PTR(-EOPNOTSUPP); 3390 } 3391 3392 iwmr->len = region->length; 3393 iwpbl->user_base = virt; 3394 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); 3395 3396 return iwmr; 3397 } 3398 3399 static void irdma_free_iwmr(struct irdma_mr *iwmr) 3400 { 3401 kfree(iwmr); 3402 } 3403 3404 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, 3405 struct ib_udata *udata, 3406 struct irdma_mr *iwmr) 3407 { 3408 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3409 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3410 struct irdma_ucontext *ucontext = NULL; 3411 unsigned long flags; 3412 u32 total; 3413 int err; 3414 u8 lvl; 3415 3416 /* iWarp: Catch page not starting on OS page boundary */ 3417 if (!rdma_protocol_roce(&iwdev->ibdev, 1) && 3418 ib_umem_offset(iwmr->region)) 3419 return -EINVAL; 3420 3421 total = req.sq_pages + req.rq_pages + 1; 3422 if (total > iwmr->page_cnt) 3423 return -EINVAL; 3424 3425 total = req.sq_pages + req.rq_pages; 3426 lvl = total > 2 ? 
PBLE_LEVEL_1 : PBLE_LEVEL_0; 3427 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 3428 if (err) 3429 return err; 3430 3431 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 3432 ibucontext); 3433 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 3434 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); 3435 iwpbl->on_list = true; 3436 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 3437 3438 return 0; 3439 } 3440 3441 static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req, 3442 struct ib_udata *udata, 3443 struct irdma_mr *iwmr) 3444 { 3445 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3446 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3447 struct irdma_ucontext *ucontext; 3448 unsigned long flags; 3449 u32 total; 3450 int err; 3451 u8 lvl; 3452 3453 total = req.rq_pages + IRDMA_SHADOW_PGCNT; 3454 if (total > iwmr->page_cnt) 3455 return -EINVAL; 3456 3457 lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0; 3458 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 3459 if (err) 3460 return err; 3461 3462 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 3463 ibucontext); 3464 spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); 3465 list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list); 3466 iwpbl->on_list = true; 3467 spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); 3468 3469 return 0; 3470 } 3471 3472 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, 3473 struct ib_udata *udata, 3474 struct irdma_mr *iwmr) 3475 { 3476 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3477 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3478 struct irdma_ucontext *ucontext = NULL; 3479 u8 shadow_pgcnt = 1; 3480 unsigned long flags; 3481 u32 total; 3482 int err; 3483 u8 lvl; 3484 3485 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) 3486 shadow_pgcnt = 0; 3487 total = req.cq_pages + shadow_pgcnt; 3488 if (total > iwmr->page_cnt) 3489 return -EINVAL; 3490 3491 lvl = req.cq_pages > 1 ? 
PBLE_LEVEL_1 : PBLE_LEVEL_0; 3492 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 3493 if (err) 3494 return err; 3495 3496 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 3497 ibucontext); 3498 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 3499 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); 3500 iwpbl->on_list = true; 3501 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 3502 3503 return 0; 3504 } 3505 3506 /** 3507 * irdma_reg_user_mr - Register a user memory region 3508 * @pd: ptr of pd 3509 * @start: virtual start address 3510 * @len: length of mr 3511 * @virt: virtual address 3512 * @access: access of mr 3513 * @dmah: dma handle 3514 * @udata: user data 3515 */ 3516 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, 3517 u64 virt, int access, 3518 struct ib_dmah *dmah, 3519 struct ib_udata *udata) 3520 { 3521 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages) 3522 struct irdma_device *iwdev = to_iwdev(pd->device); 3523 struct irdma_mem_reg_req req = {}; 3524 struct ib_umem *region = NULL; 3525 struct irdma_mr *iwmr = NULL; 3526 int err; 3527 3528 if (dmah) 3529 return ERR_PTR(-EOPNOTSUPP); 3530 3531 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3532 return ERR_PTR(-EINVAL); 3533 3534 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN) 3535 return ERR_PTR(-EINVAL); 3536 3537 region = ib_umem_get(pd->device, start, len, access); 3538 3539 if (IS_ERR(region)) { 3540 ibdev_dbg(&iwdev->ibdev, 3541 "VERBS: Failed to create ib_umem region\n"); 3542 return (struct ib_mr *)region; 3543 } 3544 3545 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { 3546 ib_umem_release(region); 3547 return ERR_PTR(-EFAULT); 3548 } 3549 3550 iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type); 3551 if (IS_ERR(iwmr)) { 3552 ib_umem_release(region); 3553 return (struct ib_mr *)iwmr; 3554 } 3555 3556 switch (req.reg_type) { 3557 case IRDMA_MEMREG_TYPE_QP: 3558 err = irdma_reg_user_mr_type_qp(req, udata, iwmr); 3559 if (err) 3560 goto error; 3561 3562 break; 3563 case IRDMA_MEMREG_TYPE_SRQ: 3564 err = irdma_reg_user_mr_type_srq(req, udata, iwmr); 3565 if (err) 3566 goto error; 3567 3568 break; 3569 case IRDMA_MEMREG_TYPE_CQ: 3570 err = irdma_reg_user_mr_type_cq(req, udata, iwmr); 3571 if (err) 3572 goto error; 3573 break; 3574 case IRDMA_MEMREG_TYPE_MEM: 3575 err = irdma_reg_user_mr_type_mem(iwmr, access, true); 3576 if (err) 3577 goto error; 3578 3579 break; 3580 default: 3581 err = -EINVAL; 3582 goto error; 3583 } 3584 3585 return &iwmr->ibmr; 3586 error: 3587 ib_umem_release(region); 3588 irdma_free_iwmr(iwmr); 3589 3590 return ERR_PTR(err); 3591 } 3592 3593 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, 3594 u64 len, u64 virt, 3595 int fd, int access, 3596 struct ib_dmah *dmah, 3597 struct uverbs_attr_bundle *attrs) 3598 { 3599 struct irdma_device *iwdev = to_iwdev(pd->device); 3600 struct ib_umem_dmabuf *umem_dmabuf; 3601 struct irdma_mr *iwmr; 3602 int err; 3603 3604 if (dmah) 3605 return ERR_PTR(-EOPNOTSUPP); 3606 3607 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3608 return ERR_PTR(-EINVAL); 3609 3610 umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access); 3611 if (IS_ERR(umem_dmabuf)) { 3612 ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n", 3613 umem_dmabuf); 3614 return ERR_CAST(umem_dmabuf); 3615 } 3616 3617 iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM); 3618 if 
(IS_ERR(iwmr)) {
		err = PTR_ERR(iwmr);
		goto err_release;
	}

	err = irdma_reg_user_mr_type_mem(iwmr, access, true);
	if (err)
		goto err_iwmr;

	return &iwmr->ibmr;

err_iwmr:
	irdma_free_iwmr(iwmr);

err_release:
	ib_umem_release(&umem_dmabuf->umem);

	return ERR_PTR(err);
}

static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
{
	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
	struct irdma_mr *iwmr = to_iwmr(ib_mr);
	struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
	struct irdma_dealloc_stag_info *info;
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	/* Skip the HW MR de-register when it was already de-registered
	 * during an MR re-register and the re-registration failed.
	 */
	if (!iwmr->is_hwreg)
		return 0;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.dealloc_stag.info;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
	info->mr = true;
	if (iwpbl->pbl_allocated)
		info->dealloc_pbl = true;

	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
	if (status)
		return status;

	iwmr->is_hwreg = false;
	return 0;
}

/*
 * irdma_rereg_mr_trans - Re-register a user MR for a translation change.
 * @iwmr: ptr of iwmr
 * @start: virtual start address
 * @len: length of mr
 * @virt: virtual address
 *
 * Re-register a user memory region when a translation change is requested.
 * Re-register a new region while reusing the stag from the original registration.
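 * The previous umem and any PBLE backing must already have been released
 * by the caller, as irdma_rereg_user_mr() does before invoking this helper.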
3689 */ 3690 static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, 3691 u64 virt) 3692 { 3693 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3694 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3695 struct ib_pd *pd = iwmr->ibmr.pd; 3696 struct ib_umem *region; 3697 int err; 3698 3699 region = ib_umem_get(pd->device, start, len, iwmr->access); 3700 if (IS_ERR(region)) 3701 return PTR_ERR(region); 3702 3703 iwmr->region = region; 3704 iwmr->ibmr.iova = virt; 3705 iwmr->ibmr.pd = pd; 3706 iwmr->page_size = ib_umem_find_best_pgsz(region, 3707 iwdev->rf->sc_dev.hw_attrs.page_size_cap, 3708 virt); 3709 if (unlikely(!iwmr->page_size)) { 3710 err = -EOPNOTSUPP; 3711 goto err; 3712 } 3713 3714 iwmr->len = region->length; 3715 iwpbl->user_base = virt; 3716 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); 3717 3718 err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false); 3719 if (err) 3720 goto err; 3721 3722 return 0; 3723 3724 err: 3725 ib_umem_release(region); 3726 return err; 3727 } 3728 3729 /* 3730 * irdma_rereg_user_mr - Re-Register a user memory region(MR) 3731 * @ibmr: ib mem to access iwarp mr pointer 3732 * @flags: bit mask to indicate which of the attr's of MR modified 3733 * @start: virtual start address 3734 * @len: length of mr 3735 * @virt: virtual address 3736 * @new_access: bit mask of access flags 3737 * @new_pd: ptr of pd 3738 * @udata: user data 3739 * 3740 * Return: 3741 * NULL - Success, existing MR updated 3742 * ERR_PTR - error occurred 3743 */ 3744 static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, 3745 u64 start, u64 len, u64 virt, 3746 int new_access, struct ib_pd *new_pd, 3747 struct ib_udata *udata) 3748 { 3749 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3750 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3751 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3752 int ret; 3753 3754 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3755 return ERR_PTR(-EINVAL); 3756 3757 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) 3758 return ERR_PTR(-EOPNOTSUPP); 3759 3760 ret = irdma_hwdereg_mr(ib_mr); 3761 if (ret) 3762 return ERR_PTR(ret); 3763 3764 if (flags & IB_MR_REREG_ACCESS) 3765 iwmr->access = new_access; 3766 3767 if (flags & IB_MR_REREG_PD) { 3768 iwmr->ibmr.pd = new_pd; 3769 iwmr->ibmr.device = new_pd->device; 3770 } 3771 3772 if (flags & IB_MR_REREG_TRANS) { 3773 if (iwpbl->pbl_allocated) { 3774 irdma_free_pble(iwdev->rf->pble_rsrc, 3775 &iwpbl->pble_alloc); 3776 iwpbl->pbl_allocated = false; 3777 } 3778 if (iwmr->region) { 3779 ib_umem_release(iwmr->region); 3780 iwmr->region = NULL; 3781 } 3782 3783 ret = irdma_rereg_mr_trans(iwmr, start, len, virt); 3784 } else 3785 ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); 3786 if (ret) 3787 return ERR_PTR(ret); 3788 3789 return NULL; 3790 } 3791 3792 /** 3793 * irdma_reg_phys_mr - register kernel physical memory 3794 * @pd: ibpd pointer 3795 * @addr: physical address of memory to register 3796 * @size: size of memory to register 3797 * @access: Access rights 3798 * @iova_start: start of virtual address for physical buffers 3799 * @dma_mr: Flag indicating whether this region is a PD DMA MR 3800 */ 3801 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, 3802 u64 *iova_start, bool dma_mr) 3803 { 3804 struct irdma_device *iwdev = to_iwdev(pd->device); 3805 struct irdma_pbl *iwpbl; 3806 struct irdma_mr *iwmr; 3807 u32 stag; 3808 int ret; 3809 3810 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 3811 if (!iwmr) 3812 
return ERR_PTR(-ENOMEM); 3813 3814 iwmr->ibmr.pd = pd; 3815 iwmr->ibmr.device = pd->device; 3816 iwpbl = &iwmr->iwpbl; 3817 iwpbl->iwmr = iwmr; 3818 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 3819 iwmr->dma_mr = dma_mr; 3820 iwpbl->user_base = *iova_start; 3821 stag = irdma_create_stag(iwdev); 3822 if (!stag) { 3823 ret = -ENOMEM; 3824 goto err; 3825 } 3826 3827 iwmr->stag = stag; 3828 iwmr->ibmr.iova = *iova_start; 3829 iwmr->ibmr.rkey = stag; 3830 iwmr->ibmr.lkey = stag; 3831 iwmr->page_cnt = 1; 3832 iwmr->pgaddrmem[0] = addr; 3833 iwmr->len = size; 3834 iwmr->page_size = SZ_4K; 3835 ret = irdma_hwreg_mr(iwdev, iwmr, access); 3836 if (ret) { 3837 irdma_free_stag(iwdev, stag); 3838 goto err; 3839 } 3840 3841 return &iwmr->ibmr; 3842 3843 err: 3844 kfree(iwmr); 3845 3846 return ERR_PTR(ret); 3847 } 3848 3849 /** 3850 * irdma_get_dma_mr - register physical mem 3851 * @pd: ptr of pd 3852 * @acc: access for memory 3853 */ 3854 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc) 3855 { 3856 u64 kva = 0; 3857 3858 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true); 3859 } 3860 3861 /** 3862 * irdma_del_memlist - Deleting pbl list entries for CQ/QP 3863 * @iwmr: iwmr for IB's user page addresses 3864 * @ucontext: ptr to user context 3865 */ 3866 static void irdma_del_memlist(struct irdma_mr *iwmr, 3867 struct irdma_ucontext *ucontext) 3868 { 3869 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3870 unsigned long flags; 3871 3872 switch (iwmr->type) { 3873 case IRDMA_MEMREG_TYPE_CQ: 3874 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 3875 if (iwpbl->on_list) { 3876 iwpbl->on_list = false; 3877 list_del(&iwpbl->list); 3878 } 3879 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 3880 break; 3881 case IRDMA_MEMREG_TYPE_QP: 3882 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 3883 if (iwpbl->on_list) { 3884 iwpbl->on_list = false; 3885 list_del(&iwpbl->list); 3886 } 3887 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 3888 break; 3889 case IRDMA_MEMREG_TYPE_SRQ: 3890 spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); 3891 if (iwpbl->on_list) { 3892 iwpbl->on_list = false; 3893 list_del(&iwpbl->list); 3894 } 3895 spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); 3896 break; 3897 default: 3898 break; 3899 } 3900 } 3901 3902 /** 3903 * irdma_dereg_mr - deregister mr 3904 * @ib_mr: mr ptr for dereg 3905 * @udata: user data 3906 */ 3907 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) 3908 { 3909 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3910 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3911 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3912 int ret; 3913 3914 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { 3915 if (iwmr->region) { 3916 struct irdma_ucontext *ucontext; 3917 3918 ucontext = rdma_udata_to_drv_context(udata, 3919 struct irdma_ucontext, 3920 ibucontext); 3921 irdma_del_memlist(iwmr, ucontext); 3922 } 3923 goto done; 3924 } 3925 3926 ret = irdma_hwdereg_mr(ib_mr); 3927 if (ret) 3928 return ret; 3929 3930 irdma_free_stag(iwdev, iwmr->stag); 3931 done: 3932 if (iwpbl->pbl_allocated) 3933 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 3934 3935 if (iwmr->region) 3936 ib_umem_release(iwmr->region); 3937 3938 kfree(iwmr); 3939 3940 return 0; 3941 } 3942 3943 /** 3944 * irdma_post_send - kernel application wr 3945 * @ibqp: qp ptr for wr 3946 * @ib_wr: work request ptr 3947 * @bad_wr: return of bad wr if err 3948 */ 3949 static int irdma_post_send(struct ib_qp *ibqp, 3950 const struct ib_send_wr 
*ib_wr, 3951 const struct ib_send_wr **bad_wr) 3952 { 3953 struct irdma_qp *iwqp; 3954 struct irdma_qp_uk *ukqp; 3955 struct irdma_sc_dev *dev; 3956 struct irdma_post_sq_info info; 3957 int err = 0; 3958 unsigned long flags; 3959 bool inv_stag; 3960 struct irdma_ah *ah; 3961 3962 iwqp = to_iwqp(ibqp); 3963 ukqp = &iwqp->sc_qp.qp_uk; 3964 dev = &iwqp->iwdev->rf->sc_dev; 3965 3966 spin_lock_irqsave(&iwqp->lock, flags); 3967 while (ib_wr) { 3968 memset(&info, 0, sizeof(info)); 3969 inv_stag = false; 3970 info.wr_id = (ib_wr->wr_id); 3971 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) 3972 info.signaled = true; 3973 if (ib_wr->send_flags & IB_SEND_FENCE) 3974 info.read_fence = true; 3975 switch (ib_wr->opcode) { 3976 case IB_WR_ATOMIC_CMP_AND_SWP: 3977 if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags & 3978 IRDMA_FEATURE_ATOMIC_OPS))) { 3979 err = -EINVAL; 3980 break; 3981 } 3982 info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP; 3983 info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr; 3984 info.op.atomic_compare_swap.remote_tagged_offset = 3985 atomic_wr(ib_wr)->remote_addr; 3986 info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap; 3987 info.op.atomic_compare_swap.compare_data_bytes = 3988 atomic_wr(ib_wr)->compare_add; 3989 info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey; 3990 info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey; 3991 err = irdma_uk_atomic_compare_swap(ukqp, &info, false); 3992 break; 3993 case IB_WR_ATOMIC_FETCH_AND_ADD: 3994 if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags & 3995 IRDMA_FEATURE_ATOMIC_OPS))) { 3996 err = -EINVAL; 3997 break; 3998 } 3999 info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD; 4000 info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr; 4001 info.op.atomic_fetch_add.remote_tagged_offset = 4002 atomic_wr(ib_wr)->remote_addr; 4003 info.op.atomic_fetch_add.fetch_add_data_bytes = 4004 atomic_wr(ib_wr)->compare_add; 4005 info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey; 4006 info.op.atomic_fetch_add.remote_stag = 4007 atomic_wr(ib_wr)->rkey; 4008 err = irdma_uk_atomic_fetch_add(ukqp, &info, false); 4009 break; 4010 case IB_WR_SEND_WITH_IMM: 4011 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { 4012 info.imm_data_valid = true; 4013 info.imm_data = ntohl(ib_wr->ex.imm_data); 4014 } else { 4015 err = -EINVAL; 4016 break; 4017 } 4018 fallthrough; 4019 case IB_WR_SEND: 4020 case IB_WR_SEND_WITH_INV: 4021 if (ib_wr->opcode == IB_WR_SEND || 4022 ib_wr->opcode == IB_WR_SEND_WITH_IMM) { 4023 if (ib_wr->send_flags & IB_SEND_SOLICITED) 4024 info.op_type = IRDMA_OP_TYPE_SEND_SOL; 4025 else 4026 info.op_type = IRDMA_OP_TYPE_SEND; 4027 } else { 4028 if (ib_wr->send_flags & IB_SEND_SOLICITED) 4029 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV; 4030 else 4031 info.op_type = IRDMA_OP_TYPE_SEND_INV; 4032 info.stag_to_inv = ib_wr->ex.invalidate_rkey; 4033 } 4034 4035 info.op.send.num_sges = ib_wr->num_sge; 4036 info.op.send.sg_list = ib_wr->sg_list; 4037 if (iwqp->ibqp.qp_type == IB_QPT_UD || 4038 iwqp->ibqp.qp_type == IB_QPT_GSI) { 4039 ah = to_iwah(ud_wr(ib_wr)->ah); 4040 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; 4041 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; 4042 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; 4043 } 4044 4045 if (ib_wr->send_flags & IB_SEND_INLINE) 4046 err = irdma_uk_inline_send(ukqp, &info, false); 4047 else 4048 err = irdma_uk_send(ukqp, &info, false); 4049 break; 4050 case IB_WR_RDMA_WRITE_WITH_IMM: 4051 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { 4052 
info.imm_data_valid = true; 4053 info.imm_data = ntohl(ib_wr->ex.imm_data); 4054 } else { 4055 err = -EINVAL; 4056 break; 4057 } 4058 fallthrough; 4059 case IB_WR_RDMA_WRITE: 4060 if (ib_wr->send_flags & IB_SEND_SOLICITED) 4061 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL; 4062 else 4063 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; 4064 4065 info.op.rdma_write.num_lo_sges = ib_wr->num_sge; 4066 info.op.rdma_write.lo_sg_list = ib_wr->sg_list; 4067 info.op.rdma_write.rem_addr.addr = 4068 rdma_wr(ib_wr)->remote_addr; 4069 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; 4070 if (ib_wr->send_flags & IB_SEND_INLINE) 4071 err = irdma_uk_inline_rdma_write(ukqp, &info, false); 4072 else 4073 err = irdma_uk_rdma_write(ukqp, &info, false); 4074 break; 4075 case IB_WR_RDMA_READ_WITH_INV: 4076 inv_stag = true; 4077 fallthrough; 4078 case IB_WR_RDMA_READ: 4079 if (ib_wr->num_sge > 4080 dev->hw_attrs.uk_attrs.max_hw_read_sges) { 4081 err = -EINVAL; 4082 break; 4083 } 4084 info.op_type = IRDMA_OP_TYPE_RDMA_READ; 4085 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; 4086 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey; 4087 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; 4088 info.op.rdma_read.num_lo_sges = ib_wr->num_sge; 4089 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); 4090 break; 4091 case IB_WR_LOCAL_INV: 4092 info.op_type = IRDMA_OP_TYPE_INV_STAG; 4093 info.local_fence = true; 4094 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; 4095 err = irdma_uk_stag_local_invalidate(ukqp, &info, true); 4096 break; 4097 case IB_WR_REG_MR: { 4098 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); 4099 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; 4100 struct irdma_fast_reg_stag_info stag_info = {}; 4101 4102 stag_info.signaled = info.signaled; 4103 stag_info.read_fence = info.read_fence; 4104 stag_info.access_rights = 4105 irdma_get_mr_access(reg_wr(ib_wr)->access, 4106 dev->hw_attrs.uk_attrs.hw_rev); 4107 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; 4108 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; 4109 stag_info.page_size = reg_wr(ib_wr)->mr->page_size; 4110 stag_info.wr_id = ib_wr->wr_id; 4111 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED; 4112 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; 4113 stag_info.total_len = iwmr->ibmr.length; 4114 stag_info.reg_addr_pa = *palloc->level1.addr; 4115 stag_info.first_pm_pbl_index = palloc->level1.idx; 4116 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; 4117 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR) 4118 stag_info.chunk_size = 1; 4119 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, 4120 true); 4121 break; 4122 } 4123 default: 4124 err = -EINVAL; 4125 ibdev_dbg(&iwqp->iwdev->ibdev, 4126 "VERBS: upost_send bad opcode = 0x%x\n", 4127 ib_wr->opcode); 4128 break; 4129 } 4130 4131 if (err) 4132 break; 4133 ib_wr = ib_wr->next; 4134 } 4135 4136 if (!iwqp->flush_issued) { 4137 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) 4138 irdma_uk_qp_post_wr(ukqp); 4139 spin_unlock_irqrestore(&iwqp->lock, flags); 4140 } else { 4141 spin_unlock_irqrestore(&iwqp->lock, flags); 4142 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, 4143 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); 4144 } 4145 4146 if (err) 4147 *bad_wr = ib_wr; 4148 4149 return err; 4150 } 4151 4152 /** 4153 * irdma_post_srq_recv - post receive wr for kernel application 4154 * @ibsrq: ib srq pointer 4155 * @ib_wr: work request for receive 4156 * @bad_wr: bad wr caused an error 4157 */ 4158 static int 
irdma_post_srq_recv(struct ib_srq *ibsrq, 4159 const struct ib_recv_wr *ib_wr, 4160 const struct ib_recv_wr **bad_wr) 4161 { 4162 struct irdma_srq *iwsrq = to_iwsrq(ibsrq); 4163 struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk; 4164 struct irdma_post_rq_info post_recv = {}; 4165 unsigned long flags; 4166 int err = 0; 4167 4168 spin_lock_irqsave(&iwsrq->lock, flags); 4169 while (ib_wr) { 4170 if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) { 4171 err = -EINVAL; 4172 goto out; 4173 } 4174 post_recv.num_sges = ib_wr->num_sge; 4175 post_recv.wr_id = ib_wr->wr_id; 4176 post_recv.sg_list = ib_wr->sg_list; 4177 err = irdma_uk_srq_post_receive(uksrq, &post_recv); 4178 if (err) 4179 goto out; 4180 4181 ib_wr = ib_wr->next; 4182 } 4183 4184 out: 4185 spin_unlock_irqrestore(&iwsrq->lock, flags); 4186 4187 if (err) 4188 *bad_wr = ib_wr; 4189 4190 return err; 4191 } 4192 4193 /** 4194 * irdma_post_recv - post receive wr for kernel application 4195 * @ibqp: ib qp pointer 4196 * @ib_wr: work request for receive 4197 * @bad_wr: bad wr caused an error 4198 */ 4199 static int irdma_post_recv(struct ib_qp *ibqp, 4200 const struct ib_recv_wr *ib_wr, 4201 const struct ib_recv_wr **bad_wr) 4202 { 4203 struct irdma_qp *iwqp; 4204 struct irdma_qp_uk *ukqp; 4205 struct irdma_post_rq_info post_recv = {}; 4206 unsigned long flags; 4207 int err = 0; 4208 4209 iwqp = to_iwqp(ibqp); 4210 ukqp = &iwqp->sc_qp.qp_uk; 4211 4212 if (ukqp->srq_uk) { 4213 *bad_wr = ib_wr; 4214 return -EINVAL; 4215 } 4216 4217 spin_lock_irqsave(&iwqp->lock, flags); 4218 while (ib_wr) { 4219 post_recv.num_sges = ib_wr->num_sge; 4220 post_recv.wr_id = ib_wr->wr_id; 4221 post_recv.sg_list = ib_wr->sg_list; 4222 err = irdma_uk_post_receive(ukqp, &post_recv); 4223 if (err) { 4224 ibdev_dbg(&iwqp->iwdev->ibdev, 4225 "VERBS: post_recv err %d\n", err); 4226 goto out; 4227 } 4228 4229 ib_wr = ib_wr->next; 4230 } 4231 4232 out: 4233 spin_unlock_irqrestore(&iwqp->lock, flags); 4234 if (iwqp->flush_issued) 4235 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, 4236 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); 4237 4238 if (err) 4239 *bad_wr = ib_wr; 4240 4241 return err; 4242 } 4243 4244 /** 4245 * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status 4246 * @opcode: iwarp flush code 4247 */ 4248 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode) 4249 { 4250 switch (opcode) { 4251 case FLUSH_PROT_ERR: 4252 return IB_WC_LOC_PROT_ERR; 4253 case FLUSH_REM_ACCESS_ERR: 4254 return IB_WC_REM_ACCESS_ERR; 4255 case FLUSH_LOC_QP_OP_ERR: 4256 return IB_WC_LOC_QP_OP_ERR; 4257 case FLUSH_REM_OP_ERR: 4258 return IB_WC_REM_OP_ERR; 4259 case FLUSH_LOC_LEN_ERR: 4260 return IB_WC_LOC_LEN_ERR; 4261 case FLUSH_GENERAL_ERR: 4262 return IB_WC_WR_FLUSH_ERR; 4263 case FLUSH_RETRY_EXC_ERR: 4264 return IB_WC_RETRY_EXC_ERR; 4265 case FLUSH_MW_BIND_ERR: 4266 return IB_WC_MW_BIND_ERR; 4267 case FLUSH_REM_INV_REQ_ERR: 4268 return IB_WC_REM_INV_REQ_ERR; 4269 case FLUSH_RNR_RETRY_EXC_ERR: 4270 return IB_WC_RNR_RETRY_EXC_ERR; 4271 case FLUSH_FATAL_ERR: 4272 default: 4273 return IB_WC_FATAL_ERR; 4274 } 4275 } 4276 4277 /** 4278 * irdma_process_cqe - process cqe info 4279 * @entry: processed cqe 4280 * @cq_poll_info: cqe info 4281 */ 4282 static void irdma_process_cqe(struct ib_wc *entry, 4283 struct irdma_cq_poll_info *cq_poll_info) 4284 { 4285 struct irdma_sc_qp *qp; 4286 4287 entry->wc_flags = 0; 4288 entry->pkey_index = 0; 4289 entry->wr_id = cq_poll_info->wr_id; 4290 4291 qp = cq_poll_info->qp_handle; 4292 
entry->qp = qp->qp_uk.back_qp; 4293 4294 if (cq_poll_info->error) { 4295 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ? 4296 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; 4297 4298 entry->vendor_err = cq_poll_info->major_err << 16 | 4299 cq_poll_info->minor_err; 4300 } else { 4301 entry->status = IB_WC_SUCCESS; 4302 if (cq_poll_info->imm_valid) { 4303 entry->ex.imm_data = htonl(cq_poll_info->imm_data); 4304 entry->wc_flags |= IB_WC_WITH_IMM; 4305 } 4306 if (cq_poll_info->ud_smac_valid) { 4307 ether_addr_copy(entry->smac, cq_poll_info->ud_smac); 4308 entry->wc_flags |= IB_WC_WITH_SMAC; 4309 } 4310 4311 if (cq_poll_info->ud_vlan_valid) { 4312 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; 4313 4314 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; 4315 if (vlan) { 4316 entry->vlan_id = vlan; 4317 entry->wc_flags |= IB_WC_WITH_VLAN; 4318 } 4319 } else { 4320 entry->sl = 0; 4321 } 4322 } 4323 4324 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { 4325 set_ib_wc_op_sq(cq_poll_info, entry); 4326 } else { 4327 if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) 4328 set_ib_wc_op_rq(cq_poll_info, entry, 4329 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ? 4330 true : false); 4331 else 4332 set_ib_wc_op_rq_gen_3(cq_poll_info, entry); 4333 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && 4334 cq_poll_info->stag_invalid_set) { 4335 entry->ex.invalidate_rkey = cq_poll_info->inv_stag; 4336 entry->wc_flags |= IB_WC_WITH_INVALIDATE; 4337 } 4338 } 4339 4340 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { 4341 entry->src_qp = cq_poll_info->ud_src_qpn; 4342 entry->slid = 0; 4343 entry->wc_flags |= 4344 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); 4345 entry->network_hdr_type = cq_poll_info->ipv4 ? 4346 RDMA_NETWORK_IPV4 : 4347 RDMA_NETWORK_IPV6; 4348 } else { 4349 entry->src_qp = cq_poll_info->qp_id; 4350 } 4351 4352 entry->byte_len = cq_poll_info->bytes_xfered; 4353 } 4354 4355 /** 4356 * irdma_poll_one - poll one entry of the CQ 4357 * @ukcq: ukcq to poll 4358 * @cur_cqe: current CQE info to be filled in 4359 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ 4360 * 4361 * Returns the internal irdma device error code or 0 on success 4362 */ 4363 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq, 4364 struct irdma_cq_poll_info *cur_cqe, 4365 struct ib_wc *entry) 4366 { 4367 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe); 4368 4369 if (ret) 4370 return ret; 4371 4372 irdma_process_cqe(entry, cur_cqe); 4373 4374 return 0; 4375 } 4376 4377 /** 4378 * __irdma_poll_cq - poll cq for completion (kernel apps) 4379 * @iwcq: cq to poll 4380 * @num_entries: number of entries to poll 4381 * @entry: wr of a completed entry 4382 */ 4383 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) 4384 { 4385 struct list_head *tmp_node, *list_node; 4386 struct irdma_cq_buf *last_buf = NULL; 4387 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; 4388 struct irdma_cq_buf *cq_buf; 4389 int ret; 4390 struct irdma_device *iwdev; 4391 struct irdma_cq_uk *ukcq; 4392 bool cq_new_cqe = false; 4393 int resized_bufs = 0; 4394 int npolled = 0; 4395 4396 iwdev = to_iwdev(iwcq->ibcq.device); 4397 ukcq = &iwcq->sc_cq.cq_uk; 4398 4399 /* go through the list of previously resized CQ buffers */ 4400 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 4401 cq_buf = container_of(list_node, struct irdma_cq_buf, list); 4402 while (npolled < num_entries) { 4403 ret = irdma_poll_one(&cq_buf->cq_uk, 
cur_cqe, entry + npolled);
4404 if (!ret) {
4405 ++npolled;
4406 cq_new_cqe = true;
4407 continue;
4408 }
4409 if (ret == -ENOENT)
4410 break;
4411 /* QP using the CQ is destroyed. Skip reporting this CQE */
4412 if (ret == -EFAULT) {
4413 cq_new_cqe = true;
4414 continue;
4415 }
4416 goto error;
4417 }
4418
4419 /* save the resized CQ buffer which received the last cqe */
4420 if (cq_new_cqe)
4421 last_buf = cq_buf;
4422 cq_new_cqe = false;
4423 }
4424
4425 /* check the current CQ for new cqes */
4426 while (npolled < num_entries) {
4427 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
4428 if (ret == -ENOENT) {
4429 ret = irdma_generated_cmpls(iwcq, cur_cqe);
4430 if (!ret)
4431 irdma_process_cqe(entry + npolled, cur_cqe);
4432 }
4433 if (!ret) {
4434 ++npolled;
4435 cq_new_cqe = true;
4436 continue;
4437 }
4438
4439 if (ret == -ENOENT)
4440 break;
4441 /* QP using the CQ is destroyed. Skip reporting this CQE */
4442 if (ret == -EFAULT) {
4443 cq_new_cqe = true;
4444 continue;
4445 }
4446 goto error;
4447 }
4448
4449 if (cq_new_cqe)
4450 /* all previous CQ resizes are complete */
4451 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
4452 else if (last_buf)
4453 /* only CQ resizes up to the last_buf are complete */
4454 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
4455 if (resized_bufs)
4456 /* report to the HW the number of complete CQ resizes */
4457 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
4458
4459 return npolled;
4460 error:
4461 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
4462 __func__, ret);
4463
4464 return ret;
4465 }
4466
4467 /**
4468 * irdma_poll_cq - poll cq for completion (kernel apps)
4469 * @ibcq: cq to poll
4470 * @num_entries: number of entries to poll
4471 * @entry: wr of a completed entry
4472 */
4473 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
4474 struct ib_wc *entry)
4475 {
4476 struct irdma_cq *iwcq;
4477 unsigned long flags;
4478 int ret;
4479
4480 iwcq = to_iwcq(ibcq);
4481
4482 spin_lock_irqsave(&iwcq->lock, flags);
4483 ret = __irdma_poll_cq(iwcq, num_entries, entry);
4484 spin_unlock_irqrestore(&iwcq->lock, flags);
4485
4486 return ret;
4487 }
4488
4489 /**
4490 * irdma_req_notify_cq - arm cq for kernel application
4491 * @ibcq: cq to arm
4492 * @notify_flags: notification flags
4493 */
4494 static int irdma_req_notify_cq(struct ib_cq *ibcq,
4495 enum ib_cq_notify_flags notify_flags)
4496 {
4497 struct irdma_cq *iwcq;
4498 struct irdma_cq_uk *ukcq;
4499 unsigned long flags;
4500 enum irdma_cmpl_notify cq_notify;
4501 bool promo_event = false;
4502 int ret = 0;
4503
4504 cq_notify = notify_flags == IB_CQ_SOLICITED ?
4505 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
4506 iwcq = to_iwcq(ibcq);
4507 ukcq = &iwcq->sc_cq.cq_uk;
4508
4509 spin_lock_irqsave(&iwcq->lock, flags);
4510 /* Only promote to arm the CQ for any event if the last arm event was solicited.
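* Re-arming matters because the atomic_cmpxchg() below only issues a new
* notification request when the CQ is not already armed; without the
* promo_event check, an upgrade from IB_CQ_SOLICITED to IB_CQ_NEXT_COMP on
* an armed CQ would be skipped.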
*/ 4511 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED) 4512 promo_event = true; 4513 4514 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) { 4515 iwcq->last_notify = cq_notify; 4516 irdma_uk_cq_request_notification(ukcq, cq_notify); 4517 } 4518 4519 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && 4520 (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated))) 4521 ret = 1; 4522 spin_unlock_irqrestore(&iwcq->lock, flags); 4523 4524 return ret; 4525 } 4526 4527 static const struct rdma_stat_desc irdma_hw_stat_descs[] = { 4528 /* gen1 - 32-bit */ 4529 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", 4530 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts", 4531 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes", 4532 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", 4533 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts", 4534 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes", 4535 [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors", 4536 /* gen1 - 64-bit */ 4537 [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets", 4538 [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts", 4539 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd", 4540 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts", 4541 [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets", 4542 [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts", 4543 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd", 4544 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts", 4545 [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets", 4546 [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts", 4547 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd", 4548 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts", 4549 [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets", 4550 [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts", 4551 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd", 4552 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts", 4553 [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads", 4554 [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends", 4555 [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites", 4556 [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads", 4557 [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends", 4558 [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites", 4559 [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "RdmaBnd", 4560 [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "RdmaInv", 4561 4562 /* gen2 - 32-bit */ 4563 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled", 4564 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored", 4565 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent", 4566 /* gen2 - 64-bit */ 4567 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets", 4568 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets", 4569 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets", 4570 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets", 4571 [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP", 4572 [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP", 4573 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd", 4574 [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "RetransSegs", 4575 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "InOptErrors", 4576 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "InProtoErrors", 4577 [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "InSegs", 4578 [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "OutSegs", 4579 4580 /* gen3 */ 4581 [IRDMA_HW_STAT_INDEX_RNR_SENT].name 
= "RNR sent", 4582 [IRDMA_HW_STAT_INDEX_RNR_RCVD].name = "RNR received", 4583 [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name = "ord limit count", 4584 [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name = "ird limit count", 4585 [IRDMA_HW_STAT_INDEX_RDMARXATS].name = "Rx atomics", 4586 [IRDMA_HW_STAT_INDEX_RDMATXATS].name = "Tx atomics", 4587 [IRDMA_HW_STAT_INDEX_NAKSEQERR].name = "Nak Sequence Error", 4588 [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name = "Nak Sequence Error Implied", 4589 [IRDMA_HW_STAT_INDEX_RTO].name = "RTO", 4590 [IRDMA_HW_STAT_INDEX_RXOOOPKTS].name = "Rcvd Out of order packets", 4591 [IRDMA_HW_STAT_INDEX_ICRCERR].name = "CRC errors", 4592 }; 4593 4594 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num, 4595 struct ib_port_immutable *immutable) 4596 { 4597 struct ib_port_attr attr; 4598 int err; 4599 4600 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 4601 err = ib_query_port(ibdev, port_num, &attr); 4602 if (err) 4603 return err; 4604 4605 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 4606 immutable->pkey_tbl_len = attr.pkey_tbl_len; 4607 immutable->gid_tbl_len = attr.gid_tbl_len; 4608 4609 return 0; 4610 } 4611 4612 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num, 4613 struct ib_port_immutable *immutable) 4614 { 4615 struct ib_port_attr attr; 4616 int err; 4617 4618 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; 4619 err = ib_query_port(ibdev, port_num, &attr); 4620 if (err) 4621 return err; 4622 immutable->gid_tbl_len = attr.gid_tbl_len; 4623 4624 return 0; 4625 } 4626 4627 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) 4628 { 4629 struct irdma_device *iwdev = to_iwdev(dev); 4630 4631 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", 4632 irdma_fw_major_ver(&iwdev->rf->sc_dev), 4633 irdma_fw_minor_ver(&iwdev->rf->sc_dev)); 4634 } 4635 4636 /** 4637 * irdma_alloc_hw_port_stats - Allocate a hw stats structure 4638 * @ibdev: device pointer from stack 4639 * @port_num: port number 4640 */ 4641 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev, 4642 u32 port_num) 4643 { 4644 struct irdma_device *iwdev = to_iwdev(ibdev); 4645 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 4646 4647 int num_counters = dev->hw_attrs.max_stat_idx; 4648 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; 4649 4650 return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters, 4651 lifespan); 4652 } 4653 4654 /** 4655 * irdma_get_hw_stats - Populates the rdma_hw_stats structure 4656 * @ibdev: device pointer from stack 4657 * @stats: stats pointer from stack 4658 * @port_num: port number 4659 * @index: which hw counter the stack is requesting we update 4660 */ 4661 static int irdma_get_hw_stats(struct ib_device *ibdev, 4662 struct rdma_hw_stats *stats, u32 port_num, 4663 int index) 4664 { 4665 struct irdma_device *iwdev = to_iwdev(ibdev); 4666 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats; 4667 4668 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) 4669 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true); 4670 else 4671 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat); 4672 4673 memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters); 4674 4675 return stats->num_counters; 4676 } 4677 4678 /** 4679 * irdma_query_gid - Query port GID 4680 * @ibdev: device pointer from stack 4681 * @port: port number 4682 * @index: Entry index 4683 * @gid: Global ID 4684 */ 4685 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index, 
4686 union ib_gid *gid) 4687 { 4688 struct irdma_device *iwdev = to_iwdev(ibdev); 4689 4690 memset(gid->raw, 0, sizeof(gid->raw)); 4691 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); 4692 4693 return 0; 4694 } 4695 4696 /** 4697 * mcast_list_add - Add a new mcast item to list 4698 * @rf: RDMA PCI function 4699 * @new_elem: pointer to element to add 4700 */ 4701 static void mcast_list_add(struct irdma_pci_f *rf, 4702 struct mc_table_list *new_elem) 4703 { 4704 list_add(&new_elem->list, &rf->mc_qht_list.list); 4705 } 4706 4707 /** 4708 * mcast_list_del - Remove an mcast item from list 4709 * @mc_qht_elem: pointer to mcast table list element 4710 */ 4711 static void mcast_list_del(struct mc_table_list *mc_qht_elem) 4712 { 4713 if (mc_qht_elem) 4714 list_del(&mc_qht_elem->list); 4715 } 4716 4717 /** 4718 * mcast_list_lookup_ip - Search mcast list for address 4719 * @rf: RDMA PCI function 4720 * @ip_mcast: pointer to mcast IP address 4721 */ 4722 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf, 4723 u32 *ip_mcast) 4724 { 4725 struct mc_table_list *mc_qht_el; 4726 struct list_head *pos, *q; 4727 4728 list_for_each_safe (pos, q, &rf->mc_qht_list.list) { 4729 mc_qht_el = list_entry(pos, struct mc_table_list, list); 4730 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, 4731 sizeof(mc_qht_el->mc_info.dest_ip))) 4732 return mc_qht_el; 4733 } 4734 4735 return NULL; 4736 } 4737 4738 /** 4739 * irdma_mcast_cqp_op - perform a mcast cqp operation 4740 * @iwdev: irdma device 4741 * @mc_grp_ctx: mcast group info 4742 * @op: operation 4743 * 4744 * returns error status 4745 */ 4746 static int irdma_mcast_cqp_op(struct irdma_device *iwdev, 4747 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) 4748 { 4749 struct cqp_cmds_info *cqp_info; 4750 struct irdma_cqp_request *cqp_request; 4751 int status; 4752 4753 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 4754 if (!cqp_request) 4755 return -ENOMEM; 4756 4757 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; 4758 cqp_info = &cqp_request->info; 4759 cqp_info->cqp_cmd = op; 4760 cqp_info->post_sq = 1; 4761 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; 4762 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; 4763 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 4764 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 4765 4766 return status; 4767 } 4768 4769 /** 4770 * irdma_mcast_mac - Get the multicast MAC for an IP address 4771 * @ip_addr: IPv4 or IPv6 address 4772 * @mac: pointer to result MAC address 4773 * @ipv4: flag indicating IPv4 or IPv6 4774 * 4775 */ 4776 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4) 4777 { 4778 u8 *ip = (u8 *)ip_addr; 4779 4780 if (ipv4) { 4781 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00, 4782 0x00, 0x00}; 4783 4784 mac4[3] = ip[2] & 0x7F; 4785 mac4[4] = ip[1]; 4786 mac4[5] = ip[0]; 4787 ether_addr_copy(mac, mac4); 4788 } else { 4789 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 4790 0x00, 0x00}; 4791 4792 mac6[2] = ip[3]; 4793 mac6[3] = ip[2]; 4794 mac6[4] = ip[1]; 4795 mac6[5] = ip[0]; 4796 ether_addr_copy(mac, mac6); 4797 } 4798 } 4799 4800 /** 4801 * irdma_attach_mcast - attach a qp to a multicast group 4802 * @ibqp: ptr to qp 4803 * @ibgid: pointer to global ID 4804 * @lid: local ID 4805 * 4806 * returns error status 4807 */ 4808 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 4809 { 4810 struct irdma_qp *iwqp = to_iwqp(ibqp); 4811 struct irdma_device *iwdev = iwqp->iwdev; 4812 struct 
irdma_pci_f *rf = iwdev->rf; 4813 struct mc_table_list *mc_qht_elem; 4814 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 4815 unsigned long flags; 4816 u32 ip_addr[4] = {}; 4817 u32 mgn; 4818 u32 no_mgs; 4819 int ret = 0; 4820 bool ipv4; 4821 u16 vlan_id; 4822 union irdma_sockaddr sgid_addr; 4823 unsigned char dmac[ETH_ALEN]; 4824 4825 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 4826 4827 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { 4828 irdma_copy_ip_ntohl(ip_addr, 4829 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4830 irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL); 4831 ipv4 = false; 4832 ibdev_dbg(&iwdev->ibdev, 4833 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num, 4834 ip_addr); 4835 irdma_mcast_mac(ip_addr, dmac, false); 4836 } else { 4837 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4838 ipv4 = true; 4839 vlan_id = irdma_get_vlan_ipv4(ip_addr); 4840 irdma_mcast_mac(ip_addr, dmac, true); 4841 ibdev_dbg(&iwdev->ibdev, 4842 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n", 4843 ibqp->qp_num, ip_addr, dmac); 4844 } 4845 4846 spin_lock_irqsave(&rf->qh_list_lock, flags); 4847 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 4848 if (!mc_qht_elem) { 4849 struct irdma_dma_mem *dma_mem_mc; 4850 4851 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4852 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL); 4853 if (!mc_qht_elem) 4854 return -ENOMEM; 4855 4856 mc_qht_elem->mc_info.ipv4_valid = ipv4; 4857 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, 4858 sizeof(mc_qht_elem->mc_info.dest_ip)); 4859 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, 4860 &mgn, &rf->next_mcg); 4861 if (ret) { 4862 kfree(mc_qht_elem); 4863 return -ENOMEM; 4864 } 4865 4866 mc_qht_elem->mc_info.mgn = mgn; 4867 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; 4868 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX, 4869 IRDMA_HW_PAGE_SIZE); 4870 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device, 4871 dma_mem_mc->size, 4872 &dma_mem_mc->pa, 4873 GFP_KERNEL); 4874 if (!dma_mem_mc->va) { 4875 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); 4876 kfree(mc_qht_elem); 4877 return -ENOMEM; 4878 } 4879 4880 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; 4881 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, 4882 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); 4883 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; 4884 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; 4885 if (vlan_id < VLAN_N_VID) 4886 mc_qht_elem->mc_grp_ctx.vlan_valid = true; 4887 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; 4888 mc_qht_elem->mc_grp_ctx.qs_handle = 4889 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; 4890 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); 4891 4892 spin_lock_irqsave(&rf->qh_list_lock, flags); 4893 mcast_list_add(rf, mc_qht_elem); 4894 } else { 4895 if (mc_qht_elem->mc_grp_ctx.no_of_mgs == 4896 IRDMA_MAX_MGS_PER_CTX) { 4897 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4898 return -ENOMEM; 4899 } 4900 } 4901 4902 mcg_info.qp_id = iwqp->ibqp.qp_num; 4903 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; 4904 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4905 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4906 4907 /* Only if there is a change do we need to modify or create */ 4908 if (!no_mgs) { 4909 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4910 IRDMA_OP_MC_CREATE); 4911 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4912 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4913 IRDMA_OP_MC_MODIFY); 
4914 } else { 4915 return 0; 4916 } 4917 4918 if (ret) 4919 goto error; 4920 4921 return 0; 4922 4923 error: 4924 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4925 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4926 mcast_list_del(mc_qht_elem); 4927 dma_free_coherent(rf->hw.device, 4928 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4929 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4930 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4931 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4932 irdma_free_rsrc(rf, rf->allocated_mcgs, 4933 mc_qht_elem->mc_grp_ctx.mg_id); 4934 kfree(mc_qht_elem); 4935 } 4936 4937 return ret; 4938 } 4939 4940 /** 4941 * irdma_detach_mcast - detach a qp from a multicast group 4942 * @ibqp: ptr to qp 4943 * @ibgid: pointer to global ID 4944 * @lid: local ID 4945 * 4946 * returns error status 4947 */ 4948 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 4949 { 4950 struct irdma_qp *iwqp = to_iwqp(ibqp); 4951 struct irdma_device *iwdev = iwqp->iwdev; 4952 struct irdma_pci_f *rf = iwdev->rf; 4953 u32 ip_addr[4] = {}; 4954 struct mc_table_list *mc_qht_elem; 4955 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 4956 int ret; 4957 unsigned long flags; 4958 union irdma_sockaddr sgid_addr; 4959 4960 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 4961 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) 4962 irdma_copy_ip_ntohl(ip_addr, 4963 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4964 else 4965 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4966 4967 spin_lock_irqsave(&rf->qh_list_lock, flags); 4968 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 4969 if (!mc_qht_elem) { 4970 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4971 ibdev_dbg(&iwdev->ibdev, 4972 "VERBS: address not found MCG\n"); 4973 return 0; 4974 } 4975 4976 mcg_info.qp_id = iwqp->ibqp.qp_num; 4977 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4978 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4979 mcast_list_del(mc_qht_elem); 4980 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4981 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4982 IRDMA_OP_MC_DESTROY); 4983 if (ret) { 4984 ibdev_dbg(&iwdev->ibdev, 4985 "VERBS: failed MC_DESTROY MCG\n"); 4986 spin_lock_irqsave(&rf->qh_list_lock, flags); 4987 mcast_list_add(rf, mc_qht_elem); 4988 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4989 return -EAGAIN; 4990 } 4991 4992 dma_free_coherent(rf->hw.device, 4993 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4994 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4995 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4996 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4997 irdma_free_rsrc(rf, rf->allocated_mcgs, 4998 mc_qht_elem->mc_grp_ctx.mg_id); 4999 kfree(mc_qht_elem); 5000 } else { 5001 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 5002 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 5003 IRDMA_OP_MC_MODIFY); 5004 if (ret) { 5005 ibdev_dbg(&iwdev->ibdev, 5006 "VERBS: failed Modify MCG\n"); 5007 return ret; 5008 } 5009 } 5010 5011 return 0; 5012 } 5013 5014 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep) 5015 { 5016 struct irdma_pci_f *rf = iwdev->rf; 5017 int err; 5018 5019 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx, 5020 &rf->next_ah); 5021 if (err) 5022 return err; 5023 5024 err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep, 5025 irdma_gsi_ud_qp_ah_cb, &ah->sc_ah); 5026 5027 if (err) { 5028 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail"); 5029 goto 
err_ah_create; 5030 } 5031 5032 if (!sleep) { 5033 const u64 tmout_ms = irdma_get_timeout_threshold(&rf->sc_dev) * 5034 CQP_COMPL_WAIT_TIME_MS; 5035 5036 if (poll_timeout_us_atomic(irdma_cqp_ce_handler(rf, 5037 &rf->ccq.sc_cq), 5038 ah->sc_ah.ah_info.ah_valid, 1, 5039 tmout_ms * USEC_PER_MSEC, false)) { 5040 ibdev_dbg(&iwdev->ibdev, 5041 "VERBS: CQP create AH timed out"); 5042 err = -ETIMEDOUT; 5043 goto err_ah_create; 5044 } 5045 } 5046 return 0; 5047 5048 err_ah_create: 5049 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx); 5050 5051 return err; 5052 } 5053 5054 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr) 5055 { 5056 struct irdma_pd *pd = to_iwpd(ibah->pd); 5057 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 5058 struct rdma_ah_attr *ah_attr = attr->ah_attr; 5059 const struct ib_gid_attr *sgid_attr; 5060 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 5061 struct irdma_pci_f *rf = iwdev->rf; 5062 struct irdma_sc_ah *sc_ah; 5063 struct irdma_ah_info *ah_info; 5064 union irdma_sockaddr sgid_addr, dgid_addr; 5065 int err; 5066 u8 dmac[ETH_ALEN]; 5067 5068 ah->pd = pd; 5069 sc_ah = &ah->sc_ah; 5070 sc_ah->ah_info.vsi = &iwdev->vsi; 5071 irdma_sc_init_ah(&rf->sc_dev, sc_ah); 5072 ah->sgid_index = ah_attr->grh.sgid_index; 5073 sgid_attr = ah_attr->grh.sgid_attr; 5074 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid)); 5075 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); 5076 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid); 5077 ah->av.attrs = *ah_attr; 5078 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr); 5079 ah_info = &sc_ah->ah_info; 5080 ah_info->pd_idx = pd->sc_pd.pd_id; 5081 if (ah_attr->ah_flags & IB_AH_GRH) { 5082 ah_info->flow_label = ah_attr->grh.flow_label; 5083 ah_info->hop_ttl = ah_attr->grh.hop_limit; 5084 ah_info->tc_tos = ah_attr->grh.traffic_class; 5085 } 5086 5087 ether_addr_copy(dmac, ah_attr->roce.dmac); 5088 if (ah->av.net_type == RDMA_NETWORK_IPV4) { 5089 ah_info->ipv4_valid = true; 5090 ah_info->dest_ip_addr[0] = 5091 ntohl(dgid_addr.saddr_in.sin_addr.s_addr); 5092 ah_info->src_ip_addr[0] = 5093 ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 5094 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], 5095 ah_info->dest_ip_addr[0]); 5096 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) { 5097 ah_info->do_lpbk = true; 5098 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true); 5099 } 5100 } else { 5101 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, 5102 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 5103 irdma_copy_ip_ntohl(ah_info->src_ip_addr, 5104 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 5105 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, 5106 ah_info->dest_ip_addr); 5107 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) { 5108 ah_info->do_lpbk = true; 5109 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false); 5110 } 5111 } 5112 5113 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, 5114 ah_info->mac_addr); 5115 if (err) 5116 return err; 5117 5118 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, 5119 ah_info->ipv4_valid, dmac); 5120 5121 if (ah_info->dst_arpindex == -1) 5122 return -EINVAL; 5123 5124 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) 5125 ah_info->vlan_tag = 0; 5126 5127 if (ah_info->vlan_tag < VLAN_N_VID) { 5128 u8 prio = rt_tos2priority(ah_info->tc_tos); 5129 5130 prio = irdma_roce_get_vlan_prio(sgid_attr, prio); 5131 5132 ah_info->vlan_tag |= 
(u16)prio << VLAN_PRIO_SHIFT; 5133 ah_info->insert_vlan_tag = true; 5134 } 5135 5136 return 0; 5137 } 5138 5139 /** 5140 * irdma_ah_exists - Check for existing identical AH 5141 * @iwdev: irdma device 5142 * @new_ah: AH to check for 5143 * 5144 * returns true if AH is found, false if not found. 5145 */ 5146 static bool irdma_ah_exists(struct irdma_device *iwdev, 5147 struct irdma_ah *new_ah) 5148 { 5149 struct irdma_ah *ah; 5150 u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^ 5151 new_ah->sc_ah.ah_info.dest_ip_addr[1] ^ 5152 new_ah->sc_ah.ah_info.dest_ip_addr[2] ^ 5153 new_ah->sc_ah.ah_info.dest_ip_addr[3]; 5154 5155 hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) { 5156 /* Set ah_valid and ah_id the same so memcmp can work */ 5157 new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx; 5158 new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid; 5159 if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info, 5160 sizeof(ah->sc_ah.ah_info))) { 5161 refcount_inc(&ah->refcnt); 5162 new_ah->parent_ah = ah; 5163 return true; 5164 } 5165 } 5166 5167 return false; 5168 } 5169 5170 /** 5171 * irdma_destroy_ah - Destroy address handle 5172 * @ibah: pointer to address handle 5173 * @ah_flags: flags for sleepable 5174 */ 5175 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) 5176 { 5177 struct irdma_device *iwdev = to_iwdev(ibah->device); 5178 struct irdma_ah *ah = to_iwah(ibah); 5179 5180 if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) { 5181 mutex_lock(&iwdev->rf->ah_tbl_lock); 5182 if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { 5183 mutex_unlock(&iwdev->rf->ah_tbl_lock); 5184 return 0; 5185 } 5186 hash_del(&ah->parent_ah->list); 5187 kfree(ah->parent_ah); 5188 mutex_unlock(&iwdev->rf->ah_tbl_lock); 5189 } 5190 5191 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, 5192 false, NULL, ah); 5193 5194 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, 5195 ah->sc_ah.ah_info.ah_idx); 5196 5197 return 0; 5198 } 5199 5200 /** 5201 * irdma_create_user_ah - create user address handle 5202 * @ibah: address handle 5203 * @attr: address handle attributes 5204 * @udata: User data 5205 * 5206 * returns 0 on success, error otherwise 5207 */ 5208 static int irdma_create_user_ah(struct ib_ah *ibah, 5209 struct rdma_ah_init_attr *attr, 5210 struct ib_udata *udata) 5211 { 5212 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) 5213 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 5214 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 5215 struct irdma_create_ah_resp uresp; 5216 struct irdma_ah *parent_ah; 5217 int err; 5218 5219 if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) 5220 return -EINVAL; 5221 5222 err = irdma_setup_ah(ibah, attr); 5223 if (err) 5224 return err; 5225 mutex_lock(&iwdev->rf->ah_tbl_lock); 5226 if (!irdma_ah_exists(iwdev, ah)) { 5227 err = irdma_create_hw_ah(iwdev, ah, true); 5228 if (err) { 5229 mutex_unlock(&iwdev->rf->ah_tbl_lock); 5230 return err; 5231 } 5232 /* Add new AH to list */ 5233 parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL); 5234 if (parent_ah) { 5235 u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^ 5236 parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^ 5237 parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^ 5238 parent_ah->sc_ah.ah_info.dest_ip_addr[3]; 5239 5240 ah->parent_ah = parent_ah; 5241 hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key); 5242 refcount_set(&parent_ah->refcnt, 1); 5243 } 5244 } 5245 mutex_unlock(&iwdev->rf->ah_tbl_lock); 5246 5247 
uresp.ah_id = ah->sc_ah.ah_info.ah_idx; 5248 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); 5249 if (err) 5250 irdma_destroy_ah(ibah, attr->flags); 5251 5252 return err; 5253 } 5254 5255 /** 5256 * irdma_create_ah - create address handle 5257 * @ibah: address handle 5258 * @attr: address handle attributes 5259 * @udata: NULL 5260 * 5261 * returns 0 on success, error otherwise 5262 */ 5263 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, 5264 struct ib_udata *udata) 5265 { 5266 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 5267 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 5268 int err; 5269 5270 err = irdma_setup_ah(ibah, attr); 5271 if (err) 5272 return err; 5273 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE); 5274 5275 return err; 5276 } 5277 5278 /** 5279 * irdma_query_ah - Query address handle 5280 * @ibah: pointer to address handle 5281 * @ah_attr: address handle attributes 5282 */ 5283 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) 5284 { 5285 struct irdma_ah *ah = to_iwah(ibah); 5286 5287 memset(ah_attr, 0, sizeof(*ah_attr)); 5288 if (ah->av.attrs.ah_flags & IB_AH_GRH) { 5289 ah_attr->ah_flags = IB_AH_GRH; 5290 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; 5291 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; 5292 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; 5293 ah_attr->grh.sgid_index = ah->sgid_index; 5294 memcpy(&ah_attr->grh.dgid, &ah->dgid, 5295 sizeof(ah_attr->grh.dgid)); 5296 } 5297 5298 return 0; 5299 } 5300 5301 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, 5302 u32 port_num) 5303 { 5304 return IB_LINK_LAYER_ETHERNET; 5305 } 5306 5307 static const struct ib_device_ops irdma_gen1_dev_ops = { 5308 .dealloc_driver = irdma_ib_dealloc_device, 5309 }; 5310 5311 static const struct ib_device_ops irdma_gen3_dev_ops = { 5312 .alloc_mw = irdma_alloc_mw, 5313 .create_srq = irdma_create_srq, 5314 .dealloc_mw = irdma_dealloc_mw, 5315 .destroy_srq = irdma_destroy_srq, 5316 .modify_srq = irdma_modify_srq, 5317 .post_srq_recv = irdma_post_srq_recv, 5318 .query_srq = irdma_query_srq, 5319 }; 5320 5321 static const struct ib_device_ops irdma_roce_dev_ops = { 5322 .attach_mcast = irdma_attach_mcast, 5323 .create_ah = irdma_create_ah, 5324 .create_user_ah = irdma_create_user_ah, 5325 .destroy_ah = irdma_destroy_ah, 5326 .detach_mcast = irdma_detach_mcast, 5327 .get_link_layer = irdma_get_link_layer, 5328 .get_port_immutable = irdma_roce_port_immutable, 5329 .modify_qp = irdma_modify_qp_roce, 5330 .query_ah = irdma_query_ah, 5331 .query_pkey = irdma_query_pkey, 5332 }; 5333 5334 static const struct ib_device_ops irdma_iw_dev_ops = { 5335 .get_port_immutable = irdma_iw_port_immutable, 5336 .iw_accept = irdma_accept, 5337 .iw_add_ref = irdma_qp_add_ref, 5338 .iw_connect = irdma_connect, 5339 .iw_create_listen = irdma_create_listen, 5340 .iw_destroy_listen = irdma_destroy_listen, 5341 .iw_get_qp = irdma_get_qp, 5342 .iw_reject = irdma_reject, 5343 .iw_rem_ref = irdma_qp_rem_ref, 5344 .modify_qp = irdma_modify_qp, 5345 .query_gid = irdma_query_gid, 5346 }; 5347 5348 static const struct ib_device_ops irdma_dev_ops = { 5349 .owner = THIS_MODULE, 5350 .driver_id = RDMA_DRIVER_IRDMA, 5351 .uverbs_abi_ver = IRDMA_ABI_VER, 5352 5353 .alloc_hw_port_stats = irdma_alloc_hw_port_stats, 5354 .alloc_mr = irdma_alloc_mr, 5355 .alloc_pd = irdma_alloc_pd, 5356 .alloc_ucontext = irdma_alloc_ucontext, 5357 .create_cq = 
irdma_create_cq, 5358 .create_qp = irdma_create_qp, 5359 .dealloc_driver = irdma_ib_dealloc_device, 5360 .dealloc_mw = irdma_dealloc_mw, 5361 .dealloc_pd = irdma_dealloc_pd, 5362 .dealloc_ucontext = irdma_dealloc_ucontext, 5363 .dereg_mr = irdma_dereg_mr, 5364 .destroy_cq = irdma_destroy_cq, 5365 .destroy_qp = irdma_destroy_qp, 5366 .disassociate_ucontext = irdma_disassociate_ucontext, 5367 .get_dev_fw_str = irdma_get_dev_fw_str, 5368 .get_dma_mr = irdma_get_dma_mr, 5369 .get_hw_stats = irdma_get_hw_stats, 5370 .map_mr_sg = irdma_map_mr_sg, 5371 .mmap = irdma_mmap, 5372 .mmap_free = irdma_mmap_free, 5373 .poll_cq = irdma_poll_cq, 5374 .post_recv = irdma_post_recv, 5375 .post_send = irdma_post_send, 5376 .query_device = irdma_query_device, 5377 .query_port = irdma_query_port, 5378 .query_qp = irdma_query_qp, 5379 .reg_user_mr = irdma_reg_user_mr, 5380 .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf, 5381 .rereg_user_mr = irdma_rereg_user_mr, 5382 .req_notify_cq = irdma_req_notify_cq, 5383 .resize_cq = irdma_resize_cq, 5384 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd), 5385 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext), 5386 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), 5387 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), 5388 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), 5389 INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp), 5390 INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq), 5391 }; 5392 5393 /** 5394 * irdma_init_roce_device - initialization of roce rdma device 5395 * @iwdev: irdma device 5396 */ 5397 static void irdma_init_roce_device(struct irdma_device *iwdev) 5398 { 5399 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; 5400 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 5401 iwdev->netdev->dev_addr); 5402 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); 5403 } 5404 5405 /** 5406 * irdma_init_iw_device - initialization of iwarp rdma device 5407 * @iwdev: irdma device 5408 */ 5409 static void irdma_init_iw_device(struct irdma_device *iwdev) 5410 { 5411 struct net_device *netdev = iwdev->netdev; 5412 5413 iwdev->ibdev.node_type = RDMA_NODE_RNIC; 5414 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 5415 netdev->dev_addr); 5416 memcpy(iwdev->ibdev.iw_ifname, netdev->name, 5417 sizeof(iwdev->ibdev.iw_ifname)); 5418 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops); 5419 } 5420 5421 /** 5422 * irdma_init_rdma_device - initialization of rdma device 5423 * @iwdev: irdma device 5424 */ 5425 static void irdma_init_rdma_device(struct irdma_device *iwdev) 5426 { 5427 struct pci_dev *pcidev = iwdev->rf->pcidev; 5428 5429 if (iwdev->roce_mode) 5430 irdma_init_roce_device(iwdev); 5431 else 5432 irdma_init_iw_device(iwdev); 5433 5434 iwdev->ibdev.phys_port_cnt = 1; 5435 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; 5436 iwdev->ibdev.dev.parent = &pcidev->dev; 5437 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops); 5438 if (iwdev->rf->rdma_ver == IRDMA_GEN_1) 5439 ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops); 5440 if (iwdev->rf->rdma_ver >= IRDMA_GEN_3) 5441 ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops); 5442 } 5443 5444 /** 5445 * irdma_port_ibevent - indicate port event 5446 * @iwdev: irdma device 5447 */ 5448 void irdma_port_ibevent(struct irdma_device *iwdev) 5449 { 5450 struct ib_event event; 5451 5452 event.device = &iwdev->ibdev; 5453 event.element.port_num = 1; 5454 event.event = 5455 iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
5456 ib_dispatch_event(&event);
5457 }
5458
5459 /**
5460 * irdma_ib_unregister_device - unregister rdma device from IB
5461 * core
5462 * @iwdev: irdma device
5463 */
5464 void irdma_ib_unregister_device(struct irdma_device *iwdev)
5465 {
5466 iwdev->iw_status = 0;
5467 irdma_port_ibevent(iwdev);
5468 ib_unregister_device(&iwdev->ibdev);
5469 }
5470
5471 /**
5472 * irdma_ib_register_device - register irdma device to IB core
5473 * @iwdev: irdma device
5474 */
5475 int irdma_ib_register_device(struct irdma_device *iwdev)
5476 {
5477 int ret;
5478
5479 irdma_init_rdma_device(iwdev);
5480
5481 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
5482 if (ret)
5483 goto error;
5484 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
5485 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
5486 if (ret)
5487 goto error;
5488
5489 iwdev->iw_status = 1;
5490 irdma_port_ibevent(iwdev);
5491
5492 return 0;
5493
5494 error:
5495 if (ret)
5496 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
5497
5498 return ret;
5499 }
5500
5501 /**
5502 * irdma_ib_dealloc_device - deallocate device resources
5503 * @ibdev: ib device
5504 *
5505 * callback from ibdev dealloc_driver to deallocate resources
5506 * under irdma device
5507 */
5508 void irdma_ib_dealloc_device(struct ib_device *ibdev)
5509 {
5510 struct irdma_device *iwdev = to_iwdev(ibdev);
5511
5512 irdma_rt_deinit_hw(iwdev);
5513 if (!iwdev->is_vport) {
5514 irdma_ctrl_deinit_hw(iwdev->rf);
5515 if (iwdev->rf->vchnl_wq) {
5516 destroy_workqueue(iwdev->rf->vchnl_wq);
5517 mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
5518 }
5519 }
5520 }
5521
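/*
 * Illustrative note on irdma_mcast_mac() above: the destination MAC is
 * derived directly from the multicast group address in the usual RFC 1112
 * fashion. Assuming a little-endian host, with the host-ordered IPv4
 * group 239.1.2.3 (0xef010203) in ip_addr[0]:
 *
 *	mac4[3] = ip[2] & 0x7F = 0x01
 *	mac4[4] = ip[1]        = 0x02
 *	mac4[5] = ip[0]        = 0x03
 *
 * which yields the multicast MAC 01:00:5e:01:02:03.
 */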