// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"

/**
 * irdma_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int irdma_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_pci_f *rf = iwdev->rf;
	struct pci_dev *pcidev = iwdev->rf->pcidev;
	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
			    iwdev->netdev->dev_addr);
	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
			irdma_fw_minor_ver(&rf->sc_dev);
	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
				  IB_DEVICE_MEM_MGT_EXTENSIONS;
	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	props->vendor_id = pcidev->vendor;
	props->vendor_part_id = pcidev->device;

	props->hw_ver = rf->pcidev->revision;
	props->page_size_cap = hw_attrs->page_size_cap;
	props->max_mr_size = hw_attrs->max_mr_size;
	props->max_qp = rf->max_qp - rf->used_qps;
	props->max_qp_wr = hw_attrs->max_qp_wr;
	props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
	props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
	props->max_cq = rf->max_cq - rf->used_cqs;
	props->max_cqe = rf->max_cqe - 1;
	props->max_mr = rf->max_mr - rf->used_mrs;
	props->max_mw = props->max_mr;
	props->max_pd = rf->max_pd - rf->used_pds;
	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
	if (rdma_protocol_roce(ibdev, 1)) {
		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
	}

	props->max_ah = rf->max_ah;
	props->max_mcast_grp = rf->max_mcg;
	props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
	props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
		props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;

	return 0;
}

/**
 * irdma_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning port attributes
 */
static int irdma_query_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	/* no need to zero out props here.
done by caller */ 79 80 props->max_mtu = IB_MTU_4096; 81 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 82 props->lid = 1; 83 props->lmc = 0; 84 props->sm_lid = 0; 85 props->sm_sl = 0; 86 if (netif_carrier_ok(netdev) && netif_running(netdev)) { 87 props->state = IB_PORT_ACTIVE; 88 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; 89 } else { 90 props->state = IB_PORT_DOWN; 91 props->phys_state = IB_PORT_PHYS_STATE_DISABLED; 92 } 93 94 ib_get_eth_speed(ibdev, port, &props->active_speed, 95 &props->active_width); 96 97 if (rdma_protocol_roce(ibdev, 1)) { 98 props->gid_tbl_len = 32; 99 props->ip_gids = true; 100 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ; 101 } else { 102 props->gid_tbl_len = 1; 103 } 104 props->qkey_viol_cntr = 0; 105 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP; 106 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; 107 108 return 0; 109 } 110 111 /** 112 * irdma_disassociate_ucontext - Disassociate user context 113 * @context: ib user context 114 */ 115 static void irdma_disassociate_ucontext(struct ib_ucontext *context) 116 { 117 } 118 119 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext, 120 struct vm_area_struct *vma) 121 { 122 u64 pfn; 123 124 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) 125 return -EINVAL; 126 127 vma->vm_private_data = ucontext; 128 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] + 129 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; 130 131 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, 132 pgprot_noncached(vma->vm_page_prot), NULL); 133 } 134 135 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 136 { 137 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry); 138 139 kfree(entry); 140 } 141 142 static struct rdma_user_mmap_entry* 143 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset, 144 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset) 145 { 146 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 147 int ret; 148 149 if (!entry) 150 return NULL; 151 152 entry->bar_offset = bar_offset; 153 entry->mmap_flag = mmap_flag; 154 155 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, 156 &entry->rdma_entry, PAGE_SIZE); 157 if (ret) { 158 kfree(entry); 159 return NULL; 160 } 161 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); 162 163 return &entry->rdma_entry; 164 } 165 166 /** 167 * irdma_mmap - user memory map 168 * @context: context created during alloc 169 * @vma: kernel info for user memory map 170 */ 171 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 172 { 173 struct rdma_user_mmap_entry *rdma_entry; 174 struct irdma_user_mmap_entry *entry; 175 struct irdma_ucontext *ucontext; 176 u64 pfn; 177 int ret; 178 179 ucontext = to_ucontext(context); 180 181 /* Legacy support for libi40iw with hard-coded mmap key */ 182 if (ucontext->legacy_mode) 183 return irdma_mmap_legacy(ucontext, vma); 184 185 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); 186 if (!rdma_entry) { 187 ibdev_dbg(&ucontext->iwdev->ibdev, 188 "VERBS: pgoff[0x%lx] does not have valid entry\n", 189 vma->vm_pgoff); 190 return -EINVAL; 191 } 192 193 entry = to_irdma_mmap_entry(rdma_entry); 194 ibdev_dbg(&ucontext->iwdev->ibdev, 195 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n", 196 entry->bar_offset, entry->mmap_flag); 197 198 pfn = (entry->bar_offset + 199 pci_resource_start(ucontext->iwdev->rf->pcidev, 
0)) >> PAGE_SHIFT; 200 201 switch (entry->mmap_flag) { 202 case IRDMA_MMAP_IO_NC: 203 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 204 pgprot_noncached(vma->vm_page_prot), 205 rdma_entry); 206 break; 207 case IRDMA_MMAP_IO_WC: 208 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, 209 pgprot_writecombine(vma->vm_page_prot), 210 rdma_entry); 211 break; 212 default: 213 ret = -EINVAL; 214 } 215 216 if (ret) 217 ibdev_dbg(&ucontext->iwdev->ibdev, 218 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", 219 entry->bar_offset, entry->mmap_flag, ret); 220 rdma_user_mmap_entry_put(rdma_entry); 221 222 return ret; 223 } 224 225 /** 226 * irdma_alloc_push_page - allocate a push page for qp 227 * @iwqp: qp pointer 228 */ 229 static void irdma_alloc_push_page(struct irdma_qp *iwqp) 230 { 231 struct irdma_cqp_request *cqp_request; 232 struct cqp_cmds_info *cqp_info; 233 struct irdma_device *iwdev = iwqp->iwdev; 234 struct irdma_sc_qp *qp = &iwqp->sc_qp; 235 int status; 236 237 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 238 if (!cqp_request) 239 return; 240 241 cqp_info = &cqp_request->info; 242 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; 243 cqp_info->post_sq = 1; 244 cqp_info->in.u.manage_push_page.info.push_idx = 0; 245 cqp_info->in.u.manage_push_page.info.qs_handle = 246 qp->vsi->qos[qp->user_pri].qs_handle; 247 cqp_info->in.u.manage_push_page.info.free_page = 0; 248 cqp_info->in.u.manage_push_page.info.push_page_type = 0; 249 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp; 250 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; 251 252 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 253 if (!status && cqp_request->compl_info.op_ret_val < 254 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) { 255 qp->push_idx = cqp_request->compl_info.op_ret_val; 256 qp->push_offset = 0; 257 } 258 259 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 260 } 261 262 /** 263 * irdma_alloc_ucontext - Allocate the user context data structure 264 * @uctx: uverbs context pointer 265 * @udata: user data 266 * 267 * This keeps track of all objects associated with a particular 268 * user-mode client. 
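 * On success the response copied back to the provider library advertises
 * the device's work-queue and CQ limits plus a doorbell mmap key; GEN_1
 * libi40iw clients using the short legacy response length take the
 * legacy_mode path instead.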
269 */ 270 static int irdma_alloc_ucontext(struct ib_ucontext *uctx, 271 struct ib_udata *udata) 272 { 273 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) 274 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) 275 struct ib_device *ibdev = uctx->device; 276 struct irdma_device *iwdev = to_iwdev(ibdev); 277 struct irdma_alloc_ucontext_req req = {}; 278 struct irdma_alloc_ucontext_resp uresp = {}; 279 struct irdma_ucontext *ucontext = to_ucontext(uctx); 280 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 281 282 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || 283 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) 284 return -EINVAL; 285 286 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) 287 return -EINVAL; 288 289 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) 290 goto ver_error; 291 292 ucontext->iwdev = iwdev; 293 ucontext->abi_ver = req.userspace_ver; 294 295 if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) 296 ucontext->use_raw_attrs = true; 297 298 /* GEN_1 legacy support with libi40iw */ 299 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { 300 if (uk_attrs->hw_rev != IRDMA_GEN_1) 301 return -EOPNOTSUPP; 302 303 ucontext->legacy_mode = true; 304 uresp.max_qps = iwdev->rf->max_qp; 305 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; 306 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; 307 uresp.kernel_ver = req.userspace_ver; 308 if (ib_copy_to_udata(udata, &uresp, 309 min(sizeof(uresp), udata->outlen))) 310 return -EFAULT; 311 } else { 312 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 313 314 ucontext->db_mmap_entry = 315 irdma_user_mmap_entry_insert(ucontext, bar_off, 316 IRDMA_MMAP_IO_NC, 317 &uresp.db_mmap_key); 318 if (!ucontext->db_mmap_entry) 319 return -ENOMEM; 320 321 uresp.kernel_ver = IRDMA_ABI_VER; 322 uresp.feature_flags = uk_attrs->feature_flags; 323 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; 324 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; 325 uresp.max_hw_inline = uk_attrs->max_hw_inline; 326 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; 327 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; 328 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; 329 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; 330 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; 331 uresp.hw_rev = uk_attrs->hw_rev; 332 uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; 333 uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; 334 uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE; 335 if (ib_copy_to_udata(udata, &uresp, 336 min(sizeof(uresp), udata->outlen))) { 337 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 338 return -EFAULT; 339 } 340 } 341 342 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); 343 spin_lock_init(&ucontext->cq_reg_mem_list_lock); 344 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); 345 spin_lock_init(&ucontext->qp_reg_mem_list_lock); 346 347 return 0; 348 349 ver_error: 350 ibdev_err(&iwdev->ibdev, 351 "Invalid userspace driver version detected. 
Detected version %d, should be %d\n", 352 req.userspace_ver, IRDMA_ABI_VER); 353 return -EINVAL; 354 } 355 356 /** 357 * irdma_dealloc_ucontext - deallocate the user context data structure 358 * @context: user context created during alloc 359 */ 360 static void irdma_dealloc_ucontext(struct ib_ucontext *context) 361 { 362 struct irdma_ucontext *ucontext = to_ucontext(context); 363 364 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 365 } 366 367 /** 368 * irdma_alloc_pd - allocate protection domain 369 * @pd: PD pointer 370 * @udata: user data 371 */ 372 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) 373 { 374 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) 375 struct irdma_pd *iwpd = to_iwpd(pd); 376 struct irdma_device *iwdev = to_iwdev(pd->device); 377 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 378 struct irdma_pci_f *rf = iwdev->rf; 379 struct irdma_alloc_pd_resp uresp = {}; 380 struct irdma_sc_pd *sc_pd; 381 u32 pd_id = 0; 382 int err; 383 384 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) 385 return -EINVAL; 386 387 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, 388 &rf->next_pd); 389 if (err) 390 return err; 391 392 sc_pd = &iwpd->sc_pd; 393 if (udata) { 394 struct irdma_ucontext *ucontext = 395 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 396 ibucontext); 397 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); 398 uresp.pd_id = pd_id; 399 if (ib_copy_to_udata(udata, &uresp, 400 min(sizeof(uresp), udata->outlen))) { 401 err = -EFAULT; 402 goto error; 403 } 404 } else { 405 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); 406 } 407 408 return 0; 409 error: 410 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); 411 412 return err; 413 } 414 415 /** 416 * irdma_dealloc_pd - deallocate pd 417 * @ibpd: ptr of pd to be deallocated 418 * @udata: user data 419 */ 420 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 421 { 422 struct irdma_pd *iwpd = to_iwpd(ibpd); 423 struct irdma_device *iwdev = to_iwdev(ibpd->device); 424 425 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); 426 427 return 0; 428 } 429 430 /** 431 * irdma_get_pbl - Retrieve pbl from a list given a virtual 432 * address 433 * @va: user virtual address 434 * @pbl_list: pbl list to search in (QP's or CQ's) 435 */ 436 static struct irdma_pbl *irdma_get_pbl(unsigned long va, 437 struct list_head *pbl_list) 438 { 439 struct irdma_pbl *iwpbl; 440 441 list_for_each_entry (iwpbl, pbl_list, list) { 442 if (iwpbl->user_base == va) { 443 list_del(&iwpbl->list); 444 iwpbl->on_list = false; 445 return iwpbl; 446 } 447 } 448 449 return NULL; 450 } 451 452 /** 453 * irdma_clean_cqes - clean cq entries for qp 454 * @iwqp: qp ptr (user or kernel) 455 * @iwcq: cq ptr 456 */ 457 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq) 458 { 459 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; 460 unsigned long flags; 461 462 spin_lock_irqsave(&iwcq->lock, flags); 463 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); 464 spin_unlock_irqrestore(&iwcq->lock, flags); 465 } 466 467 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp) 468 { 469 if (iwqp->push_db_mmap_entry) { 470 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry); 471 iwqp->push_db_mmap_entry = NULL; 472 } 473 if (iwqp->push_wqe_mmap_entry) { 474 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 475 iwqp->push_wqe_mmap_entry = NULL; 476 } 477 } 478 479 static int 
irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext, 480 struct irdma_qp *iwqp, 481 u64 *push_wqe_mmap_key, 482 u64 *push_db_mmap_key) 483 { 484 struct irdma_device *iwdev = ucontext->iwdev; 485 u64 rsvd, bar_off; 486 487 rsvd = IRDMA_PF_BAR_RSVD; 488 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 489 /* skip over db page */ 490 bar_off += IRDMA_HW_PAGE_SIZE; 491 /* push wqe page */ 492 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE; 493 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 494 bar_off, IRDMA_MMAP_IO_WC, 495 push_wqe_mmap_key); 496 if (!iwqp->push_wqe_mmap_entry) 497 return -ENOMEM; 498 499 /* push doorbell page */ 500 bar_off += IRDMA_HW_PAGE_SIZE; 501 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, 502 bar_off, IRDMA_MMAP_IO_NC, 503 push_db_mmap_key); 504 if (!iwqp->push_db_mmap_entry) { 505 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); 506 return -ENOMEM; 507 } 508 509 return 0; 510 } 511 512 /** 513 * irdma_destroy_qp - destroy qp 514 * @ibqp: qp's ib pointer also to get to device's qp address 515 * @udata: user data 516 */ 517 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 518 { 519 struct irdma_qp *iwqp = to_iwqp(ibqp); 520 struct irdma_device *iwdev = iwqp->iwdev; 521 522 iwqp->sc_qp.qp_uk.destroy_pending = true; 523 524 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) 525 irdma_modify_qp_to_err(&iwqp->sc_qp); 526 527 if (!iwqp->user_mode) 528 cancel_delayed_work_sync(&iwqp->dwork_flush); 529 530 if (!iwqp->user_mode) { 531 if (iwqp->iwscq) { 532 irdma_clean_cqes(iwqp, iwqp->iwscq); 533 if (iwqp->iwrcq != iwqp->iwscq) 534 irdma_clean_cqes(iwqp, iwqp->iwrcq); 535 } 536 } 537 538 irdma_qp_rem_ref(&iwqp->ibqp); 539 wait_for_completion(&iwqp->free_qp); 540 irdma_free_lsmm_rsrc(iwqp); 541 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); 542 543 irdma_remove_push_mmap_entries(iwqp); 544 irdma_free_qp_rsrc(iwqp); 545 546 return 0; 547 } 548 549 /** 550 * irdma_setup_virt_qp - setup for allocation of virtual qp 551 * @iwdev: irdma device 552 * @iwqp: qp ptr 553 * @init_info: initialize info to return 554 */ 555 static void irdma_setup_virt_qp(struct irdma_device *iwdev, 556 struct irdma_qp *iwqp, 557 struct irdma_qp_init_info *init_info) 558 { 559 struct irdma_pbl *iwpbl = iwqp->iwpbl; 560 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 561 562 iwqp->page = qpmr->sq_page; 563 init_info->shadow_area_pa = qpmr->shadow; 564 if (iwpbl->pbl_allocated) { 565 init_info->virtual_map = true; 566 init_info->sq_pa = qpmr->sq_pbl.idx; 567 init_info->rq_pa = qpmr->rq_pbl.idx; 568 } else { 569 init_info->sq_pa = qpmr->sq_pbl.addr; 570 init_info->rq_pa = qpmr->rq_pbl.addr; 571 } 572 } 573 574 /** 575 * irdma_setup_umode_qp - setup sq and rq size in user mode qp 576 * @udata: udata 577 * @iwdev: iwarp device 578 * @iwqp: qp ptr (user or kernel) 579 * @info: initialize info to return 580 * @init_attr: Initial QP create attributes 581 */ 582 static int irdma_setup_umode_qp(struct ib_udata *udata, 583 struct irdma_device *iwdev, 584 struct irdma_qp *iwqp, 585 struct irdma_qp_init_info *info, 586 struct ib_qp_init_attr *init_attr) 587 { 588 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, 589 struct irdma_ucontext, ibucontext); 590 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; 591 struct irdma_create_qp_req req; 592 unsigned long flags; 593 int ret; 594 595 ret = ib_copy_from_udata(&req, udata, 596 min(sizeof(req), udata->inlen)); 597 if (ret) { 
		ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
		return ret;
	}

	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
	iwqp->user_mode = 1;
	if (req.user_wqe_bufs) {
		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
					    &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

		if (!iwqp->iwpbl) {
			ret = -ENODATA;
			ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
			return ret;
		}
	}

	if (!ucontext->use_raw_attrs) {
		/*
		 * Maintain backward compat with older ABI which passes sq and
		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
		 * There is no way to compute the correct value of
		 * iwqp->max_send_wr/max_recv_wr in the kernel.
		 */
		iwqp->max_send_wr = init_attr->cap.max_send_wr;
		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
		ukinfo->sq_size = init_attr->cap.max_send_wr;
		ukinfo->rq_size = init_attr->cap.max_recv_wr;
		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
				       &ukinfo->rq_shift);
	} else {
		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
						   &ukinfo->sq_shift);
		if (ret)
			return ret;

		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
						   &ukinfo->rq_shift);
		if (ret)
			return ret;

		iwqp->max_send_wr =
			(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
		iwqp->max_recv_wr =
			(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
	}

	irdma_setup_virt_qp(iwdev, iwqp, info);

	return 0;
}

/**
 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 * @init_attr: Initial QP create attributes
 */
static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
				struct irdma_qp *iwqp,
				struct irdma_qp_init_info *info,
				struct ib_qp_init_attr *init_attr)
{
	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 size;
	int status;
	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
					      &ukinfo->sq_shift);
	if (status)
		return status;

	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
					      &ukinfo->rq_shift);
	if (status)
		return status;

	iwqp->kqp.sq_wrid_mem =
		kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
	if (!iwqp->kqp.sq_wrid_mem)
		return -ENOMEM;

	iwqp->kqp.rq_wrid_mem =
		kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);

	if (!iwqp->kqp.rq_wrid_mem) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		return -ENOMEM;
	}

	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;

	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
	size += (IRDMA_SHADOW_AREA_SIZE << 3);

	mem->size = ALIGN(size, 256);
	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		kfree(iwqp->kqp.rq_wrid_mem);
		iwqp->kqp.rq_wrid_mem =
NULL; 710 return -ENOMEM; 711 } 712 713 ukinfo->sq = mem->va; 714 info->sq_pa = mem->pa; 715 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth]; 716 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE); 717 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem; 718 info->shadow_area_pa = 719 info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE); 720 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; 721 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; 722 ukinfo->qp_id = iwqp->ibqp.qp_num; 723 724 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; 725 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; 726 init_attr->cap.max_send_wr = iwqp->max_send_wr; 727 init_attr->cap.max_recv_wr = iwqp->max_recv_wr; 728 729 return 0; 730 } 731 732 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) 733 { 734 struct irdma_pci_f *rf = iwqp->iwdev->rf; 735 struct irdma_cqp_request *cqp_request; 736 struct cqp_cmds_info *cqp_info; 737 struct irdma_create_qp_info *qp_info; 738 int status; 739 740 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 741 if (!cqp_request) 742 return -ENOMEM; 743 744 cqp_info = &cqp_request->info; 745 qp_info = &cqp_request->info.in.u.qp_create.info; 746 memset(qp_info, 0, sizeof(*qp_info)); 747 qp_info->mac_valid = true; 748 qp_info->cq_num_valid = true; 749 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE; 750 751 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; 752 cqp_info->post_sq = 1; 753 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; 754 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; 755 status = irdma_handle_cqp_op(rf, cqp_request); 756 irdma_put_cqp_request(&rf->cqp, cqp_request); 757 758 return status; 759 } 760 761 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 762 struct irdma_qp_host_ctx_info *ctx_info) 763 { 764 struct irdma_device *iwdev = iwqp->iwdev; 765 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 766 struct irdma_roce_offload_info *roce_info; 767 struct irdma_udp_offload_info *udp_info; 768 769 udp_info = &iwqp->udp_info; 770 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu)); 771 udp_info->cwnd = iwdev->roce_cwnd; 772 udp_info->rexmit_thresh = 2; 773 udp_info->rnr_nak_thresh = 2; 774 udp_info->src_port = 0xc000; 775 udp_info->dst_port = ROCE_V2_UDP_DPORT; 776 roce_info = &iwqp->roce_info; 777 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); 778 779 roce_info->rd_en = true; 780 roce_info->wr_rdresp_en = true; 781 roce_info->bind_en = true; 782 roce_info->dcqcn_en = false; 783 roce_info->rtomin = 5; 784 785 roce_info->ack_credits = iwdev->roce_ackcreds; 786 roce_info->ird_size = dev->hw_attrs.max_hw_ird; 787 roce_info->ord_size = dev->hw_attrs.max_hw_ord; 788 789 if (!iwqp->user_mode) { 790 roce_info->priv_mode_en = true; 791 roce_info->fast_reg_en = true; 792 roce_info->udprivcq_en = true; 793 } 794 roce_info->roce_tver = 0; 795 796 ctx_info->roce_info = &iwqp->roce_info; 797 ctx_info->udp_info = &iwqp->udp_info; 798 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 799 } 800 801 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp, 802 struct irdma_qp_host_ctx_info *ctx_info) 803 { 804 struct irdma_device *iwdev = iwqp->iwdev; 805 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 806 struct irdma_iwarp_offload_info *iwarp_info; 807 808 iwarp_info = &iwqp->iwarp_info; 809 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr); 810 iwarp_info->rd_en = true; 811 
iwarp_info->wr_rdresp_en = true; 812 iwarp_info->bind_en = true; 813 iwarp_info->ecn_en = true; 814 iwarp_info->rtomin = 5; 815 816 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 817 iwarp_info->ib_rd_en = true; 818 if (!iwqp->user_mode) { 819 iwarp_info->priv_mode_en = true; 820 iwarp_info->fast_reg_en = true; 821 } 822 iwarp_info->ddp_ver = 1; 823 iwarp_info->rdmap_ver = 1; 824 825 ctx_info->iwarp_info = &iwqp->iwarp_info; 826 ctx_info->iwarp_info_valid = true; 827 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 828 ctx_info->iwarp_info_valid = false; 829 } 830 831 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, 832 struct irdma_device *iwdev) 833 { 834 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 835 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 836 837 if (init_attr->create_flags) 838 return -EOPNOTSUPP; 839 840 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || 841 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || 842 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) 843 return -EINVAL; 844 845 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 846 if (init_attr->qp_type != IB_QPT_RC && 847 init_attr->qp_type != IB_QPT_UD && 848 init_attr->qp_type != IB_QPT_GSI) 849 return -EOPNOTSUPP; 850 } else { 851 if (init_attr->qp_type != IB_QPT_RC) 852 return -EOPNOTSUPP; 853 } 854 855 return 0; 856 } 857 858 static void irdma_flush_worker(struct work_struct *work) 859 { 860 struct delayed_work *dwork = to_delayed_work(work); 861 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush); 862 863 irdma_generate_flush_completions(iwqp); 864 } 865 866 /** 867 * irdma_create_qp - create qp 868 * @ibqp: ptr of qp 869 * @init_attr: attributes for qp 870 * @udata: user data for create qp 871 */ 872 static int irdma_create_qp(struct ib_qp *ibqp, 873 struct ib_qp_init_attr *init_attr, 874 struct ib_udata *udata) 875 { 876 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx) 877 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd) 878 struct ib_pd *ibpd = ibqp->pd; 879 struct irdma_pd *iwpd = to_iwpd(ibpd); 880 struct irdma_device *iwdev = to_iwdev(ibpd->device); 881 struct irdma_pci_f *rf = iwdev->rf; 882 struct irdma_qp *iwqp = to_iwqp(ibqp); 883 struct irdma_create_qp_resp uresp = {}; 884 u32 qp_num = 0; 885 int err_code; 886 struct irdma_sc_qp *qp; 887 struct irdma_sc_dev *dev = &rf->sc_dev; 888 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 889 struct irdma_qp_init_info init_info = {}; 890 struct irdma_qp_host_ctx_info *ctx_info; 891 892 err_code = irdma_validate_qp_attrs(init_attr, iwdev); 893 if (err_code) 894 return err_code; 895 896 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN || 897 udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN)) 898 return -EINVAL; 899 900 init_info.vsi = &iwdev->vsi; 901 init_info.qp_uk_init_info.uk_attrs = uk_attrs; 902 init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; 903 init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; 904 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; 905 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; 906 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; 907 908 qp = &iwqp->sc_qp; 909 qp->qp_uk.back_qp = iwqp; 910 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; 911 912 iwqp->iwdev = iwdev; 913 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE, 914 
256); 915 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device, 916 iwqp->q2_ctx_mem.size, 917 &iwqp->q2_ctx_mem.pa, 918 GFP_KERNEL); 919 if (!iwqp->q2_ctx_mem.va) 920 return -ENOMEM; 921 922 init_info.q2 = iwqp->q2_ctx_mem.va; 923 init_info.q2_pa = iwqp->q2_ctx_mem.pa; 924 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE); 925 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE; 926 927 if (init_attr->qp_type == IB_QPT_GSI) 928 qp_num = 1; 929 else 930 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, 931 &qp_num, &rf->next_qp); 932 if (err_code) 933 goto error; 934 935 iwqp->iwpd = iwpd; 936 iwqp->ibqp.qp_num = qp_num; 937 qp = &iwqp->sc_qp; 938 iwqp->iwscq = to_iwcq(init_attr->send_cq); 939 iwqp->iwrcq = to_iwcq(init_attr->recv_cq); 940 iwqp->host_ctx.va = init_info.host_ctx; 941 iwqp->host_ctx.pa = init_info.host_ctx_pa; 942 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; 943 944 init_info.pd = &iwpd->sc_pd; 945 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; 946 if (!rdma_protocol_roce(&iwdev->ibdev, 1)) 947 init_info.qp_uk_init_info.first_sq_wq = 1; 948 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; 949 init_waitqueue_head(&iwqp->waitq); 950 init_waitqueue_head(&iwqp->mod_qp_waitq); 951 952 if (udata) { 953 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; 954 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, 955 init_attr); 956 } else { 957 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); 958 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; 959 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); 960 } 961 962 if (err_code) { 963 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n"); 964 goto error; 965 } 966 967 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 968 if (init_attr->qp_type == IB_QPT_RC) { 969 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC; 970 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 971 IRDMA_WRITE_WITH_IMM | 972 IRDMA_ROCE; 973 } else { 974 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD; 975 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 976 IRDMA_ROCE; 977 } 978 } else { 979 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP; 980 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM; 981 } 982 983 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1) 984 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE; 985 986 err_code = irdma_sc_qp_init(qp, &init_info); 987 if (err_code) { 988 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n"); 989 goto error; 990 } 991 992 ctx_info = &iwqp->ctx_info; 993 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 994 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 995 996 if (rdma_protocol_roce(&iwdev->ibdev, 1)) 997 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info); 998 else 999 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info); 1000 1001 err_code = irdma_cqp_create_qp_cmd(iwqp); 1002 if (err_code) 1003 goto error; 1004 1005 refcount_set(&iwqp->refcnt, 1); 1006 spin_lock_init(&iwqp->lock); 1007 spin_lock_init(&iwqp->sc_qp.pfpdu.lock); 1008 iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; 1009 rf->qp_table[qp_num] = iwqp; 1010 1011 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 1012 if (dev->ws_add(&iwdev->vsi, 0)) { 1013 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); 1014 err_code = -EINVAL; 1015 goto error; 1016 } 1017 1018 irdma_qp_add_qos(&iwqp->sc_qp); 1019 } 1020 1021 if (udata) { 1022 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */ 1023 if 
(udata->outlen < sizeof(uresp)) { 1024 uresp.lsmm = 1; 1025 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1; 1026 } else { 1027 if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) 1028 uresp.lsmm = 1; 1029 } 1030 uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size; 1031 uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size; 1032 uresp.qp_id = qp_num; 1033 uresp.qp_caps = qp->qp_uk.qp_caps; 1034 1035 err_code = ib_copy_to_udata(udata, &uresp, 1036 min(sizeof(uresp), udata->outlen)); 1037 if (err_code) { 1038 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n"); 1039 irdma_destroy_qp(&iwqp->ibqp, udata); 1040 return err_code; 1041 } 1042 } 1043 1044 init_completion(&iwqp->free_qp); 1045 return 0; 1046 1047 error: 1048 irdma_free_qp_rsrc(iwqp); 1049 return err_code; 1050 } 1051 1052 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) 1053 { 1054 int acc_flags = 0; 1055 1056 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { 1057 if (iwqp->roce_info.wr_rdresp_en) { 1058 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1059 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1060 } 1061 if (iwqp->roce_info.rd_en) 1062 acc_flags |= IB_ACCESS_REMOTE_READ; 1063 if (iwqp->roce_info.bind_en) 1064 acc_flags |= IB_ACCESS_MW_BIND; 1065 } else { 1066 if (iwqp->iwarp_info.wr_rdresp_en) { 1067 acc_flags |= IB_ACCESS_LOCAL_WRITE; 1068 acc_flags |= IB_ACCESS_REMOTE_WRITE; 1069 } 1070 if (iwqp->iwarp_info.rd_en) 1071 acc_flags |= IB_ACCESS_REMOTE_READ; 1072 if (iwqp->iwarp_info.bind_en) 1073 acc_flags |= IB_ACCESS_MW_BIND; 1074 } 1075 return acc_flags; 1076 } 1077 1078 /** 1079 * irdma_query_qp - query qp attributes 1080 * @ibqp: qp pointer 1081 * @attr: attributes pointer 1082 * @attr_mask: Not used 1083 * @init_attr: qp attributes to return 1084 */ 1085 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1086 int attr_mask, struct ib_qp_init_attr *init_attr) 1087 { 1088 struct irdma_qp *iwqp = to_iwqp(ibqp); 1089 struct irdma_sc_qp *qp = &iwqp->sc_qp; 1090 1091 memset(attr, 0, sizeof(*attr)); 1092 memset(init_attr, 0, sizeof(*init_attr)); 1093 1094 attr->qp_state = iwqp->ibqp_state; 1095 attr->cur_qp_state = iwqp->ibqp_state; 1096 attr->cap.max_send_wr = iwqp->max_send_wr; 1097 attr->cap.max_recv_wr = iwqp->max_recv_wr; 1098 attr->cap.max_inline_data = qp->qp_uk.max_inline_data; 1099 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; 1100 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; 1101 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp); 1102 attr->port_num = 1; 1103 if (rdma_protocol_roce(ibqp->device, 1)) { 1104 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss); 1105 attr->qkey = iwqp->roce_info.qkey; 1106 attr->rq_psn = iwqp->udp_info.epsn; 1107 attr->sq_psn = iwqp->udp_info.psn_nxt; 1108 attr->dest_qp_num = iwqp->roce_info.dest_qp; 1109 attr->pkey_index = iwqp->roce_info.p_key; 1110 attr->retry_cnt = iwqp->udp_info.rexmit_thresh; 1111 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; 1112 attr->max_rd_atomic = iwqp->roce_info.ord_size; 1113 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; 1114 } 1115 1116 init_attr->event_handler = iwqp->ibqp.event_handler; 1117 init_attr->qp_context = iwqp->ibqp.qp_context; 1118 init_attr->send_cq = iwqp->ibqp.send_cq; 1119 init_attr->recv_cq = iwqp->ibqp.recv_cq; 1120 init_attr->cap = attr->cap; 1121 1122 return 0; 1123 } 1124 1125 /** 1126 * irdma_query_pkey - Query partition key 1127 * @ibdev: device pointer from stack 1128 * @port: port number 1129 * @index: index of pkey 1130 * @pkey: pointer to store the pkey 1131 */ 1132 static int 
irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, 1133 u16 *pkey) 1134 { 1135 if (index >= IRDMA_PKEY_TBL_SZ) 1136 return -EINVAL; 1137 1138 *pkey = IRDMA_DEFAULT_PKEY; 1139 return 0; 1140 } 1141 1142 static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio) 1143 { 1144 struct net_device *ndev; 1145 1146 rcu_read_lock(); 1147 ndev = rcu_dereference(attr->ndev); 1148 if (!ndev) 1149 goto exit; 1150 if (is_vlan_dev(ndev)) { 1151 u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio); 1152 1153 prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 1154 } 1155 exit: 1156 rcu_read_unlock(); 1157 return prio; 1158 } 1159 1160 static int irdma_wait_for_suspend(struct irdma_qp *iwqp) 1161 { 1162 if (!wait_event_timeout(iwqp->iwdev->suspend_wq, 1163 !iwqp->suspend_pending, 1164 msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) { 1165 iwqp->suspend_pending = false; 1166 ibdev_warn(&iwqp->iwdev->ibdev, 1167 "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n", 1168 iwqp->ibqp.qp_num, iwqp->last_aeq); 1169 return -EBUSY; 1170 } 1171 1172 return 0; 1173 } 1174 1175 /** 1176 * irdma_modify_qp_roce - modify qp request 1177 * @ibqp: qp's pointer for modify 1178 * @attr: access attributes 1179 * @attr_mask: state mask 1180 * @udata: user data 1181 */ 1182 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1183 int attr_mask, struct ib_udata *udata) 1184 { 1185 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) 1186 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) 1187 struct irdma_pd *iwpd = to_iwpd(ibqp->pd); 1188 struct irdma_qp *iwqp = to_iwqp(ibqp); 1189 struct irdma_device *iwdev = iwqp->iwdev; 1190 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1191 struct irdma_qp_host_ctx_info *ctx_info; 1192 struct irdma_roce_offload_info *roce_info; 1193 struct irdma_udp_offload_info *udp_info; 1194 struct irdma_modify_qp_info info = {}; 1195 struct irdma_modify_qp_resp uresp = {}; 1196 struct irdma_modify_qp_req ureq = {}; 1197 unsigned long flags; 1198 u8 issue_modify_qp = 0; 1199 int ret = 0; 1200 1201 ctx_info = &iwqp->ctx_info; 1202 roce_info = &iwqp->roce_info; 1203 udp_info = &iwqp->udp_info; 1204 1205 if (udata) { 1206 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ 1207 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || 1208 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) 1209 return -EINVAL; 1210 } 1211 1212 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1213 return -EOPNOTSUPP; 1214 1215 if (attr_mask & IB_QP_DEST_QPN) 1216 roce_info->dest_qp = attr->dest_qp_num; 1217 1218 if (attr_mask & IB_QP_PKEY_INDEX) { 1219 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index, 1220 &roce_info->p_key); 1221 if (ret) 1222 return ret; 1223 } 1224 1225 if (attr_mask & IB_QP_QKEY) 1226 roce_info->qkey = attr->qkey; 1227 1228 if (attr_mask & IB_QP_PATH_MTU) 1229 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu); 1230 1231 if (attr_mask & IB_QP_SQ_PSN) { 1232 udp_info->psn_nxt = attr->sq_psn; 1233 udp_info->lsn = 0xffff; 1234 udp_info->psn_una = attr->sq_psn; 1235 udp_info->psn_max = attr->sq_psn; 1236 } 1237 1238 if (attr_mask & IB_QP_RQ_PSN) 1239 udp_info->epsn = attr->rq_psn; 1240 1241 if (attr_mask & IB_QP_RNR_RETRY) 1242 udp_info->rnr_nak_thresh = attr->rnr_retry; 1243 1244 if (attr_mask & IB_QP_RETRY_CNT) 1245 udp_info->rexmit_thresh = attr->retry_cnt; 1246 1247 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; 1248 1249 
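	/*
	 * Summary note: the IB_QP_AV block below rebuilds the QP's address
	 * vector for RoCEv2 - GRH fields are copied into the UDP offload
	 * info (TTL, flow label, TOS-derived user priority), the SGID is
	 * resolved to MAC/VLAN, and the destination IP is added to the ARP
	 * cache so the QP context can later reference its arp_idx.
	 */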
	if (attr_mask & IB_QP_AV) {
		struct irdma_av *av = &iwqp->roce_ah.av;
		const struct ib_gid_attr *sgid_attr =
				attr->ah_attr.grh.sgid_attr;
		u16 vlan_id = VLAN_N_VID;
		u32 local_ip[4];

		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			udp_info->ttl = attr->ah_attr.grh.hop_limit;
			udp_info->flow_label = attr->ah_attr.grh.flow_label;
			udp_info->tos = attr->ah_attr.grh.traffic_class;
			udp_info->src_port =
				rdma_get_udp_sport(udp_info->flow_label,
						   ibqp->qp_num,
						   roce_info->dest_qp);
			irdma_qp_rem_qos(&iwqp->sc_qp);
			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
			if (iwqp->sc_qp.vsi->dscp_mode)
				ctx_info->user_pri =
				iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
			else
				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
		}
		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
					      ctx_info->roce_info->mac_addr);
		if (ret)
			return ret;
		ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
							      ctx_info->user_pri);
		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
			return -ENOMEM;
		iwqp->sc_qp.user_pri = ctx_info->user_pri;
		irdma_qp_add_qos(&iwqp->sc_qp);

		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
				ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
		av->net_type = rdma_gid_attr_network_type(sgid_attr);
		if (av->net_type == RDMA_NETWORK_IPV6) {
			__be32 *daddr =
				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
			__be32 *saddr =
				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);

		} else if (av->net_type == RDMA_NETWORK_IPV4) {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		}
		udp_info->arp_idx =
			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
				      attr->ah_attr.roce.dmac);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ord=%d\n",
				  attr->max_rd_atomic,
				  dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			ibdev_err(&iwdev->ibdev,
				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
				  attr->max_dest_rd_atomic,
				  dev->hw_attrs.max_hw_ird);
			return -EINVAL;
1352 } 1353 if (attr->max_dest_rd_atomic) 1354 roce_info->ird_size = attr->max_dest_rd_atomic; 1355 } 1356 1357 if (attr_mask & IB_QP_ACCESS_FLAGS) { 1358 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) 1359 roce_info->wr_rdresp_en = true; 1360 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 1361 roce_info->wr_rdresp_en = true; 1362 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 1363 roce_info->rd_en = true; 1364 } 1365 1366 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); 1367 1368 ibdev_dbg(&iwdev->ibdev, 1369 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n", 1370 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, 1371 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask); 1372 1373 spin_lock_irqsave(&iwqp->lock, flags); 1374 if (attr_mask & IB_QP_STATE) { 1375 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, 1376 iwqp->ibqp.qp_type, attr_mask)) { 1377 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n", 1378 iwqp->ibqp.qp_num, iwqp->ibqp_state, 1379 attr->qp_state); 1380 ret = -EINVAL; 1381 goto exit; 1382 } 1383 info.curr_iwarp_state = iwqp->iwarp_state; 1384 1385 switch (attr->qp_state) { 1386 case IB_QPS_INIT: 1387 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1388 ret = -EINVAL; 1389 goto exit; 1390 } 1391 1392 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1393 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1394 issue_modify_qp = 1; 1395 } 1396 break; 1397 case IB_QPS_RTR: 1398 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1399 ret = -EINVAL; 1400 goto exit; 1401 } 1402 info.arp_cache_idx_valid = true; 1403 info.cq_num_valid = true; 1404 info.next_iwarp_state = IRDMA_QP_STATE_RTR; 1405 issue_modify_qp = 1; 1406 break; 1407 case IB_QPS_RTS: 1408 if (iwqp->ibqp_state < IB_QPS_RTR || 1409 iwqp->ibqp_state == IB_QPS_ERR) { 1410 ret = -EINVAL; 1411 goto exit; 1412 } 1413 1414 info.arp_cache_idx_valid = true; 1415 info.cq_num_valid = true; 1416 info.ord_valid = true; 1417 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1418 issue_modify_qp = 1; 1419 if (iwdev->push_mode && udata && 1420 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1421 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1422 spin_unlock_irqrestore(&iwqp->lock, flags); 1423 irdma_alloc_push_page(iwqp); 1424 spin_lock_irqsave(&iwqp->lock, flags); 1425 } 1426 break; 1427 case IB_QPS_SQD: 1428 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD) 1429 goto exit; 1430 1431 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) { 1432 ret = -EINVAL; 1433 goto exit; 1434 } 1435 1436 info.next_iwarp_state = IRDMA_QP_STATE_SQD; 1437 issue_modify_qp = 1; 1438 iwqp->suspend_pending = true; 1439 break; 1440 case IB_QPS_SQE: 1441 case IB_QPS_ERR: 1442 case IB_QPS_RESET: 1443 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1444 spin_unlock_irqrestore(&iwqp->lock, flags); 1445 if (udata && udata->inlen) { 1446 if (ib_copy_from_udata(&ureq, udata, 1447 min(sizeof(ureq), udata->inlen))) 1448 return -EINVAL; 1449 1450 irdma_flush_wqes(iwqp, 1451 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1452 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1453 IRDMA_REFLUSH); 1454 } 1455 return 0; 1456 } 1457 1458 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1459 issue_modify_qp = 1; 1460 break; 1461 default: 1462 ret = -EINVAL; 1463 goto exit; 1464 } 1465 1466 iwqp->ibqp_state = attr->qp_state; 1467 } 1468 1469 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1470 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1471 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1472 spin_unlock_irqrestore(&iwqp->lock, flags); 1473 1474 if (attr_mask & IB_QP_STATE) { 1475 if (issue_modify_qp) { 1476 ctx_info->rem_endpoint_idx = udp_info->arp_idx; 1477 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1478 return -EINVAL; 1479 if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) { 1480 ret = irdma_wait_for_suspend(iwqp); 1481 if (ret) 1482 return ret; 1483 } 1484 spin_lock_irqsave(&iwqp->lock, flags); 1485 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1486 iwqp->iwarp_state = info.next_iwarp_state; 1487 iwqp->ibqp_state = attr->qp_state; 1488 } 1489 if (iwqp->ibqp_state > IB_QPS_RTS && 1490 !iwqp->flush_issued) { 1491 spin_unlock_irqrestore(&iwqp->lock, flags); 1492 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | 1493 IRDMA_FLUSH_RQ | 1494 IRDMA_FLUSH_WAIT); 1495 iwqp->flush_issued = 1; 1496 } else { 1497 spin_unlock_irqrestore(&iwqp->lock, flags); 1498 } 1499 } else { 1500 iwqp->ibqp_state = attr->qp_state; 1501 } 1502 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1503 struct irdma_ucontext *ucontext; 1504 1505 ucontext = rdma_udata_to_drv_context(udata, 1506 struct irdma_ucontext, ibucontext); 1507 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1508 !iwqp->push_wqe_mmap_entry && 1509 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1510 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1511 uresp.push_valid = 1; 1512 uresp.push_offset = iwqp->sc_qp.push_offset; 1513 } 1514 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1515 udata->outlen)); 1516 if (ret) { 1517 irdma_remove_push_mmap_entries(iwqp); 1518 ibdev_dbg(&iwdev->ibdev, 1519 "VERBS: copy_to_udata failed\n"); 1520 return ret; 1521 } 1522 } 1523 } 1524 1525 return 0; 1526 exit: 1527 spin_unlock_irqrestore(&iwqp->lock, flags); 1528 1529 return ret; 1530 } 1531 1532 /** 1533 * irdma_modify_qp - modify qp request 1534 * @ibqp: qp's pointer for modify 1535 * @attr: access attributes 1536 * @attr_mask: state mask 1537 * @udata: user data 1538 */ 1539 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1540 struct ib_udata *udata) 1541 { 1542 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush) 1543 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid) 1544 struct irdma_qp *iwqp = to_iwqp(ibqp); 1545 struct irdma_device *iwdev = iwqp->iwdev; 1546 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 1547 struct irdma_qp_host_ctx_info *ctx_info; 1548 struct irdma_tcp_offload_info *tcp_info; 1549 struct irdma_iwarp_offload_info *offload_info; 1550 struct irdma_modify_qp_info info = {}; 1551 struct irdma_modify_qp_resp uresp = {}; 1552 struct irdma_modify_qp_req ureq = {}; 1553 u8 issue_modify_qp = 0; 1554 u8 dont_wait = 0; 1555 int err; 1556 unsigned long flags; 1557 1558 if (udata) { 1559 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */ 1560 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || 1561 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) 1562 return 
-EINVAL; 1563 } 1564 1565 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) 1566 return -EOPNOTSUPP; 1567 1568 ctx_info = &iwqp->ctx_info; 1569 offload_info = &iwqp->iwarp_info; 1570 tcp_info = &iwqp->tcp_info; 1571 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); 1572 ibdev_dbg(&iwdev->ibdev, 1573 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n", 1574 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, 1575 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq, 1576 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask); 1577 1578 spin_lock_irqsave(&iwqp->lock, flags); 1579 if (attr_mask & IB_QP_STATE) { 1580 info.curr_iwarp_state = iwqp->iwarp_state; 1581 switch (attr->qp_state) { 1582 case IB_QPS_INIT: 1583 case IB_QPS_RTR: 1584 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { 1585 err = -EINVAL; 1586 goto exit; 1587 } 1588 1589 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { 1590 info.next_iwarp_state = IRDMA_QP_STATE_IDLE; 1591 issue_modify_qp = 1; 1592 } 1593 if (iwdev->push_mode && udata && 1594 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && 1595 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1596 spin_unlock_irqrestore(&iwqp->lock, flags); 1597 irdma_alloc_push_page(iwqp); 1598 spin_lock_irqsave(&iwqp->lock, flags); 1599 } 1600 break; 1601 case IB_QPS_RTS: 1602 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS || 1603 !iwqp->cm_id) { 1604 err = -EINVAL; 1605 goto exit; 1606 } 1607 1608 issue_modify_qp = 1; 1609 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED; 1610 iwqp->hte_added = 1; 1611 info.next_iwarp_state = IRDMA_QP_STATE_RTS; 1612 info.tcp_ctx_valid = true; 1613 info.ord_valid = true; 1614 info.arp_cache_idx_valid = true; 1615 info.cq_num_valid = true; 1616 break; 1617 case IB_QPS_SQD: 1618 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) { 1619 err = 0; 1620 goto exit; 1621 } 1622 1623 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING || 1624 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) { 1625 err = 0; 1626 goto exit; 1627 } 1628 1629 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) { 1630 err = -EINVAL; 1631 goto exit; 1632 } 1633 1634 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING; 1635 issue_modify_qp = 1; 1636 break; 1637 case IB_QPS_SQE: 1638 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) { 1639 err = -EINVAL; 1640 goto exit; 1641 } 1642 1643 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE; 1644 issue_modify_qp = 1; 1645 break; 1646 case IB_QPS_ERR: 1647 case IB_QPS_RESET: 1648 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { 1649 spin_unlock_irqrestore(&iwqp->lock, flags); 1650 if (udata && udata->inlen) { 1651 if (ib_copy_from_udata(&ureq, udata, 1652 min(sizeof(ureq), udata->inlen))) 1653 return -EINVAL; 1654 1655 irdma_flush_wqes(iwqp, 1656 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) | 1657 (ureq.rq_flush ? 
IRDMA_FLUSH_RQ : 0) | 1658 IRDMA_REFLUSH); 1659 } 1660 return 0; 1661 } 1662 1663 if (iwqp->sc_qp.term_flags) { 1664 spin_unlock_irqrestore(&iwqp->lock, flags); 1665 irdma_terminate_del_timer(&iwqp->sc_qp); 1666 spin_lock_irqsave(&iwqp->lock, flags); 1667 } 1668 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1669 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED && 1670 iwdev->iw_status && 1671 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT) 1672 info.reset_tcp_conn = true; 1673 else 1674 dont_wait = 1; 1675 1676 issue_modify_qp = 1; 1677 info.next_iwarp_state = IRDMA_QP_STATE_ERROR; 1678 break; 1679 default: 1680 err = -EINVAL; 1681 goto exit; 1682 } 1683 1684 iwqp->ibqp_state = attr->qp_state; 1685 } 1686 if (attr_mask & IB_QP_ACCESS_FLAGS) { 1687 ctx_info->iwarp_info_valid = true; 1688 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) 1689 offload_info->wr_rdresp_en = true; 1690 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 1691 offload_info->wr_rdresp_en = true; 1692 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 1693 offload_info->rd_en = true; 1694 } 1695 1696 if (ctx_info->iwarp_info_valid) { 1697 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1698 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1699 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); 1700 } 1701 spin_unlock_irqrestore(&iwqp->lock, flags); 1702 1703 if (attr_mask & IB_QP_STATE) { 1704 if (issue_modify_qp) { 1705 ctx_info->rem_endpoint_idx = tcp_info->arp_idx; 1706 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true)) 1707 return -EINVAL; 1708 } 1709 1710 spin_lock_irqsave(&iwqp->lock, flags); 1711 if (iwqp->iwarp_state == info.curr_iwarp_state) { 1712 iwqp->iwarp_state = info.next_iwarp_state; 1713 iwqp->ibqp_state = attr->qp_state; 1714 } 1715 spin_unlock_irqrestore(&iwqp->lock, flags); 1716 } 1717 1718 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { 1719 if (dont_wait) { 1720 if (iwqp->hw_tcp_state) { 1721 spin_lock_irqsave(&iwqp->lock, flags); 1722 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; 1723 iwqp->last_aeq = IRDMA_AE_RESET_SENT; 1724 spin_unlock_irqrestore(&iwqp->lock, flags); 1725 } 1726 irdma_cm_disconn(iwqp); 1727 } else { 1728 int close_timer_started; 1729 1730 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); 1731 1732 if (iwqp->cm_node) { 1733 refcount_inc(&iwqp->cm_node->refcnt); 1734 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1735 close_timer_started = atomic_inc_return(&iwqp->close_timer_started); 1736 if (iwqp->cm_id && close_timer_started == 1) 1737 irdma_schedule_cm_timer(iwqp->cm_node, 1738 (struct irdma_puda_buf *)iwqp, 1739 IRDMA_TIMER_TYPE_CLOSE, 1, 0); 1740 1741 irdma_rem_ref_cm_node(iwqp->cm_node); 1742 } else { 1743 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); 1744 } 1745 } 1746 } 1747 if (attr_mask & IB_QP_STATE && udata && udata->outlen && 1748 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1749 struct irdma_ucontext *ucontext; 1750 1751 ucontext = rdma_udata_to_drv_context(udata, 1752 struct irdma_ucontext, ibucontext); 1753 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && 1754 !iwqp->push_wqe_mmap_entry && 1755 !irdma_setup_push_mmap_entries(ucontext, iwqp, 1756 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) { 1757 uresp.push_valid = 1; 1758 uresp.push_offset = iwqp->sc_qp.push_offset; 1759 } 1760 1761 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), 1762 udata->outlen)); 1763 if (err) { 1764 irdma_remove_push_mmap_entries(iwqp); 1765 ibdev_dbg(&iwdev->ibdev, 1766 "VERBS: copy_to_udata 
failed\n"); 1767 return err; 1768 } 1769 } 1770 1771 return 0; 1772 exit: 1773 spin_unlock_irqrestore(&iwqp->lock, flags); 1774 1775 return err; 1776 } 1777 1778 /** 1779 * irdma_cq_free_rsrc - free up resources for cq 1780 * @rf: RDMA PCI function 1781 * @iwcq: cq ptr 1782 */ 1783 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq) 1784 { 1785 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1786 1787 if (!iwcq->user_mode) { 1788 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size, 1789 iwcq->kmem.va, iwcq->kmem.pa); 1790 iwcq->kmem.va = NULL; 1791 dma_free_coherent(rf->sc_dev.hw->device, 1792 iwcq->kmem_shadow.size, 1793 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); 1794 iwcq->kmem_shadow.va = NULL; 1795 } 1796 1797 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id); 1798 } 1799 1800 /** 1801 * irdma_free_cqbuf - worker to free a cq buffer 1802 * @work: provides access to the cq buffer to free 1803 */ 1804 static void irdma_free_cqbuf(struct work_struct *work) 1805 { 1806 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work); 1807 1808 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, 1809 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); 1810 cq_buf->kmem_buf.va = NULL; 1811 kfree(cq_buf); 1812 } 1813 1814 /** 1815 * irdma_process_resize_list - remove resized cq buffers from the resize_list 1816 * @iwcq: cq which owns the resize_list 1817 * @iwdev: irdma device 1818 * @lcqe_buf: the buffer where the last cqe is received 1819 */ 1820 static int irdma_process_resize_list(struct irdma_cq *iwcq, 1821 struct irdma_device *iwdev, 1822 struct irdma_cq_buf *lcqe_buf) 1823 { 1824 struct list_head *tmp_node, *list_node; 1825 struct irdma_cq_buf *cq_buf; 1826 int cnt = 0; 1827 1828 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 1829 cq_buf = list_entry(list_node, struct irdma_cq_buf, list); 1830 if (cq_buf == lcqe_buf) 1831 return cnt; 1832 1833 list_del(&cq_buf->list); 1834 queue_work(iwdev->cleanup_wq, &cq_buf->work); 1835 cnt++; 1836 } 1837 1838 return cnt; 1839 } 1840 1841 /** 1842 * irdma_destroy_cq - destroy cq 1843 * @ib_cq: cq pointer 1844 * @udata: user data 1845 */ 1846 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 1847 { 1848 struct irdma_device *iwdev = to_iwdev(ib_cq->device); 1849 struct irdma_cq *iwcq = to_iwcq(ib_cq); 1850 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1851 struct irdma_sc_dev *dev = cq->dev; 1852 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; 1853 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); 1854 unsigned long flags; 1855 1856 spin_lock_irqsave(&iwcq->lock, flags); 1857 if (!list_empty(&iwcq->cmpl_generated)) 1858 irdma_remove_cmpls_list(iwcq); 1859 if (!list_empty(&iwcq->resize_list)) 1860 irdma_process_resize_list(iwcq, iwdev, NULL); 1861 spin_unlock_irqrestore(&iwcq->lock, flags); 1862 1863 irdma_cq_rem_ref(ib_cq); 1864 wait_for_completion(&iwcq->free_cq); 1865 1866 irdma_cq_wq_destroy(iwdev->rf, cq); 1867 1868 spin_lock_irqsave(&iwceq->ce_lock, flags); 1869 irdma_sc_cleanup_ceqes(cq, ceq); 1870 spin_unlock_irqrestore(&iwceq->ce_lock, flags); 1871 irdma_cq_free_rsrc(iwdev->rf, iwcq); 1872 1873 return 0; 1874 } 1875 1876 /** 1877 * irdma_resize_cq - resize cq 1878 * @ibcq: cq to be resized 1879 * @entries: desired cq size 1880 * @udata: user data 1881 */ 1882 static int irdma_resize_cq(struct ib_cq *ibcq, int entries, 1883 struct ib_udata *udata) 1884 { 1885 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer) 
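	/*
	 * Outline: user-mode CQs resize into a buffer the application
	 * registered beforehand (looked up via irdma_get_pbl), kernel-mode
	 * CQs get a freshly allocated DMA buffer; either way the change is
	 * applied with an IRDMA_OP_CQ_MODIFY CQP command and the old kernel
	 * buffer is queued on resize_list to be freed once its last CQE has
	 * been polled.
	 */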
1886 struct irdma_cq *iwcq = to_iwcq(ibcq); 1887 struct irdma_sc_dev *dev = iwcq->sc_cq.dev; 1888 struct irdma_cqp_request *cqp_request; 1889 struct cqp_cmds_info *cqp_info; 1890 struct irdma_modify_cq_info *m_info; 1891 struct irdma_modify_cq_info info = {}; 1892 struct irdma_dma_mem kmem_buf; 1893 struct irdma_cq_mr *cqmr_buf; 1894 struct irdma_pbl *iwpbl_buf; 1895 struct irdma_device *iwdev; 1896 struct irdma_pci_f *rf; 1897 struct irdma_cq_buf *cq_buf = NULL; 1898 unsigned long flags; 1899 int ret; 1900 1901 iwdev = to_iwdev(ibcq->device); 1902 rf = iwdev->rf; 1903 1904 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 1905 IRDMA_FEATURE_CQ_RESIZE)) 1906 return -EOPNOTSUPP; 1907 1908 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN) 1909 return -EINVAL; 1910 1911 if (entries > rf->max_cqe) 1912 return -EINVAL; 1913 1914 if (!iwcq->user_mode) { 1915 entries++; 1916 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 1917 entries *= 2; 1918 } 1919 1920 info.cq_size = max(entries, 4); 1921 1922 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) 1923 return 0; 1924 1925 if (udata) { 1926 struct irdma_resize_cq_req req = {}; 1927 struct irdma_ucontext *ucontext = 1928 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 1929 ibucontext); 1930 1931 /* CQ resize not supported with legacy GEN_1 libi40iw */ 1932 if (ucontext->legacy_mode) 1933 return -EOPNOTSUPP; 1934 1935 if (ib_copy_from_udata(&req, udata, 1936 min(sizeof(req), udata->inlen))) 1937 return -EINVAL; 1938 1939 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 1940 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer, 1941 &ucontext->cq_reg_mem_list); 1942 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 1943 1944 if (!iwpbl_buf) 1945 return -ENOMEM; 1946 1947 cqmr_buf = &iwpbl_buf->cq_mr; 1948 if (iwpbl_buf->pbl_allocated) { 1949 info.virtual_map = true; 1950 info.pbl_chunk_size = 1; 1951 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; 1952 } else { 1953 info.cq_pa = cqmr_buf->cq_pbl.addr; 1954 } 1955 } else { 1956 /* Kmode CQ resize */ 1957 int rsize; 1958 1959 rsize = info.cq_size * sizeof(struct irdma_cqe); 1960 kmem_buf.size = ALIGN(round_up(rsize, 256), 256); 1961 kmem_buf.va = dma_alloc_coherent(dev->hw->device, 1962 kmem_buf.size, &kmem_buf.pa, 1963 GFP_KERNEL); 1964 if (!kmem_buf.va) 1965 return -ENOMEM; 1966 1967 info.cq_base = kmem_buf.va; 1968 info.cq_pa = kmem_buf.pa; 1969 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); 1970 if (!cq_buf) { 1971 ret = -ENOMEM; 1972 goto error; 1973 } 1974 } 1975 1976 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 1977 if (!cqp_request) { 1978 ret = -ENOMEM; 1979 goto error; 1980 } 1981 1982 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; 1983 info.cq_resize = true; 1984 1985 cqp_info = &cqp_request->info; 1986 m_info = &cqp_info->in.u.cq_modify.info; 1987 memcpy(m_info, &info, sizeof(*m_info)); 1988 1989 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY; 1990 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; 1991 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; 1992 cqp_info->post_sq = 1; 1993 ret = irdma_handle_cqp_op(rf, cqp_request); 1994 irdma_put_cqp_request(&rf->cqp, cqp_request); 1995 if (ret) 1996 goto error; 1997 1998 spin_lock_irqsave(&iwcq->lock, flags); 1999 if (cq_buf) { 2000 cq_buf->kmem_buf = iwcq->kmem; 2001 cq_buf->hw = dev->hw; 2002 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk)); 2003 INIT_WORK(&cq_buf->work, irdma_free_cqbuf); 2004 list_add_tail(&cq_buf->list, &iwcq->resize_list); 
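/* The previous kernel-mode CQ ring is kept on resize_list and freed from
 * the cleanup workqueue once its remaining CQEs have been polled; from
 * here on the CQ uses the newly allocated buffer.
 */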
2005 iwcq->kmem = kmem_buf; 2006 } 2007 2008 irdma_sc_cq_resize(&iwcq->sc_cq, &info); 2009 ibcq->cqe = info.cq_size - 1; 2010 spin_unlock_irqrestore(&iwcq->lock, flags); 2011 2012 return 0; 2013 error: 2014 if (!udata) { 2015 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va, 2016 kmem_buf.pa); 2017 kmem_buf.va = NULL; 2018 } 2019 kfree(cq_buf); 2020 2021 return ret; 2022 } 2023 2024 static inline int cq_validate_flags(u32 flags, u8 hw_rev) 2025 { 2026 /* GEN1 does not support CQ create flags */ 2027 if (hw_rev == IRDMA_GEN_1) 2028 return flags ? -EOPNOTSUPP : 0; 2029 2030 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0; 2031 } 2032 2033 /** 2034 * irdma_create_cq - create cq 2035 * @ibcq: CQ allocated 2036 * @attr: attributes for cq 2037 * @udata: user data 2038 */ 2039 static int irdma_create_cq(struct ib_cq *ibcq, 2040 const struct ib_cq_init_attr *attr, 2041 struct ib_udata *udata) 2042 { 2043 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf) 2044 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size) 2045 struct ib_device *ibdev = ibcq->device; 2046 struct irdma_device *iwdev = to_iwdev(ibdev); 2047 struct irdma_pci_f *rf = iwdev->rf; 2048 struct irdma_cq *iwcq = to_iwcq(ibcq); 2049 u32 cq_num = 0; 2050 struct irdma_sc_cq *cq; 2051 struct irdma_sc_dev *dev = &rf->sc_dev; 2052 struct irdma_cq_init_info info = {}; 2053 struct irdma_cqp_request *cqp_request; 2054 struct cqp_cmds_info *cqp_info; 2055 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; 2056 unsigned long flags; 2057 int err_code; 2058 int entries = attr->cqe; 2059 2060 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); 2061 if (err_code) 2062 return err_code; 2063 2064 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || 2065 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) 2066 return -EINVAL; 2067 2068 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, 2069 &rf->next_cq); 2070 if (err_code) 2071 return err_code; 2072 2073 cq = &iwcq->sc_cq; 2074 cq->back_cq = iwcq; 2075 refcount_set(&iwcq->refcnt, 1); 2076 spin_lock_init(&iwcq->lock); 2077 INIT_LIST_HEAD(&iwcq->resize_list); 2078 INIT_LIST_HEAD(&iwcq->cmpl_generated); 2079 info.dev = dev; 2080 ukinfo->cq_size = max(entries, 4); 2081 ukinfo->cq_id = cq_num; 2082 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; 2083 if (attr->comp_vector < rf->ceqs_count) 2084 info.ceq_id = attr->comp_vector; 2085 info.ceq_id_valid = true; 2086 info.ceqe_mask = 1; 2087 info.type = IRDMA_CQ_TYPE_IWARP; 2088 info.vsi = &iwdev->vsi; 2089 2090 if (udata) { 2091 struct irdma_ucontext *ucontext; 2092 struct irdma_create_cq_req req = {}; 2093 struct irdma_cq_mr *cqmr; 2094 struct irdma_pbl *iwpbl; 2095 struct irdma_pbl *iwpbl_shadow; 2096 struct irdma_cq_mr *cqmr_shadow; 2097 2098 iwcq->user_mode = true; 2099 ucontext = 2100 rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2101 ibucontext); 2102 if (ib_copy_from_udata(&req, udata, 2103 min(sizeof(req), udata->inlen))) { 2104 err_code = -EFAULT; 2105 goto cq_free_rsrc; 2106 } 2107 2108 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2109 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf, 2110 &ucontext->cq_reg_mem_list); 2111 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2112 if (!iwpbl) { 2113 err_code = -EPROTO; 2114 goto cq_free_rsrc; 2115 } 2116 2117 iwcq->iwpbl = iwpbl; 2118 iwcq->cq_mem_size = 0; 2119 cqmr = &iwpbl->cq_mr; 2120 2121 if 
(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 2122 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { 2123 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2124 iwpbl_shadow = irdma_get_pbl( 2125 (unsigned long)req.user_shadow_area, 2126 &ucontext->cq_reg_mem_list); 2127 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2128 2129 if (!iwpbl_shadow) { 2130 err_code = -EPROTO; 2131 goto cq_free_rsrc; 2132 } 2133 iwcq->iwpbl_shadow = iwpbl_shadow; 2134 cqmr_shadow = &iwpbl_shadow->cq_mr; 2135 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; 2136 cqmr->split = true; 2137 } else { 2138 info.shadow_area_pa = cqmr->shadow; 2139 } 2140 if (iwpbl->pbl_allocated) { 2141 info.virtual_map = true; 2142 info.pbl_chunk_size = 1; 2143 info.first_pm_pbl_idx = cqmr->cq_pbl.idx; 2144 } else { 2145 info.cq_base_pa = cqmr->cq_pbl.addr; 2146 } 2147 } else { 2148 /* Kmode allocations */ 2149 int rsize; 2150 2151 if (entries < 1 || entries > rf->max_cqe) { 2152 err_code = -EINVAL; 2153 goto cq_free_rsrc; 2154 } 2155 2156 entries++; 2157 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2158 entries *= 2; 2159 ukinfo->cq_size = entries; 2160 2161 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe); 2162 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256); 2163 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, 2164 iwcq->kmem.size, 2165 &iwcq->kmem.pa, GFP_KERNEL); 2166 if (!iwcq->kmem.va) { 2167 err_code = -ENOMEM; 2168 goto cq_free_rsrc; 2169 } 2170 2171 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3, 2172 64); 2173 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device, 2174 iwcq->kmem_shadow.size, 2175 &iwcq->kmem_shadow.pa, 2176 GFP_KERNEL); 2177 if (!iwcq->kmem_shadow.va) { 2178 err_code = -ENOMEM; 2179 goto cq_free_rsrc; 2180 } 2181 info.shadow_area_pa = iwcq->kmem_shadow.pa; 2182 ukinfo->shadow_area = iwcq->kmem_shadow.va; 2183 ukinfo->cq_base = iwcq->kmem.va; 2184 info.cq_base_pa = iwcq->kmem.pa; 2185 } 2186 2187 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 2188 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, 2189 (u32)IRDMA_MAX_CQ_READ_THRESH); 2190 2191 if (irdma_sc_cq_init(cq, &info)) { 2192 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); 2193 err_code = -EPROTO; 2194 goto cq_free_rsrc; 2195 } 2196 2197 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 2198 if (!cqp_request) { 2199 err_code = -ENOMEM; 2200 goto cq_free_rsrc; 2201 } 2202 2203 cqp_info = &cqp_request->info; 2204 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; 2205 cqp_info->post_sq = 1; 2206 cqp_info->in.u.cq_create.cq = cq; 2207 cqp_info->in.u.cq_create.check_overflow = true; 2208 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; 2209 err_code = irdma_handle_cqp_op(rf, cqp_request); 2210 irdma_put_cqp_request(&rf->cqp, cqp_request); 2211 if (err_code) 2212 goto cq_free_rsrc; 2213 2214 if (udata) { 2215 struct irdma_create_cq_resp resp = {}; 2216 2217 resp.cq_id = info.cq_uk_init_info.cq_id; 2218 resp.cq_size = info.cq_uk_init_info.cq_size; 2219 if (ib_copy_to_udata(udata, &resp, 2220 min(sizeof(resp), udata->outlen))) { 2221 ibdev_dbg(&iwdev->ibdev, 2222 "VERBS: copy to user data\n"); 2223 err_code = -EPROTO; 2224 goto cq_destroy; 2225 } 2226 } 2227 rf->cq_table[cq_num] = iwcq; 2228 init_completion(&iwcq->free_cq); 2229 2230 return 0; 2231 cq_destroy: 2232 irdma_cq_wq_destroy(rf, cq); 2233 cq_free_rsrc: 2234 irdma_cq_free_rsrc(rf, iwcq); 2235 2236 return err_code; 2237 } 2238 2239 /** 2240 * irdma_get_mr_access - get hw MR access permissions 
from IB access flags 2241 * @access: IB access flags 2242 */ 2243 static inline u16 irdma_get_mr_access(int access) 2244 { 2245 u16 hw_access = 0; 2246 2247 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? 2248 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0; 2249 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? 2250 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0; 2251 hw_access |= (access & IB_ACCESS_REMOTE_READ) ? 2252 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0; 2253 hw_access |= (access & IB_ACCESS_MW_BIND) ? 2254 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0; 2255 hw_access |= (access & IB_ZERO_BASED) ? 2256 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0; 2257 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD; 2258 2259 return hw_access; 2260 } 2261 2262 /** 2263 * irdma_free_stag - free stag resource 2264 * @iwdev: irdma device 2265 * @stag: stag to free 2266 */ 2267 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag) 2268 { 2269 u32 stag_idx; 2270 2271 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S; 2272 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx); 2273 } 2274 2275 /** 2276 * irdma_create_stag - create random stag 2277 * @iwdev: irdma device 2278 */ 2279 static u32 irdma_create_stag(struct irdma_device *iwdev) 2280 { 2281 u32 stag = 0; 2282 u32 stag_index = 0; 2283 u32 next_stag_index; 2284 u32 driver_key; 2285 u32 random; 2286 u8 consumer_key; 2287 int ret; 2288 2289 get_random_bytes(&random, sizeof(random)); 2290 consumer_key = (u8)random; 2291 2292 driver_key = random & ~iwdev->rf->mr_stagmask; 2293 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8; 2294 next_stag_index %= iwdev->rf->max_mr; 2295 2296 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, 2297 iwdev->rf->max_mr, &stag_index, 2298 &next_stag_index); 2299 if (ret) 2300 return stag; 2301 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S; 2302 stag |= driver_key; 2303 stag += (u32)consumer_key; 2304 2305 return stag; 2306 } 2307 2308 /** 2309 * irdma_next_pbl_addr - Get next pbl address 2310 * @pbl: pointer to a pble 2311 * @pinfo: info pointer 2312 * @idx: index 2313 */ 2314 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo, 2315 u32 *idx) 2316 { 2317 *idx += 1; 2318 if (!(*pinfo) || *idx != (*pinfo)->cnt) 2319 return ++pbl; 2320 *idx = 0; 2321 (*pinfo)++; 2322 2323 return (*pinfo)->addr; 2324 } 2325 2326 /** 2327 * irdma_copy_user_pgaddrs - copy user page address to pble's os locally 2328 * @iwmr: iwmr for IB's user page addresses 2329 * @pbl: ple pointer to save 1 level or 0 level pble 2330 * @level: indicated level 0, 1 or 2 2331 */ 2332 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl, 2333 enum irdma_pble_level level) 2334 { 2335 struct ib_umem *region = iwmr->region; 2336 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2337 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2338 struct irdma_pble_info *pinfo; 2339 struct ib_block_iter biter; 2340 u32 idx = 0; 2341 u32 pbl_cnt = 0; 2342 2343 pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; 2344 2345 if (iwmr->type == IRDMA_MEMREG_TYPE_QP) 2346 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl); 2347 2348 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { 2349 *pbl = rdma_block_iter_dma_address(&biter); 2350 if (++pbl_cnt == palloc->total_cnt) 2351 break; 2352 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx); 2353 } 2354 } 2355 2356 /** 2357 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous 2358 * @arr: lvl1 pbl array 2359 * @npages: page count 2360 * @pg_size: page size 2361 * 2362 */ 2363 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) 2364 { 2365 u32 pg_idx; 2366 2367 for (pg_idx = 0; pg_idx < npages; pg_idx++) { 2368 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) 2369 return false; 2370 } 2371 2372 return true; 2373 } 2374 2375 /** 2376 * irdma_check_mr_contiguous - check if MR is physically contiguous 2377 * @palloc: pbl allocation struct 2378 * @pg_size: page size 2379 */ 2380 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, 2381 u32 pg_size) 2382 { 2383 struct irdma_pble_level2 *lvl2 = &palloc->level2; 2384 struct irdma_pble_info *leaf = lvl2->leaf; 2385 u64 *arr = NULL; 2386 u64 *start_addr = NULL; 2387 int i; 2388 bool ret; 2389 2390 if (palloc->level == PBLE_LEVEL_1) { 2391 arr = palloc->level1.addr; 2392 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt, 2393 pg_size); 2394 return ret; 2395 } 2396 2397 start_addr = leaf->addr; 2398 2399 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { 2400 arr = leaf->addr; 2401 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr) 2402 return false; 2403 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size); 2404 if (!ret) 2405 return false; 2406 } 2407 2408 return true; 2409 } 2410 2411 /** 2412 * irdma_setup_pbles - copy user pg address to pble's 2413 * @rf: RDMA PCI function 2414 * @iwmr: mr pointer for this memory registration 2415 * @lvl: requested pble levels 2416 */ 2417 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, 2418 u8 lvl) 2419 { 2420 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2421 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2422 struct irdma_pble_info *pinfo; 2423 u64 *pbl; 2424 int status; 2425 enum irdma_pble_level level = PBLE_LEVEL_1; 2426 2427 if (lvl) { 2428 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, 2429 lvl); 2430 if (status) 2431 return status; 2432 2433 iwpbl->pbl_allocated = true; 2434 level = palloc->level; 2435 pinfo = (level == PBLE_LEVEL_1) ? 
&palloc->level1 : 2436 palloc->level2.leaf; 2437 pbl = pinfo->addr; 2438 } else { 2439 pbl = iwmr->pgaddrmem; 2440 } 2441 2442 irdma_copy_user_pgaddrs(iwmr, pbl, level); 2443 2444 if (lvl) 2445 iwmr->pgaddrmem[0] = *pbl; 2446 2447 return 0; 2448 } 2449 2450 /** 2451 * irdma_handle_q_mem - handle memory for qp and cq 2452 * @iwdev: irdma device 2453 * @req: information for q memory management 2454 * @iwpbl: pble struct 2455 * @lvl: pble level mask 2456 */ 2457 static int irdma_handle_q_mem(struct irdma_device *iwdev, 2458 struct irdma_mem_reg_req *req, 2459 struct irdma_pbl *iwpbl, u8 lvl) 2460 { 2461 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2462 struct irdma_mr *iwmr = iwpbl->iwmr; 2463 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 2464 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; 2465 struct irdma_hmc_pble *hmc_p; 2466 u64 *arr = iwmr->pgaddrmem; 2467 u32 pg_size, total; 2468 int err = 0; 2469 bool ret = true; 2470 2471 pg_size = iwmr->page_size; 2472 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 2473 if (err) 2474 return err; 2475 2476 if (lvl) 2477 arr = palloc->level1.addr; 2478 2479 switch (iwmr->type) { 2480 case IRDMA_MEMREG_TYPE_QP: 2481 total = req->sq_pages + req->rq_pages; 2482 hmc_p = &qpmr->sq_pbl; 2483 qpmr->shadow = (dma_addr_t)arr[total]; 2484 2485 if (lvl) { 2486 ret = irdma_check_mem_contiguous(arr, req->sq_pages, 2487 pg_size); 2488 if (ret) 2489 ret = irdma_check_mem_contiguous(&arr[req->sq_pages], 2490 req->rq_pages, 2491 pg_size); 2492 } 2493 2494 if (!ret) { 2495 hmc_p->idx = palloc->level1.idx; 2496 hmc_p = &qpmr->rq_pbl; 2497 hmc_p->idx = palloc->level1.idx + req->sq_pages; 2498 } else { 2499 hmc_p->addr = arr[0]; 2500 hmc_p = &qpmr->rq_pbl; 2501 hmc_p->addr = arr[req->sq_pages]; 2502 } 2503 break; 2504 case IRDMA_MEMREG_TYPE_CQ: 2505 hmc_p = &cqmr->cq_pbl; 2506 2507 if (!cqmr->split) 2508 cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; 2509 2510 if (lvl) 2511 ret = irdma_check_mem_contiguous(arr, req->cq_pages, 2512 pg_size); 2513 2514 if (!ret) 2515 hmc_p->idx = palloc->level1.idx; 2516 else 2517 hmc_p->addr = arr[0]; 2518 break; 2519 default: 2520 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n"); 2521 err = -EINVAL; 2522 } 2523 2524 if (lvl && ret) { 2525 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2526 iwpbl->pbl_allocated = false; 2527 } 2528 2529 return err; 2530 } 2531 2532 /** 2533 * irdma_hw_alloc_mw - create the hw memory window 2534 * @iwdev: irdma device 2535 * @iwmr: pointer to memory window info 2536 */ 2537 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) 2538 { 2539 struct irdma_mw_alloc_info *info; 2540 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); 2541 struct irdma_cqp_request *cqp_request; 2542 struct cqp_cmds_info *cqp_info; 2543 int status; 2544 2545 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2546 if (!cqp_request) 2547 return -ENOMEM; 2548 2549 cqp_info = &cqp_request->info; 2550 info = &cqp_info->in.u.mw_alloc.info; 2551 memset(info, 0, sizeof(*info)); 2552 if (iwmr->ibmw.type == IB_MW_TYPE_1) 2553 info->mw_wide = true; 2554 2555 info->page_size = PAGE_SIZE; 2556 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2557 info->pd_id = iwpd->sc_pd.pd_id; 2558 info->remote_access = true; 2559 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC; 2560 cqp_info->post_sq = 1; 2561 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; 2562 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; 2563 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2564 
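/* Release the CQP request regardless of whether the MW allocate command
 * completed; its status is returned to the caller below.
 */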
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2565 2566 return status; 2567 } 2568 2569 /** 2570 * irdma_alloc_mw - Allocate memory window 2571 * @ibmw: Memory Window 2572 * @udata: user data pointer 2573 */ 2574 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) 2575 { 2576 struct irdma_device *iwdev = to_iwdev(ibmw->device); 2577 struct irdma_mr *iwmr = to_iwmw(ibmw); 2578 int err_code; 2579 u32 stag; 2580 2581 stag = irdma_create_stag(iwdev); 2582 if (!stag) 2583 return -ENOMEM; 2584 2585 iwmr->stag = stag; 2586 ibmw->rkey = stag; 2587 2588 err_code = irdma_hw_alloc_mw(iwdev, iwmr); 2589 if (err_code) { 2590 irdma_free_stag(iwdev, stag); 2591 return err_code; 2592 } 2593 2594 return 0; 2595 } 2596 2597 /** 2598 * irdma_dealloc_mw - Dealloc memory window 2599 * @ibmw: memory window structure. 2600 */ 2601 static int irdma_dealloc_mw(struct ib_mw *ibmw) 2602 { 2603 struct ib_pd *ibpd = ibmw->pd; 2604 struct irdma_pd *iwpd = to_iwpd(ibpd); 2605 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); 2606 struct irdma_device *iwdev = to_iwdev(ibmw->device); 2607 struct irdma_cqp_request *cqp_request; 2608 struct cqp_cmds_info *cqp_info; 2609 struct irdma_dealloc_stag_info *info; 2610 2611 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2612 if (!cqp_request) 2613 return -ENOMEM; 2614 2615 cqp_info = &cqp_request->info; 2616 info = &cqp_info->in.u.dealloc_stag.info; 2617 memset(info, 0, sizeof(*info)); 2618 info->pd_id = iwpd->sc_pd.pd_id; 2619 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S; 2620 info->mr = false; 2621 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 2622 cqp_info->post_sq = 1; 2623 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 2624 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 2625 irdma_handle_cqp_op(iwdev->rf, cqp_request); 2626 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2627 irdma_free_stag(iwdev, iwmr->stag); 2628 2629 return 0; 2630 } 2631 2632 /** 2633 * irdma_hw_alloc_stag - cqp command to allocate stag 2634 * @iwdev: irdma device 2635 * @iwmr: irdma mr pointer 2636 */ 2637 static int irdma_hw_alloc_stag(struct irdma_device *iwdev, 2638 struct irdma_mr *iwmr) 2639 { 2640 struct irdma_allocate_stag_info *info; 2641 struct ib_pd *pd = iwmr->ibmr.pd; 2642 struct irdma_pd *iwpd = to_iwpd(pd); 2643 int status; 2644 struct irdma_cqp_request *cqp_request; 2645 struct cqp_cmds_info *cqp_info; 2646 2647 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2648 if (!cqp_request) 2649 return -ENOMEM; 2650 2651 cqp_info = &cqp_request->info; 2652 info = &cqp_info->in.u.alloc_stag.info; 2653 memset(info, 0, sizeof(*info)); 2654 info->page_size = PAGE_SIZE; 2655 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2656 info->pd_id = iwpd->sc_pd.pd_id; 2657 info->total_len = iwmr->len; 2658 info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; 2659 info->remote_access = true; 2660 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; 2661 cqp_info->post_sq = 1; 2662 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; 2663 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; 2664 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2665 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2666 if (status) 2667 return status; 2668 2669 iwmr->is_hwreg = 1; 2670 return 0; 2671 } 2672 2673 /** 2674 * irdma_alloc_mr - register stag for fast memory registration 2675 * @pd: ibpd pointer 2676 * @mr_type: memory for stag registrion 2677 * @max_num_sg: man number of pages 2678 */ 2679 static 
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 2680 u32 max_num_sg) 2681 { 2682 struct irdma_device *iwdev = to_iwdev(pd->device); 2683 struct irdma_pble_alloc *palloc; 2684 struct irdma_pbl *iwpbl; 2685 struct irdma_mr *iwmr; 2686 u32 stag; 2687 int err_code; 2688 2689 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2690 if (!iwmr) 2691 return ERR_PTR(-ENOMEM); 2692 2693 stag = irdma_create_stag(iwdev); 2694 if (!stag) { 2695 err_code = -ENOMEM; 2696 goto err; 2697 } 2698 2699 iwmr->stag = stag; 2700 iwmr->ibmr.rkey = stag; 2701 iwmr->ibmr.lkey = stag; 2702 iwmr->ibmr.pd = pd; 2703 iwmr->ibmr.device = pd->device; 2704 iwpbl = &iwmr->iwpbl; 2705 iwpbl->iwmr = iwmr; 2706 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 2707 palloc = &iwpbl->pble_alloc; 2708 iwmr->page_cnt = max_num_sg; 2709 /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */ 2710 iwmr->len = max_num_sg * PAGE_SIZE; 2711 err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt, 2712 false); 2713 if (err_code) 2714 goto err_get_pble; 2715 2716 err_code = irdma_hw_alloc_stag(iwdev, iwmr); 2717 if (err_code) 2718 goto err_alloc_stag; 2719 2720 iwpbl->pbl_allocated = true; 2721 2722 return &iwmr->ibmr; 2723 err_alloc_stag: 2724 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2725 err_get_pble: 2726 irdma_free_stag(iwdev, stag); 2727 err: 2728 kfree(iwmr); 2729 2730 return ERR_PTR(err_code); 2731 } 2732 2733 /** 2734 * irdma_set_page - populate pbl list for fmr 2735 * @ibmr: ib mem to access iwarp mr pointer 2736 * @addr: page dma address fro pbl list 2737 */ 2738 static int irdma_set_page(struct ib_mr *ibmr, u64 addr) 2739 { 2740 struct irdma_mr *iwmr = to_iwmr(ibmr); 2741 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2742 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2743 u64 *pbl; 2744 2745 if (unlikely(iwmr->npages == iwmr->page_cnt)) 2746 return -ENOMEM; 2747 2748 if (palloc->level == PBLE_LEVEL_2) { 2749 struct irdma_pble_info *palloc_info = 2750 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT); 2751 2752 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr; 2753 } else { 2754 pbl = palloc->level1.addr; 2755 pbl[iwmr->npages] = addr; 2756 } 2757 iwmr->npages++; 2758 2759 return 0; 2760 } 2761 2762 /** 2763 * irdma_map_mr_sg - map of sg list for fmr 2764 * @ibmr: ib mem to access iwarp mr pointer 2765 * @sg: scatter gather list 2766 * @sg_nents: number of sg pages 2767 * @sg_offset: scatter gather list for fmr 2768 */ 2769 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, 2770 int sg_nents, unsigned int *sg_offset) 2771 { 2772 struct irdma_mr *iwmr = to_iwmr(ibmr); 2773 2774 iwmr->npages = 0; 2775 2776 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page); 2777 } 2778 2779 /** 2780 * irdma_hwreg_mr - send cqp command for memory registration 2781 * @iwdev: irdma device 2782 * @iwmr: irdma mr pointer 2783 * @access: access for MR 2784 */ 2785 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, 2786 u16 access) 2787 { 2788 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2789 struct irdma_reg_ns_stag_info *stag_info; 2790 struct ib_pd *pd = iwmr->ibmr.pd; 2791 struct irdma_pd *iwpd = to_iwpd(pd); 2792 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2793 struct irdma_cqp_request *cqp_request; 2794 struct cqp_cmds_info *cqp_info; 2795 int ret; 2796 2797 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2798 if (!cqp_request) 2799 return -ENOMEM; 2800 2801 cqp_info = &cqp_request->info; 2802 
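/* Fill the non-shared MR registration info from the iwmr and pble state
 * before posting the register command on the CQP SQ.
 */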
stag_info = &cqp_info->in.u.mr_reg_non_shared.info; 2803 memset(stag_info, 0, sizeof(*stag_info)); 2804 stag_info->va = iwpbl->user_base; 2805 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2806 stag_info->stag_key = (u8)iwmr->stag; 2807 stag_info->total_len = iwmr->len; 2808 stag_info->access_rights = irdma_get_mr_access(access); 2809 stag_info->pd_id = iwpd->sc_pd.pd_id; 2810 stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; 2811 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) 2812 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; 2813 else 2814 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; 2815 stag_info->page_size = iwmr->page_size; 2816 2817 if (iwpbl->pbl_allocated) { 2818 if (palloc->level == PBLE_LEVEL_1) { 2819 stag_info->first_pm_pbl_index = palloc->level1.idx; 2820 stag_info->chunk_size = 1; 2821 } else { 2822 stag_info->first_pm_pbl_index = palloc->level2.root.idx; 2823 stag_info->chunk_size = 3; 2824 } 2825 } else { 2826 stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; 2827 } 2828 2829 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; 2830 cqp_info->post_sq = 1; 2831 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; 2832 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; 2833 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2834 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2835 2836 if (!ret) 2837 iwmr->is_hwreg = 1; 2838 2839 return ret; 2840 } 2841 2842 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access, 2843 bool create_stag) 2844 { 2845 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 2846 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2847 u32 stag = 0; 2848 u8 lvl; 2849 int err; 2850 2851 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; 2852 2853 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 2854 if (err) 2855 return err; 2856 2857 if (lvl) { 2858 err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, 2859 iwmr->page_size); 2860 if (err) { 2861 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 2862 iwpbl->pbl_allocated = false; 2863 } 2864 } 2865 2866 if (create_stag) { 2867 stag = irdma_create_stag(iwdev); 2868 if (!stag) { 2869 err = -ENOMEM; 2870 goto free_pble; 2871 } 2872 2873 iwmr->stag = stag; 2874 iwmr->ibmr.rkey = stag; 2875 iwmr->ibmr.lkey = stag; 2876 } 2877 2878 err = irdma_hwreg_mr(iwdev, iwmr, access); 2879 if (err) 2880 goto err_hwreg; 2881 2882 return 0; 2883 2884 err_hwreg: 2885 if (stag) 2886 irdma_free_stag(iwdev, stag); 2887 2888 free_pble: 2889 if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) 2890 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 2891 2892 return err; 2893 } 2894 2895 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region, 2896 struct ib_pd *pd, u64 virt, 2897 enum irdma_memreg_type reg_type) 2898 { 2899 struct irdma_device *iwdev = to_iwdev(pd->device); 2900 struct irdma_pbl *iwpbl; 2901 struct irdma_mr *iwmr; 2902 unsigned long pgsz_bitmap; 2903 2904 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2905 if (!iwmr) 2906 return ERR_PTR(-ENOMEM); 2907 2908 iwpbl = &iwmr->iwpbl; 2909 iwpbl->iwmr = iwmr; 2910 iwmr->region = region; 2911 iwmr->ibmr.pd = pd; 2912 iwmr->ibmr.device = pd->device; 2913 iwmr->ibmr.iova = virt; 2914 iwmr->type = reg_type; 2915 2916 pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ? 
2917 iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K; 2918 2919 iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt); 2920 if (unlikely(!iwmr->page_size)) { 2921 kfree(iwmr); 2922 return ERR_PTR(-EOPNOTSUPP); 2923 } 2924 2925 iwmr->len = region->length; 2926 iwpbl->user_base = virt; 2927 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); 2928 2929 return iwmr; 2930 } 2931 2932 static void irdma_free_iwmr(struct irdma_mr *iwmr) 2933 { 2934 kfree(iwmr); 2935 } 2936 2937 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, 2938 struct ib_udata *udata, 2939 struct irdma_mr *iwmr) 2940 { 2941 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 2942 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2943 struct irdma_ucontext *ucontext = NULL; 2944 unsigned long flags; 2945 u32 total; 2946 int err; 2947 u8 lvl; 2948 2949 /* iWarp: Catch page not starting on OS page boundary */ 2950 if (!rdma_protocol_roce(&iwdev->ibdev, 1) && 2951 ib_umem_offset(iwmr->region)) 2952 return -EINVAL; 2953 2954 total = req.sq_pages + req.rq_pages + 1; 2955 if (total > iwmr->page_cnt) 2956 return -EINVAL; 2957 2958 total = req.sq_pages + req.rq_pages; 2959 lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0; 2960 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 2961 if (err) 2962 return err; 2963 2964 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2965 ibucontext); 2966 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2967 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); 2968 iwpbl->on_list = true; 2969 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2970 2971 return 0; 2972 } 2973 2974 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, 2975 struct ib_udata *udata, 2976 struct irdma_mr *iwmr) 2977 { 2978 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 2979 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2980 struct irdma_ucontext *ucontext = NULL; 2981 u8 shadow_pgcnt = 1; 2982 unsigned long flags; 2983 u32 total; 2984 int err; 2985 u8 lvl; 2986 2987 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) 2988 shadow_pgcnt = 0; 2989 total = req.cq_pages + shadow_pgcnt; 2990 if (total > iwmr->page_cnt) 2991 return -EINVAL; 2992 2993 lvl = req.cq_pages > 1 ? 
PBLE_LEVEL_1 : PBLE_LEVEL_0; 2994 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 2995 if (err) 2996 return err; 2997 2998 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, 2999 ibucontext); 3000 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 3001 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); 3002 iwpbl->on_list = true; 3003 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 3004 3005 return 0; 3006 } 3007 3008 /** 3009 * irdma_reg_user_mr - Register a user memory region 3010 * @pd: ptr of pd 3011 * @start: virtual start address 3012 * @len: length of mr 3013 * @virt: virtual address 3014 * @access: access of mr 3015 * @udata: user data 3016 */ 3017 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, 3018 u64 virt, int access, 3019 struct ib_udata *udata) 3020 { 3021 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages) 3022 struct irdma_device *iwdev = to_iwdev(pd->device); 3023 struct irdma_mem_reg_req req = {}; 3024 struct ib_umem *region = NULL; 3025 struct irdma_mr *iwmr = NULL; 3026 int err; 3027 3028 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3029 return ERR_PTR(-EINVAL); 3030 3031 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN) 3032 return ERR_PTR(-EINVAL); 3033 3034 region = ib_umem_get(pd->device, start, len, access); 3035 3036 if (IS_ERR(region)) { 3037 ibdev_dbg(&iwdev->ibdev, 3038 "VERBS: Failed to create ib_umem region\n"); 3039 return (struct ib_mr *)region; 3040 } 3041 3042 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { 3043 ib_umem_release(region); 3044 return ERR_PTR(-EFAULT); 3045 } 3046 3047 iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type); 3048 if (IS_ERR(iwmr)) { 3049 ib_umem_release(region); 3050 return (struct ib_mr *)iwmr; 3051 } 3052 3053 switch (req.reg_type) { 3054 case IRDMA_MEMREG_TYPE_QP: 3055 err = irdma_reg_user_mr_type_qp(req, udata, iwmr); 3056 if (err) 3057 goto error; 3058 3059 break; 3060 case IRDMA_MEMREG_TYPE_CQ: 3061 err = irdma_reg_user_mr_type_cq(req, udata, iwmr); 3062 if (err) 3063 goto error; 3064 break; 3065 case IRDMA_MEMREG_TYPE_MEM: 3066 err = irdma_reg_user_mr_type_mem(iwmr, access, true); 3067 if (err) 3068 goto error; 3069 3070 break; 3071 default: 3072 err = -EINVAL; 3073 goto error; 3074 } 3075 3076 return &iwmr->ibmr; 3077 error: 3078 ib_umem_release(region); 3079 irdma_free_iwmr(iwmr); 3080 3081 return ERR_PTR(err); 3082 } 3083 3084 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, 3085 u64 len, u64 virt, 3086 int fd, int access, 3087 struct ib_udata *udata) 3088 { 3089 struct irdma_device *iwdev = to_iwdev(pd->device); 3090 struct ib_umem_dmabuf *umem_dmabuf; 3091 struct irdma_mr *iwmr; 3092 int err; 3093 3094 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3095 return ERR_PTR(-EINVAL); 3096 3097 umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access); 3098 if (IS_ERR(umem_dmabuf)) { 3099 err = PTR_ERR(umem_dmabuf); 3100 ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err); 3101 return ERR_PTR(err); 3102 } 3103 3104 iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM); 3105 if (IS_ERR(iwmr)) { 3106 err = PTR_ERR(iwmr); 3107 goto err_release; 3108 } 3109 3110 err = irdma_reg_user_mr_type_mem(iwmr, access, true); 3111 if (err) 3112 goto err_iwmr; 3113 3114 return &iwmr->ibmr; 3115 3116 err_iwmr: 3117 irdma_free_iwmr(iwmr); 3118 3119 err_release: 3120 ib_umem_release(&umem_dmabuf->umem); 3121 3122 return 
ERR_PTR(err); 3123 } 3124 3125 static int irdma_hwdereg_mr(struct ib_mr *ib_mr) 3126 { 3127 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3128 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3129 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd); 3130 struct irdma_dealloc_stag_info *info; 3131 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3132 struct irdma_cqp_request *cqp_request; 3133 struct cqp_cmds_info *cqp_info; 3134 int status; 3135 3136 /* Skip HW MR de-register when it is already de-registered 3137 * during an MR re-reregister and the re-registration fails 3138 */ 3139 if (!iwmr->is_hwreg) 3140 return 0; 3141 3142 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3143 if (!cqp_request) 3144 return -ENOMEM; 3145 3146 cqp_info = &cqp_request->info; 3147 info = &cqp_info->in.u.dealloc_stag.info; 3148 memset(info, 0, sizeof(*info)); 3149 info->pd_id = iwpd->sc_pd.pd_id; 3150 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; 3151 info->mr = true; 3152 if (iwpbl->pbl_allocated) 3153 info->dealloc_pbl = true; 3154 3155 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 3156 cqp_info->post_sq = 1; 3157 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 3158 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 3159 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 3160 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3161 if (status) 3162 return status; 3163 3164 iwmr->is_hwreg = 0; 3165 return 0; 3166 } 3167 3168 /* 3169 * irdma_rereg_mr_trans - Re-register a user MR for a change translation. 3170 * @iwmr: ptr of iwmr 3171 * @start: virtual start address 3172 * @len: length of mr 3173 * @virt: virtual address 3174 * 3175 * Re-register a user memory region when a change translation is requested. 3176 * Re-register a new region while reusing the stag from the original registration. 
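* Returns 0 on success or a negative error code on failure.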
3177 */ 3178 static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, 3179 u64 virt) 3180 { 3181 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 3182 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3183 struct ib_pd *pd = iwmr->ibmr.pd; 3184 struct ib_umem *region; 3185 int err; 3186 3187 region = ib_umem_get(pd->device, start, len, iwmr->access); 3188 if (IS_ERR(region)) 3189 return PTR_ERR(region); 3190 3191 iwmr->region = region; 3192 iwmr->ibmr.iova = virt; 3193 iwmr->ibmr.pd = pd; 3194 iwmr->page_size = ib_umem_find_best_pgsz(region, 3195 iwdev->rf->sc_dev.hw_attrs.page_size_cap, 3196 virt); 3197 if (unlikely(!iwmr->page_size)) { 3198 err = -EOPNOTSUPP; 3199 goto err; 3200 } 3201 3202 iwmr->len = region->length; 3203 iwpbl->user_base = virt; 3204 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); 3205 3206 err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false); 3207 if (err) 3208 goto err; 3209 3210 return 0; 3211 3212 err: 3213 ib_umem_release(region); 3214 return err; 3215 } 3216 3217 /* 3218 * irdma_rereg_user_mr - Re-Register a user memory region(MR) 3219 * @ibmr: ib mem to access iwarp mr pointer 3220 * @flags: bit mask to indicate which of the attr's of MR modified 3221 * @start: virtual start address 3222 * @len: length of mr 3223 * @virt: virtual address 3224 * @new_access: bit mask of access flags 3225 * @new_pd: ptr of pd 3226 * @udata: user data 3227 * 3228 * Return: 3229 * NULL - Success, existing MR updated 3230 * ERR_PTR - error occurred 3231 */ 3232 static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, 3233 u64 start, u64 len, u64 virt, 3234 int new_access, struct ib_pd *new_pd, 3235 struct ib_udata *udata) 3236 { 3237 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3238 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3239 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3240 int ret; 3241 3242 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 3243 return ERR_PTR(-EINVAL); 3244 3245 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) 3246 return ERR_PTR(-EOPNOTSUPP); 3247 3248 ret = irdma_hwdereg_mr(ib_mr); 3249 if (ret) 3250 return ERR_PTR(ret); 3251 3252 if (flags & IB_MR_REREG_ACCESS) 3253 iwmr->access = new_access; 3254 3255 if (flags & IB_MR_REREG_PD) { 3256 iwmr->ibmr.pd = new_pd; 3257 iwmr->ibmr.device = new_pd->device; 3258 } 3259 3260 if (flags & IB_MR_REREG_TRANS) { 3261 if (iwpbl->pbl_allocated) { 3262 irdma_free_pble(iwdev->rf->pble_rsrc, 3263 &iwpbl->pble_alloc); 3264 iwpbl->pbl_allocated = false; 3265 } 3266 if (iwmr->region) { 3267 ib_umem_release(iwmr->region); 3268 iwmr->region = NULL; 3269 } 3270 3271 ret = irdma_rereg_mr_trans(iwmr, start, len, virt); 3272 } else 3273 ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); 3274 if (ret) 3275 return ERR_PTR(ret); 3276 3277 return NULL; 3278 } 3279 3280 /** 3281 * irdma_reg_phys_mr - register kernel physical memory 3282 * @pd: ibpd pointer 3283 * @addr: physical address of memory to register 3284 * @size: size of memory to register 3285 * @access: Access rights 3286 * @iova_start: start of virtual address for physical buffers 3287 */ 3288 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, 3289 u64 *iova_start) 3290 { 3291 struct irdma_device *iwdev = to_iwdev(pd->device); 3292 struct irdma_pbl *iwpbl; 3293 struct irdma_mr *iwmr; 3294 u32 stag; 3295 int ret; 3296 3297 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 3298 if (!iwmr) 3299 return ERR_PTR(-ENOMEM); 3300 3301 iwmr->ibmr.pd = pd; 3302 iwmr->ibmr.device = 
pd->device; 3303 iwpbl = &iwmr->iwpbl; 3304 iwpbl->iwmr = iwmr; 3305 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 3306 iwpbl->user_base = *iova_start; 3307 stag = irdma_create_stag(iwdev); 3308 if (!stag) { 3309 ret = -ENOMEM; 3310 goto err; 3311 } 3312 3313 iwmr->stag = stag; 3314 iwmr->ibmr.iova = *iova_start; 3315 iwmr->ibmr.rkey = stag; 3316 iwmr->ibmr.lkey = stag; 3317 iwmr->page_cnt = 1; 3318 iwmr->pgaddrmem[0] = addr; 3319 iwmr->len = size; 3320 iwmr->page_size = SZ_4K; 3321 ret = irdma_hwreg_mr(iwdev, iwmr, access); 3322 if (ret) { 3323 irdma_free_stag(iwdev, stag); 3324 goto err; 3325 } 3326 3327 return &iwmr->ibmr; 3328 3329 err: 3330 kfree(iwmr); 3331 3332 return ERR_PTR(ret); 3333 } 3334 3335 /** 3336 * irdma_get_dma_mr - register physical mem 3337 * @pd: ptr of pd 3338 * @acc: access for memory 3339 */ 3340 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc) 3341 { 3342 u64 kva = 0; 3343 3344 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva); 3345 } 3346 3347 /** 3348 * irdma_del_memlist - Deleting pbl list entries for CQ/QP 3349 * @iwmr: iwmr for IB's user page addresses 3350 * @ucontext: ptr to user context 3351 */ 3352 static void irdma_del_memlist(struct irdma_mr *iwmr, 3353 struct irdma_ucontext *ucontext) 3354 { 3355 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3356 unsigned long flags; 3357 3358 switch (iwmr->type) { 3359 case IRDMA_MEMREG_TYPE_CQ: 3360 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 3361 if (iwpbl->on_list) { 3362 iwpbl->on_list = false; 3363 list_del(&iwpbl->list); 3364 } 3365 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 3366 break; 3367 case IRDMA_MEMREG_TYPE_QP: 3368 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 3369 if (iwpbl->on_list) { 3370 iwpbl->on_list = false; 3371 list_del(&iwpbl->list); 3372 } 3373 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 3374 break; 3375 default: 3376 break; 3377 } 3378 } 3379 3380 /** 3381 * irdma_dereg_mr - deregister mr 3382 * @ib_mr: mr ptr for dereg 3383 * @udata: user data 3384 */ 3385 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) 3386 { 3387 struct irdma_mr *iwmr = to_iwmr(ib_mr); 3388 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 3389 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 3390 int ret; 3391 3392 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { 3393 if (iwmr->region) { 3394 struct irdma_ucontext *ucontext; 3395 3396 ucontext = rdma_udata_to_drv_context(udata, 3397 struct irdma_ucontext, 3398 ibucontext); 3399 irdma_del_memlist(iwmr, ucontext); 3400 } 3401 goto done; 3402 } 3403 3404 ret = irdma_hwdereg_mr(ib_mr); 3405 if (ret) 3406 return ret; 3407 3408 irdma_free_stag(iwdev, iwmr->stag); 3409 done: 3410 if (iwpbl->pbl_allocated) 3411 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 3412 3413 if (iwmr->region) 3414 ib_umem_release(iwmr->region); 3415 3416 kfree(iwmr); 3417 3418 return 0; 3419 } 3420 3421 /** 3422 * irdma_post_send - kernel application wr 3423 * @ibqp: qp ptr for wr 3424 * @ib_wr: work request ptr 3425 * @bad_wr: return of bad wr if err 3426 */ 3427 static int irdma_post_send(struct ib_qp *ibqp, 3428 const struct ib_send_wr *ib_wr, 3429 const struct ib_send_wr **bad_wr) 3430 { 3431 struct irdma_qp *iwqp; 3432 struct irdma_qp_uk *ukqp; 3433 struct irdma_sc_dev *dev; 3434 struct irdma_post_sq_info info; 3435 int err = 0; 3436 unsigned long flags; 3437 bool inv_stag; 3438 struct irdma_ah *ah; 3439 3440 iwqp = to_iwqp(ibqp); 3441 ukqp = &iwqp->sc_qp.qp_uk; 3442 dev = &iwqp->iwdev->rf->sc_dev; 3443 3444 
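/* Walk the caller's chain of work requests under the QP lock, converting
 * each ib_send_wr into the matching irdma SQ WQE; the doorbell is rung (or
 * the flush worker is kicked) once the whole chain has been posted.
 */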
spin_lock_irqsave(&iwqp->lock, flags); 3445 while (ib_wr) { 3446 memset(&info, 0, sizeof(info)); 3447 inv_stag = false; 3448 info.wr_id = (ib_wr->wr_id); 3449 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) 3450 info.signaled = true; 3451 if (ib_wr->send_flags & IB_SEND_FENCE) 3452 info.read_fence = true; 3453 switch (ib_wr->opcode) { 3454 case IB_WR_SEND_WITH_IMM: 3455 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { 3456 info.imm_data_valid = true; 3457 info.imm_data = ntohl(ib_wr->ex.imm_data); 3458 } else { 3459 err = -EINVAL; 3460 break; 3461 } 3462 fallthrough; 3463 case IB_WR_SEND: 3464 case IB_WR_SEND_WITH_INV: 3465 if (ib_wr->opcode == IB_WR_SEND || 3466 ib_wr->opcode == IB_WR_SEND_WITH_IMM) { 3467 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3468 info.op_type = IRDMA_OP_TYPE_SEND_SOL; 3469 else 3470 info.op_type = IRDMA_OP_TYPE_SEND; 3471 } else { 3472 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3473 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV; 3474 else 3475 info.op_type = IRDMA_OP_TYPE_SEND_INV; 3476 info.stag_to_inv = ib_wr->ex.invalidate_rkey; 3477 } 3478 3479 info.op.send.num_sges = ib_wr->num_sge; 3480 info.op.send.sg_list = ib_wr->sg_list; 3481 if (iwqp->ibqp.qp_type == IB_QPT_UD || 3482 iwqp->ibqp.qp_type == IB_QPT_GSI) { 3483 ah = to_iwah(ud_wr(ib_wr)->ah); 3484 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; 3485 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; 3486 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; 3487 } 3488 3489 if (ib_wr->send_flags & IB_SEND_INLINE) 3490 err = irdma_uk_inline_send(ukqp, &info, false); 3491 else 3492 err = irdma_uk_send(ukqp, &info, false); 3493 break; 3494 case IB_WR_RDMA_WRITE_WITH_IMM: 3495 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { 3496 info.imm_data_valid = true; 3497 info.imm_data = ntohl(ib_wr->ex.imm_data); 3498 } else { 3499 err = -EINVAL; 3500 break; 3501 } 3502 fallthrough; 3503 case IB_WR_RDMA_WRITE: 3504 if (ib_wr->send_flags & IB_SEND_SOLICITED) 3505 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL; 3506 else 3507 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; 3508 3509 info.op.rdma_write.num_lo_sges = ib_wr->num_sge; 3510 info.op.rdma_write.lo_sg_list = ib_wr->sg_list; 3511 info.op.rdma_write.rem_addr.addr = 3512 rdma_wr(ib_wr)->remote_addr; 3513 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; 3514 if (ib_wr->send_flags & IB_SEND_INLINE) 3515 err = irdma_uk_inline_rdma_write(ukqp, &info, false); 3516 else 3517 err = irdma_uk_rdma_write(ukqp, &info, false); 3518 break; 3519 case IB_WR_RDMA_READ_WITH_INV: 3520 inv_stag = true; 3521 fallthrough; 3522 case IB_WR_RDMA_READ: 3523 if (ib_wr->num_sge > 3524 dev->hw_attrs.uk_attrs.max_hw_read_sges) { 3525 err = -EINVAL; 3526 break; 3527 } 3528 info.op_type = IRDMA_OP_TYPE_RDMA_READ; 3529 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; 3530 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey; 3531 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; 3532 info.op.rdma_read.num_lo_sges = ib_wr->num_sge; 3533 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); 3534 break; 3535 case IB_WR_LOCAL_INV: 3536 info.op_type = IRDMA_OP_TYPE_INV_STAG; 3537 info.local_fence = info.read_fence; 3538 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; 3539 err = irdma_uk_stag_local_invalidate(ukqp, &info, true); 3540 break; 3541 case IB_WR_REG_MR: { 3542 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); 3543 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; 3544 struct irdma_fast_reg_stag_info stag_info = {}; 3545 3546 stag_info.signaled = 
info.signaled; 3547 stag_info.read_fence = info.read_fence; 3548 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access); 3549 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; 3550 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; 3551 stag_info.page_size = reg_wr(ib_wr)->mr->page_size; 3552 stag_info.wr_id = ib_wr->wr_id; 3553 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED; 3554 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; 3555 stag_info.total_len = iwmr->ibmr.length; 3556 stag_info.reg_addr_pa = *palloc->level1.addr; 3557 stag_info.first_pm_pbl_index = palloc->level1.idx; 3558 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; 3559 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR) 3560 stag_info.chunk_size = 1; 3561 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, 3562 true); 3563 break; 3564 } 3565 default: 3566 err = -EINVAL; 3567 ibdev_dbg(&iwqp->iwdev->ibdev, 3568 "VERBS: upost_send bad opcode = 0x%x\n", 3569 ib_wr->opcode); 3570 break; 3571 } 3572 3573 if (err) 3574 break; 3575 ib_wr = ib_wr->next; 3576 } 3577 3578 if (!iwqp->flush_issued) { 3579 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) 3580 irdma_uk_qp_post_wr(ukqp); 3581 spin_unlock_irqrestore(&iwqp->lock, flags); 3582 } else { 3583 spin_unlock_irqrestore(&iwqp->lock, flags); 3584 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, 3585 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); 3586 } 3587 if (err) 3588 *bad_wr = ib_wr; 3589 3590 return err; 3591 } 3592 3593 /** 3594 * irdma_post_recv - post receive wr for kernel application 3595 * @ibqp: ib qp pointer 3596 * @ib_wr: work request for receive 3597 * @bad_wr: bad wr caused an error 3598 */ 3599 static int irdma_post_recv(struct ib_qp *ibqp, 3600 const struct ib_recv_wr *ib_wr, 3601 const struct ib_recv_wr **bad_wr) 3602 { 3603 struct irdma_qp *iwqp; 3604 struct irdma_qp_uk *ukqp; 3605 struct irdma_post_rq_info post_recv = {}; 3606 unsigned long flags; 3607 int err = 0; 3608 3609 iwqp = to_iwqp(ibqp); 3610 ukqp = &iwqp->sc_qp.qp_uk; 3611 3612 spin_lock_irqsave(&iwqp->lock, flags); 3613 while (ib_wr) { 3614 post_recv.num_sges = ib_wr->num_sge; 3615 post_recv.wr_id = ib_wr->wr_id; 3616 post_recv.sg_list = ib_wr->sg_list; 3617 err = irdma_uk_post_receive(ukqp, &post_recv); 3618 if (err) { 3619 ibdev_dbg(&iwqp->iwdev->ibdev, 3620 "VERBS: post_recv err %d\n", err); 3621 goto out; 3622 } 3623 3624 ib_wr = ib_wr->next; 3625 } 3626 3627 out: 3628 spin_unlock_irqrestore(&iwqp->lock, flags); 3629 if (iwqp->flush_issued) 3630 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, 3631 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); 3632 3633 if (err) 3634 *bad_wr = ib_wr; 3635 3636 return err; 3637 } 3638 3639 /** 3640 * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status 3641 * @opcode: iwarp flush code 3642 */ 3643 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode) 3644 { 3645 switch (opcode) { 3646 case FLUSH_PROT_ERR: 3647 return IB_WC_LOC_PROT_ERR; 3648 case FLUSH_REM_ACCESS_ERR: 3649 return IB_WC_REM_ACCESS_ERR; 3650 case FLUSH_LOC_QP_OP_ERR: 3651 return IB_WC_LOC_QP_OP_ERR; 3652 case FLUSH_REM_OP_ERR: 3653 return IB_WC_REM_OP_ERR; 3654 case FLUSH_LOC_LEN_ERR: 3655 return IB_WC_LOC_LEN_ERR; 3656 case FLUSH_GENERAL_ERR: 3657 return IB_WC_WR_FLUSH_ERR; 3658 case FLUSH_RETRY_EXC_ERR: 3659 return IB_WC_RETRY_EXC_ERR; 3660 case FLUSH_MW_BIND_ERR: 3661 return IB_WC_MW_BIND_ERR; 3662 case FLUSH_REM_INV_REQ_ERR: 3663 return IB_WC_REM_INV_REQ_ERR; 3664 case FLUSH_FATAL_ERR: 3665 default: 3666 
return IB_WC_FATAL_ERR; 3667 } 3668 } 3669 3670 /** 3671 * irdma_process_cqe - process cqe info 3672 * @entry: processed cqe 3673 * @cq_poll_info: cqe info 3674 */ 3675 static void irdma_process_cqe(struct ib_wc *entry, 3676 struct irdma_cq_poll_info *cq_poll_info) 3677 { 3678 struct irdma_sc_qp *qp; 3679 3680 entry->wc_flags = 0; 3681 entry->pkey_index = 0; 3682 entry->wr_id = cq_poll_info->wr_id; 3683 3684 qp = cq_poll_info->qp_handle; 3685 entry->qp = qp->qp_uk.back_qp; 3686 3687 if (cq_poll_info->error) { 3688 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ? 3689 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; 3690 3691 entry->vendor_err = cq_poll_info->major_err << 16 | 3692 cq_poll_info->minor_err; 3693 } else { 3694 entry->status = IB_WC_SUCCESS; 3695 if (cq_poll_info->imm_valid) { 3696 entry->ex.imm_data = htonl(cq_poll_info->imm_data); 3697 entry->wc_flags |= IB_WC_WITH_IMM; 3698 } 3699 if (cq_poll_info->ud_smac_valid) { 3700 ether_addr_copy(entry->smac, cq_poll_info->ud_smac); 3701 entry->wc_flags |= IB_WC_WITH_SMAC; 3702 } 3703 3704 if (cq_poll_info->ud_vlan_valid) { 3705 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; 3706 3707 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; 3708 if (vlan) { 3709 entry->vlan_id = vlan; 3710 entry->wc_flags |= IB_WC_WITH_VLAN; 3711 } 3712 } else { 3713 entry->sl = 0; 3714 } 3715 } 3716 3717 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { 3718 set_ib_wc_op_sq(cq_poll_info, entry); 3719 } else { 3720 set_ib_wc_op_rq(cq_poll_info, entry, 3721 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM); 3722 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && 3723 cq_poll_info->stag_invalid_set) { 3724 entry->ex.invalidate_rkey = cq_poll_info->inv_stag; 3725 entry->wc_flags |= IB_WC_WITH_INVALIDATE; 3726 } 3727 } 3728 3729 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { 3730 entry->src_qp = cq_poll_info->ud_src_qpn; 3731 entry->slid = 0; 3732 entry->wc_flags |= 3733 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); 3734 entry->network_hdr_type = cq_poll_info->ipv4 ? 
3735 RDMA_NETWORK_IPV4 : 3736 RDMA_NETWORK_IPV6; 3737 } else { 3738 entry->src_qp = cq_poll_info->qp_id; 3739 } 3740 3741 entry->byte_len = cq_poll_info->bytes_xfered; 3742 } 3743 3744 /** 3745 * irdma_poll_one - poll one entry of the CQ 3746 * @ukcq: ukcq to poll 3747 * @cur_cqe: current CQE info to be filled in 3748 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ 3749 * 3750 * Returns the internal irdma device error code or 0 on success 3751 */ 3752 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq, 3753 struct irdma_cq_poll_info *cur_cqe, 3754 struct ib_wc *entry) 3755 { 3756 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe); 3757 3758 if (ret) 3759 return ret; 3760 3761 irdma_process_cqe(entry, cur_cqe); 3762 3763 return 0; 3764 } 3765 3766 /** 3767 * __irdma_poll_cq - poll cq for completion (kernel apps) 3768 * @iwcq: cq to poll 3769 * @num_entries: number of entries to poll 3770 * @entry: wr of a completed entry 3771 */ 3772 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) 3773 { 3774 struct list_head *tmp_node, *list_node; 3775 struct irdma_cq_buf *last_buf = NULL; 3776 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; 3777 struct irdma_cq_buf *cq_buf; 3778 int ret; 3779 struct irdma_device *iwdev; 3780 struct irdma_cq_uk *ukcq; 3781 bool cq_new_cqe = false; 3782 int resized_bufs = 0; 3783 int npolled = 0; 3784 3785 iwdev = to_iwdev(iwcq->ibcq.device); 3786 ukcq = &iwcq->sc_cq.cq_uk; 3787 3788 /* go through the list of previously resized CQ buffers */ 3789 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 3790 cq_buf = container_of(list_node, struct irdma_cq_buf, list); 3791 while (npolled < num_entries) { 3792 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled); 3793 if (!ret) { 3794 ++npolled; 3795 cq_new_cqe = true; 3796 continue; 3797 } 3798 if (ret == -ENOENT) 3799 break; 3800 /* QP using the CQ is destroyed. Skip reporting this CQE */ 3801 if (ret == -EFAULT) { 3802 cq_new_cqe = true; 3803 continue; 3804 } 3805 goto error; 3806 } 3807 3808 /* save the resized CQ buffer which received the last cqe */ 3809 if (cq_new_cqe) 3810 last_buf = cq_buf; 3811 cq_new_cqe = false; 3812 } 3813 3814 /* check the current CQ for new cqes */ 3815 while (npolled < num_entries) { 3816 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled); 3817 if (ret == -ENOENT) { 3818 ret = irdma_generated_cmpls(iwcq, cur_cqe); 3819 if (!ret) 3820 irdma_process_cqe(entry + npolled, cur_cqe); 3821 } 3822 if (!ret) { 3823 ++npolled; 3824 cq_new_cqe = true; 3825 continue; 3826 } 3827 3828 if (ret == -ENOENT) 3829 break; 3830 /* QP using the CQ is destroyed. 
Skip reporting this CQE */ 3831 if (ret == -EFAULT) { 3832 cq_new_cqe = true; 3833 continue; 3834 } 3835 goto error; 3836 } 3837 3838 if (cq_new_cqe) 3839 /* all previous CQ resizes are complete */ 3840 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL); 3841 else if (last_buf) 3842 /* only CQ resizes up to the last_buf are complete */ 3843 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf); 3844 if (resized_bufs) 3845 /* report to the HW the number of complete CQ resizes */ 3846 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs); 3847 3848 return npolled; 3849 error: 3850 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n", 3851 __func__, ret); 3852 3853 return ret; 3854 } 3855 3856 /** 3857 * irdma_poll_cq - poll cq for completion (kernel apps) 3858 * @ibcq: cq to poll 3859 * @num_entries: number of entries to poll 3860 * @entry: array of work completions for the completed entries 3861 */ 3862 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries, 3863 struct ib_wc *entry) 3864 { 3865 struct irdma_cq *iwcq; 3866 unsigned long flags; 3867 int ret; 3868 3869 iwcq = to_iwcq(ibcq); 3870 3871 spin_lock_irqsave(&iwcq->lock, flags); 3872 ret = __irdma_poll_cq(iwcq, num_entries, entry); 3873 spin_unlock_irqrestore(&iwcq->lock, flags); 3874 3875 return ret; 3876 } 3877 3878 /** 3879 * irdma_req_notify_cq - arm cq for kernel application 3880 * @ibcq: cq to arm 3881 * @notify_flags: notification flags 3882 */ 3883 static int irdma_req_notify_cq(struct ib_cq *ibcq, 3884 enum ib_cq_notify_flags notify_flags) 3885 { 3886 struct irdma_cq *iwcq; 3887 struct irdma_cq_uk *ukcq; 3888 unsigned long flags; 3889 enum irdma_cmpl_notify cq_notify; 3890 bool promo_event = false; 3891 int ret = 0; 3892 3893 cq_notify = notify_flags == IB_CQ_SOLICITED ? 3894 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT; 3895 iwcq = to_iwcq(ibcq); 3896 ukcq = &iwcq->sc_cq.cq_uk; 3897 3898 spin_lock_irqsave(&iwcq->lock, flags); 3899 /* Only promote to arm the CQ for any event if the last arm event was solicited. 
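A request for all completion events is not dropped just because the CQ is still armed from an earlier solicited-only request.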
*/ 3900 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED) 3901 promo_event = true; 3902 3903 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) { 3904 iwcq->last_notify = cq_notify; 3905 irdma_uk_cq_request_notification(ukcq, cq_notify); 3906 } 3907 3908 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && 3909 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated))) 3910 ret = 1; 3911 spin_unlock_irqrestore(&iwcq->lock, flags); 3912 3913 return ret; 3914 } 3915 3916 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num, 3917 struct ib_port_immutable *immutable) 3918 { 3919 struct ib_port_attr attr; 3920 int err; 3921 3922 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 3923 err = ib_query_port(ibdev, port_num, &attr); 3924 if (err) 3925 return err; 3926 3927 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 3928 immutable->pkey_tbl_len = attr.pkey_tbl_len; 3929 immutable->gid_tbl_len = attr.gid_tbl_len; 3930 3931 return 0; 3932 } 3933 3934 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num, 3935 struct ib_port_immutable *immutable) 3936 { 3937 struct ib_port_attr attr; 3938 int err; 3939 3940 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; 3941 err = ib_query_port(ibdev, port_num, &attr); 3942 if (err) 3943 return err; 3944 immutable->gid_tbl_len = attr.gid_tbl_len; 3945 3946 return 0; 3947 } 3948 3949 static const struct rdma_stat_desc irdma_hw_stat_names[] = { 3950 /* gen1 - 32-bit */ 3951 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards", 3952 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts", 3953 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes", 3954 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards", 3955 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts", 3956 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes", 3957 [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs", 3958 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors", 3959 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors", 3960 [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors", 3961 /* gen1 - 64-bit */ 3962 [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets", 3963 [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts", 3964 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd", 3965 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts", 3966 [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets", 3967 [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts", 3968 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd", 3969 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts", 3970 [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets", 3971 [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts", 3972 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd", 3973 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts", 3974 [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets", 3975 [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts", 3976 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd", 3977 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts", 3978 [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs", 3979 [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs", 3980 [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads", 3981 [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends", 3982 [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites", 3983 [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads", 3984 
[IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends", 3985 [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites", 3986 [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd", 3987 [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv", 3988 3989 /* gen2 - 32-bit */ 3990 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled", 3991 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored", 3992 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent", 3993 /* gen2 - 64-bit */ 3994 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets", 3995 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets", 3996 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets", 3997 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets", 3998 [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP", 3999 [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP", 4000 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd", 4001 4002 }; 4003 4004 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) 4005 { 4006 struct irdma_device *iwdev = to_iwdev(dev); 4007 4008 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", 4009 irdma_fw_major_ver(&iwdev->rf->sc_dev), 4010 irdma_fw_minor_ver(&iwdev->rf->sc_dev)); 4011 } 4012 4013 /** 4014 * irdma_alloc_hw_port_stats - Allocate a hw stats structure 4015 * @ibdev: device pointer from stack 4016 * @port_num: port number 4017 */ 4018 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev, 4019 u32 port_num) 4020 { 4021 struct irdma_device *iwdev = to_iwdev(ibdev); 4022 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 4023 4024 int num_counters = dev->hw_attrs.max_stat_idx; 4025 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; 4026 4027 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters, 4028 lifespan); 4029 } 4030 4031 /** 4032 * irdma_get_hw_stats - Populates the rdma_hw_stats structure 4033 * @ibdev: device pointer from stack 4034 * @stats: stats pointer from stack 4035 * @port_num: port number 4036 * @index: which hw counter the stack is requesting we update 4037 */ 4038 static int irdma_get_hw_stats(struct ib_device *ibdev, 4039 struct rdma_hw_stats *stats, u32 port_num, 4040 int index) 4041 { 4042 struct irdma_device *iwdev = to_iwdev(ibdev); 4043 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats; 4044 4045 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) 4046 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true); 4047 else 4048 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat); 4049 4050 memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters); 4051 4052 return stats->num_counters; 4053 } 4054 4055 /** 4056 * irdma_query_gid - Query port GID 4057 * @ibdev: device pointer from stack 4058 * @port: port number 4059 * @index: Entry index 4060 * @gid: Global ID 4061 */ 4062 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index, 4063 union ib_gid *gid) 4064 { 4065 struct irdma_device *iwdev = to_iwdev(ibdev); 4066 4067 memset(gid->raw, 0, sizeof(gid->raw)); 4068 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); 4069 4070 return 0; 4071 } 4072 4073 /** 4074 * mcast_list_add - Add a new mcast item to list 4075 * @rf: RDMA PCI function 4076 * @new_elem: pointer to element to add 4077 */ 4078 static void mcast_list_add(struct irdma_pci_f *rf, 4079 struct mc_table_list *new_elem) 4080 { 4081 list_add(&new_elem->list, &rf->mc_qht_list.list); 4082 } 4083 4084 /** 4085 * mcast_list_del - Remove an mcast item from list 4086 * @mc_qht_elem: pointer to mcast table 
list element 4087 */ 4088 static void mcast_list_del(struct mc_table_list *mc_qht_elem) 4089 { 4090 if (mc_qht_elem) 4091 list_del(&mc_qht_elem->list); 4092 } 4093 4094 /** 4095 * mcast_list_lookup_ip - Search mcast list for address 4096 * @rf: RDMA PCI function 4097 * @ip_mcast: pointer to mcast IP address 4098 */ 4099 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf, 4100 u32 *ip_mcast) 4101 { 4102 struct mc_table_list *mc_qht_el; 4103 struct list_head *pos, *q; 4104 4105 list_for_each_safe (pos, q, &rf->mc_qht_list.list) { 4106 mc_qht_el = list_entry(pos, struct mc_table_list, list); 4107 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, 4108 sizeof(mc_qht_el->mc_info.dest_ip))) 4109 return mc_qht_el; 4110 } 4111 4112 return NULL; 4113 } 4114 4115 /** 4116 * irdma_mcast_cqp_op - perform a mcast cqp operation 4117 * @iwdev: irdma device 4118 * @mc_grp_ctx: mcast group info 4119 * @op: operation 4120 * 4121 * returns error status 4122 */ 4123 static int irdma_mcast_cqp_op(struct irdma_device *iwdev, 4124 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) 4125 { 4126 struct cqp_cmds_info *cqp_info; 4127 struct irdma_cqp_request *cqp_request; 4128 int status; 4129 4130 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 4131 if (!cqp_request) 4132 return -ENOMEM; 4133 4134 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; 4135 cqp_info = &cqp_request->info; 4136 cqp_info->cqp_cmd = op; 4137 cqp_info->post_sq = 1; 4138 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; 4139 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; 4140 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 4141 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 4142 4143 return status; 4144 } 4145 4146 /** 4147 * irdma_mcast_mac - Get the multicast MAC for an IP address 4148 * @ip_addr: IPv4 or IPv6 address 4149 * @mac: pointer to result MAC address 4150 * @ipv4: flag indicating IPv4 or IPv6 4151 * 4152 */ 4153 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4) 4154 { 4155 u8 *ip = (u8 *)ip_addr; 4156 4157 if (ipv4) { 4158 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00, 4159 0x00, 0x00}; 4160 4161 mac4[3] = ip[2] & 0x7F; 4162 mac4[4] = ip[1]; 4163 mac4[5] = ip[0]; 4164 ether_addr_copy(mac, mac4); 4165 } else { 4166 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00, 4167 0x00, 0x00}; 4168 4169 mac6[2] = ip[3]; 4170 mac6[3] = ip[2]; 4171 mac6[4] = ip[1]; 4172 mac6[5] = ip[0]; 4173 ether_addr_copy(mac, mac6); 4174 } 4175 } 4176 4177 /** 4178 * irdma_attach_mcast - attach a qp to a multicast group 4179 * @ibqp: ptr to qp 4180 * @ibgid: pointer to global ID 4181 * @lid: local ID 4182 * 4183 * returns error status 4184 */ 4185 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 4186 { 4187 struct irdma_qp *iwqp = to_iwqp(ibqp); 4188 struct irdma_device *iwdev = iwqp->iwdev; 4189 struct irdma_pci_f *rf = iwdev->rf; 4190 struct mc_table_list *mc_qht_elem; 4191 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 4192 unsigned long flags; 4193 u32 ip_addr[4] = {}; 4194 u32 mgn; 4195 u32 no_mgs; 4196 int ret = 0; 4197 bool ipv4; 4198 u16 vlan_id; 4199 union irdma_sockaddr sgid_addr; 4200 unsigned char dmac[ETH_ALEN]; 4201 4202 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 4203 4204 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { 4205 irdma_copy_ip_ntohl(ip_addr, 4206 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4207 irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL); 4208 ipv4 = false; 4209 ibdev_dbg(&iwdev->ibdev, 
4210 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num, 4211 ip_addr); 4212 irdma_mcast_mac(ip_addr, dmac, false); 4213 } else { 4214 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4215 ipv4 = true; 4216 vlan_id = irdma_get_vlan_ipv4(ip_addr); 4217 irdma_mcast_mac(ip_addr, dmac, true); 4218 ibdev_dbg(&iwdev->ibdev, 4219 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n", 4220 ibqp->qp_num, ip_addr, dmac); 4221 } 4222 4223 spin_lock_irqsave(&rf->qh_list_lock, flags); 4224 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 4225 if (!mc_qht_elem) { 4226 struct irdma_dma_mem *dma_mem_mc; 4227 4228 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4229 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL); 4230 if (!mc_qht_elem) 4231 return -ENOMEM; 4232 4233 mc_qht_elem->mc_info.ipv4_valid = ipv4; 4234 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, 4235 sizeof(mc_qht_elem->mc_info.dest_ip)); 4236 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, 4237 &mgn, &rf->next_mcg); 4238 if (ret) { 4239 kfree(mc_qht_elem); 4240 return -ENOMEM; 4241 } 4242 4243 mc_qht_elem->mc_info.mgn = mgn; 4244 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; 4245 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX, 4246 IRDMA_HW_PAGE_SIZE); 4247 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device, 4248 dma_mem_mc->size, 4249 &dma_mem_mc->pa, 4250 GFP_KERNEL); 4251 if (!dma_mem_mc->va) { 4252 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); 4253 kfree(mc_qht_elem); 4254 return -ENOMEM; 4255 } 4256 4257 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; 4258 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, 4259 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); 4260 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; 4261 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; 4262 if (vlan_id < VLAN_N_VID) 4263 mc_qht_elem->mc_grp_ctx.vlan_valid = true; 4264 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; 4265 mc_qht_elem->mc_grp_ctx.qs_handle = 4266 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; 4267 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); 4268 4269 spin_lock_irqsave(&rf->qh_list_lock, flags); 4270 mcast_list_add(rf, mc_qht_elem); 4271 } else { 4272 if (mc_qht_elem->mc_grp_ctx.no_of_mgs == 4273 IRDMA_MAX_MGS_PER_CTX) { 4274 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4275 return -ENOMEM; 4276 } 4277 } 4278 4279 mcg_info.qp_id = iwqp->ibqp.qp_num; 4280 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; 4281 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4282 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4283 4284 /* Only if there is a change do we need to modify or create */ 4285 if (!no_mgs) { 4286 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4287 IRDMA_OP_MC_CREATE); 4288 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4289 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4290 IRDMA_OP_MC_MODIFY); 4291 } else { 4292 return 0; 4293 } 4294 4295 if (ret) 4296 goto error; 4297 4298 return 0; 4299 4300 error: 4301 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4302 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4303 mcast_list_del(mc_qht_elem); 4304 dma_free_coherent(rf->hw.device, 4305 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4306 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4307 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4308 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4309 irdma_free_rsrc(rf, rf->allocated_mcgs, 4310 mc_qht_elem->mc_grp_ctx.mg_id); 4311 kfree(mc_qht_elem); 4312 } 4313 4314 return ret; 4315 } 4316 4317 /** 4318 * 
irdma_detach_mcast - detach a qp from a multicast group 4319 * @ibqp: ptr to qp 4320 * @ibgid: pointer to global ID 4321 * @lid: local ID 4322 * 4323 * returns error status 4324 */ 4325 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 4326 { 4327 struct irdma_qp *iwqp = to_iwqp(ibqp); 4328 struct irdma_device *iwdev = iwqp->iwdev; 4329 struct irdma_pci_f *rf = iwdev->rf; 4330 u32 ip_addr[4] = {}; 4331 struct mc_table_list *mc_qht_elem; 4332 struct irdma_mcast_grp_ctx_entry_info mcg_info = {}; 4333 int ret; 4334 unsigned long flags; 4335 union irdma_sockaddr sgid_addr; 4336 4337 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 4338 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) 4339 irdma_copy_ip_ntohl(ip_addr, 4340 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4341 else 4342 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4343 4344 spin_lock_irqsave(&rf->qh_list_lock, flags); 4345 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 4346 if (!mc_qht_elem) { 4347 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4348 ibdev_dbg(&iwdev->ibdev, 4349 "VERBS: address not found MCG\n"); 4350 return 0; 4351 } 4352 4353 mcg_info.qp_id = iwqp->ibqp.qp_num; 4354 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 4355 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 4356 mcast_list_del(mc_qht_elem); 4357 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4358 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4359 IRDMA_OP_MC_DESTROY); 4360 if (ret) { 4361 ibdev_dbg(&iwdev->ibdev, 4362 "VERBS: failed MC_DESTROY MCG\n"); 4363 spin_lock_irqsave(&rf->qh_list_lock, flags); 4364 mcast_list_add(rf, mc_qht_elem); 4365 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4366 return -EAGAIN; 4367 } 4368 4369 dma_free_coherent(rf->hw.device, 4370 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size, 4371 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va, 4372 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa); 4373 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL; 4374 irdma_free_rsrc(rf, rf->allocated_mcgs, 4375 mc_qht_elem->mc_grp_ctx.mg_id); 4376 kfree(mc_qht_elem); 4377 } else { 4378 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 4379 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 4380 IRDMA_OP_MC_MODIFY); 4381 if (ret) { 4382 ibdev_dbg(&iwdev->ibdev, 4383 "VERBS: failed Modify MCG\n"); 4384 return ret; 4385 } 4386 } 4387 4388 return 0; 4389 } 4390 4391 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep) 4392 { 4393 struct irdma_pci_f *rf = iwdev->rf; 4394 int err; 4395 4396 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx, 4397 &rf->next_ah); 4398 if (err) 4399 return err; 4400 4401 err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep, 4402 irdma_gsi_ud_qp_ah_cb, &ah->sc_ah); 4403 4404 if (err) { 4405 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail"); 4406 goto err_ah_create; 4407 } 4408 4409 if (!sleep) { 4410 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD; 4411 4412 do { 4413 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); 4414 mdelay(1); 4415 } while (!ah->sc_ah.ah_info.ah_valid && --cnt); 4416 4417 if (!cnt) { 4418 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out"); 4419 err = -ETIMEDOUT; 4420 goto err_ah_create; 4421 } 4422 } 4423 return 0; 4424 4425 err_ah_create: 4426 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx); 4427 4428 return err; 4429 } 4430 4431 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr) 4432 { 4433 
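	/*
	 * Build the HW AH context from the rdma_ah_attr: copy the GRH fields,
	 * convert the source/destination GIDs to IP addresses, derive the
	 * destination MAC (multicast destinations via irdma_mcast_mac),
	 * resolve the ARP cache index and set up the VLAN tag/priority.
	 */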
struct irdma_pd *pd = to_iwpd(ibah->pd); 4434 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 4435 struct rdma_ah_attr *ah_attr = attr->ah_attr; 4436 const struct ib_gid_attr *sgid_attr; 4437 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 4438 struct irdma_pci_f *rf = iwdev->rf; 4439 struct irdma_sc_ah *sc_ah; 4440 struct irdma_ah_info *ah_info; 4441 union irdma_sockaddr sgid_addr, dgid_addr; 4442 int err; 4443 u8 dmac[ETH_ALEN]; 4444 4445 ah->pd = pd; 4446 sc_ah = &ah->sc_ah; 4447 sc_ah->ah_info.vsi = &iwdev->vsi; 4448 irdma_sc_init_ah(&rf->sc_dev, sc_ah); 4449 ah->sgid_index = ah_attr->grh.sgid_index; 4450 sgid_attr = ah_attr->grh.sgid_attr; 4451 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid)); 4452 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); 4453 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid); 4454 ah->av.attrs = *ah_attr; 4455 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr); 4456 ah_info = &sc_ah->ah_info; 4457 ah_info->pd_idx = pd->sc_pd.pd_id; 4458 if (ah_attr->ah_flags & IB_AH_GRH) { 4459 ah_info->flow_label = ah_attr->grh.flow_label; 4460 ah_info->hop_ttl = ah_attr->grh.hop_limit; 4461 ah_info->tc_tos = ah_attr->grh.traffic_class; 4462 } 4463 4464 ether_addr_copy(dmac, ah_attr->roce.dmac); 4465 if (ah->av.net_type == RDMA_NETWORK_IPV4) { 4466 ah_info->ipv4_valid = true; 4467 ah_info->dest_ip_addr[0] = 4468 ntohl(dgid_addr.saddr_in.sin_addr.s_addr); 4469 ah_info->src_ip_addr[0] = 4470 ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 4471 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], 4472 ah_info->dest_ip_addr[0]); 4473 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) { 4474 ah_info->do_lpbk = true; 4475 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true); 4476 } 4477 } else { 4478 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, 4479 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4480 irdma_copy_ip_ntohl(ah_info->src_ip_addr, 4481 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32); 4482 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, 4483 ah_info->dest_ip_addr); 4484 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) { 4485 ah_info->do_lpbk = true; 4486 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false); 4487 } 4488 } 4489 4490 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, 4491 ah_info->mac_addr); 4492 if (err) 4493 return err; 4494 4495 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, 4496 ah_info->ipv4_valid, dmac); 4497 4498 if (ah_info->dst_arpindex == -1) 4499 return -EINVAL; 4500 4501 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) 4502 ah_info->vlan_tag = 0; 4503 4504 if (ah_info->vlan_tag < VLAN_N_VID) { 4505 u8 prio = rt_tos2priority(ah_info->tc_tos); 4506 4507 prio = irdma_roce_get_vlan_prio(sgid_attr, prio); 4508 4509 ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT; 4510 ah_info->insert_vlan_tag = true; 4511 } 4512 4513 return 0; 4514 } 4515 4516 /** 4517 * irdma_ah_exists - Check for existing identical AH 4518 * @iwdev: irdma device 4519 * @new_ah: AH to check for 4520 * 4521 * returns true if AH is found, false if not found. 
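 * Caller must hold iwdev->ah_tbl_lock.  On a match, a reference is taken on
 * the existing parent AH and new_ah->parent_ah is pointed at it.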
4522 */ 4523 static bool irdma_ah_exists(struct irdma_device *iwdev, 4524 struct irdma_ah *new_ah) 4525 { 4526 struct irdma_ah *ah; 4527 u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^ 4528 new_ah->sc_ah.ah_info.dest_ip_addr[1] ^ 4529 new_ah->sc_ah.ah_info.dest_ip_addr[2] ^ 4530 new_ah->sc_ah.ah_info.dest_ip_addr[3]; 4531 4532 hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) { 4533 /* Set ah_valid and ah_id the same so memcmp can work */ 4534 new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx; 4535 new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid; 4536 if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info, 4537 sizeof(ah->sc_ah.ah_info))) { 4538 refcount_inc(&ah->refcnt); 4539 new_ah->parent_ah = ah; 4540 return true; 4541 } 4542 } 4543 4544 return false; 4545 } 4546 4547 /** 4548 * irdma_destroy_ah - Destroy address handle 4549 * @ibah: pointer to address handle 4550 * @ah_flags: flags for sleepable 4551 */ 4552 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) 4553 { 4554 struct irdma_device *iwdev = to_iwdev(ibah->device); 4555 struct irdma_ah *ah = to_iwah(ibah); 4556 4557 if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) { 4558 mutex_lock(&iwdev->ah_tbl_lock); 4559 if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { 4560 mutex_unlock(&iwdev->ah_tbl_lock); 4561 return 0; 4562 } 4563 hash_del(&ah->parent_ah->list); 4564 kfree(ah->parent_ah); 4565 mutex_unlock(&iwdev->ah_tbl_lock); 4566 } 4567 4568 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, 4569 false, NULL, ah); 4570 4571 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, 4572 ah->sc_ah.ah_info.ah_idx); 4573 4574 return 0; 4575 } 4576 4577 /** 4578 * irdma_create_user_ah - create user address handle 4579 * @ibah: address handle 4580 * @attr: address handle attributes 4581 * @udata: User data 4582 * 4583 * returns 0 on success, error otherwise 4584 */ 4585 static int irdma_create_user_ah(struct ib_ah *ibah, 4586 struct rdma_ah_init_attr *attr, 4587 struct ib_udata *udata) 4588 { 4589 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) 4590 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 4591 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 4592 struct irdma_create_ah_resp uresp; 4593 struct irdma_ah *parent_ah; 4594 int err; 4595 4596 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) 4597 return -EINVAL; 4598 4599 err = irdma_setup_ah(ibah, attr); 4600 if (err) 4601 return err; 4602 mutex_lock(&iwdev->ah_tbl_lock); 4603 if (!irdma_ah_exists(iwdev, ah)) { 4604 err = irdma_create_hw_ah(iwdev, ah, true); 4605 if (err) { 4606 mutex_unlock(&iwdev->ah_tbl_lock); 4607 return err; 4608 } 4609 /* Add new AH to list */ 4610 parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL); 4611 if (parent_ah) { 4612 u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^ 4613 parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^ 4614 parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^ 4615 parent_ah->sc_ah.ah_info.dest_ip_addr[3]; 4616 4617 ah->parent_ah = parent_ah; 4618 hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key); 4619 refcount_set(&parent_ah->refcnt, 1); 4620 } 4621 } 4622 mutex_unlock(&iwdev->ah_tbl_lock); 4623 4624 uresp.ah_id = ah->sc_ah.ah_info.ah_idx; 4625 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); 4626 if (err) 4627 irdma_destroy_ah(ibah, attr->flags); 4628 4629 return err; 4630 } 4631 4632 /** 4633 * irdma_create_ah - create address handle 4634 * @ibah: address handle 4635 * @attr: address handle 
attributes 4636 * @udata: NULL 4637 * 4638 * returns 0 on success, error otherwise 4639 */ 4640 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, 4641 struct ib_udata *udata) 4642 { 4643 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah); 4644 struct irdma_device *iwdev = to_iwdev(ibah->pd->device); 4645 int err; 4646 4647 err = irdma_setup_ah(ibah, attr); 4648 if (err) 4649 return err; 4650 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE); 4651 4652 return err; 4653 } 4654 4655 /** 4656 * irdma_query_ah - Query address handle 4657 * @ibah: pointer to address handle 4658 * @ah_attr: address handle attributes 4659 */ 4660 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) 4661 { 4662 struct irdma_ah *ah = to_iwah(ibah); 4663 4664 memset(ah_attr, 0, sizeof(*ah_attr)); 4665 if (ah->av.attrs.ah_flags & IB_AH_GRH) { 4666 ah_attr->ah_flags = IB_AH_GRH; 4667 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; 4668 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; 4669 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; 4670 ah_attr->grh.sgid_index = ah->sgid_index; 4671 memcpy(&ah_attr->grh.dgid, &ah->dgid, 4672 sizeof(ah_attr->grh.dgid)); 4673 } 4674 4675 return 0; 4676 } 4677 4678 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, 4679 u32 port_num) 4680 { 4681 return IB_LINK_LAYER_ETHERNET; 4682 } 4683 4684 static const struct ib_device_ops irdma_roce_dev_ops = { 4685 .attach_mcast = irdma_attach_mcast, 4686 .create_ah = irdma_create_ah, 4687 .create_user_ah = irdma_create_user_ah, 4688 .destroy_ah = irdma_destroy_ah, 4689 .detach_mcast = irdma_detach_mcast, 4690 .get_link_layer = irdma_get_link_layer, 4691 .get_port_immutable = irdma_roce_port_immutable, 4692 .modify_qp = irdma_modify_qp_roce, 4693 .query_ah = irdma_query_ah, 4694 .query_pkey = irdma_query_pkey, 4695 }; 4696 4697 static const struct ib_device_ops irdma_iw_dev_ops = { 4698 .get_port_immutable = irdma_iw_port_immutable, 4699 .iw_accept = irdma_accept, 4700 .iw_add_ref = irdma_qp_add_ref, 4701 .iw_connect = irdma_connect, 4702 .iw_create_listen = irdma_create_listen, 4703 .iw_destroy_listen = irdma_destroy_listen, 4704 .iw_get_qp = irdma_get_qp, 4705 .iw_reject = irdma_reject, 4706 .iw_rem_ref = irdma_qp_rem_ref, 4707 .modify_qp = irdma_modify_qp, 4708 .query_gid = irdma_query_gid, 4709 }; 4710 4711 static const struct ib_device_ops irdma_dev_ops = { 4712 .owner = THIS_MODULE, 4713 .driver_id = RDMA_DRIVER_IRDMA, 4714 .uverbs_abi_ver = IRDMA_ABI_VER, 4715 4716 .alloc_hw_port_stats = irdma_alloc_hw_port_stats, 4717 .alloc_mr = irdma_alloc_mr, 4718 .alloc_mw = irdma_alloc_mw, 4719 .alloc_pd = irdma_alloc_pd, 4720 .alloc_ucontext = irdma_alloc_ucontext, 4721 .create_cq = irdma_create_cq, 4722 .create_qp = irdma_create_qp, 4723 .dealloc_driver = irdma_ib_dealloc_device, 4724 .dealloc_mw = irdma_dealloc_mw, 4725 .dealloc_pd = irdma_dealloc_pd, 4726 .dealloc_ucontext = irdma_dealloc_ucontext, 4727 .dereg_mr = irdma_dereg_mr, 4728 .destroy_cq = irdma_destroy_cq, 4729 .destroy_qp = irdma_destroy_qp, 4730 .disassociate_ucontext = irdma_disassociate_ucontext, 4731 .get_dev_fw_str = irdma_get_dev_fw_str, 4732 .get_dma_mr = irdma_get_dma_mr, 4733 .get_hw_stats = irdma_get_hw_stats, 4734 .map_mr_sg = irdma_map_mr_sg, 4735 .mmap = irdma_mmap, 4736 .mmap_free = irdma_mmap_free, 4737 .poll_cq = irdma_poll_cq, 4738 .post_recv = irdma_post_recv, 4739 .post_send = irdma_post_send, 4740 .query_device = irdma_query_device, 
4741 .query_port = irdma_query_port, 4742 .query_qp = irdma_query_qp, 4743 .reg_user_mr = irdma_reg_user_mr, 4744 .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf, 4745 .rereg_user_mr = irdma_rereg_user_mr, 4746 .req_notify_cq = irdma_req_notify_cq, 4747 .resize_cq = irdma_resize_cq, 4748 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd), 4749 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext), 4750 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah), 4751 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq), 4752 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw), 4753 INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp), 4754 }; 4755 4756 /** 4757 * irdma_init_roce_device - initialization of roce rdma device 4758 * @iwdev: irdma device 4759 */ 4760 static void irdma_init_roce_device(struct irdma_device *iwdev) 4761 { 4762 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; 4763 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 4764 iwdev->netdev->dev_addr); 4765 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops); 4766 } 4767 4768 /** 4769 * irdma_init_iw_device - initialization of iwarp rdma device 4770 * @iwdev: irdma device 4771 */ 4772 static void irdma_init_iw_device(struct irdma_device *iwdev) 4773 { 4774 struct net_device *netdev = iwdev->netdev; 4775 4776 iwdev->ibdev.node_type = RDMA_NODE_RNIC; 4777 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 4778 netdev->dev_addr); 4779 memcpy(iwdev->ibdev.iw_ifname, netdev->name, 4780 sizeof(iwdev->ibdev.iw_ifname)); 4781 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops); 4782 } 4783 4784 /** 4785 * irdma_init_rdma_device - initialization of rdma device 4786 * @iwdev: irdma device 4787 */ 4788 static void irdma_init_rdma_device(struct irdma_device *iwdev) 4789 { 4790 struct pci_dev *pcidev = iwdev->rf->pcidev; 4791 4792 if (iwdev->roce_mode) 4793 irdma_init_roce_device(iwdev); 4794 else 4795 irdma_init_iw_device(iwdev); 4796 4797 iwdev->ibdev.phys_port_cnt = 1; 4798 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; 4799 iwdev->ibdev.dev.parent = &pcidev->dev; 4800 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops); 4801 } 4802 4803 /** 4804 * irdma_port_ibevent - indicate port event 4805 * @iwdev: irdma device 4806 */ 4807 void irdma_port_ibevent(struct irdma_device *iwdev) 4808 { 4809 struct ib_event event; 4810 4811 event.device = &iwdev->ibdev; 4812 event.element.port_num = 1; 4813 event.event = 4814 iwdev->iw_status ? 
			      IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}

/**
 * irdma_ib_unregister_device - unregister rdma device from IB core
 * @iwdev: irdma device
 */
void irdma_ib_unregister_device(struct irdma_device *iwdev)
{
	iwdev->iw_status = 0;
	irdma_port_ibevent(iwdev);
	ib_unregister_device(&iwdev->ibdev);
}

/**
 * irdma_ib_register_device - register irdma device to IB core
 * @iwdev: irdma device
 */
int irdma_ib_register_device(struct irdma_device *iwdev)
{
	int ret;

	irdma_init_rdma_device(iwdev);

	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
	if (ret)
		goto error;
	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
	if (ret)
		goto error;

	iwdev->iw_status = 1;
	irdma_port_ibevent(iwdev);

	return 0;

error:
	if (ret)
		ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");

	return ret;
}

/**
 * irdma_ib_dealloc_device - deallocate device resources
 * @ibdev: ib device
 *
 * callback from ibdev dealloc_driver to deallocate resources
 * for the irdma device
 */
void irdma_ib_dealloc_device(struct ib_device *ibdev)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);

	irdma_rt_deinit_hw(iwdev);
	irdma_ctrl_deinit_hw(iwdev->rf);
	kfree(iwdev->rf);
}
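
/*
 * Usage note (illustrative sketch only, not compiled driver logic): the
 * registration entry points above are expected to be paired by the driver
 * core roughly as below.  The label and surrounding setup are hypothetical;
 * only the irdma_ib_* calls are defined in this file.
 *
 *	err = irdma_ib_register_device(iwdev);
 *	if (err)
 *		goto err_deinit_rt;	// undo earlier rt/HW init (hypothetical label)
 *	...
 *	// on remove:
 *	irdma_ib_unregister_device(iwdev);
 *	// the IB core later invokes .dealloc_driver = irdma_ib_dealloc_device
 *	// to release HW resources and free iwdev->rf
 */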