Lines Matching +full:pd +full:- +full:revision

1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2015 - 2023 Intel Corporation
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
38 * irdma_query_device - get device attributes
49 struct irdma_pci_f *rf = iwdev->rf; in irdma_query_device()
50 struct pci_dev *pcidev = iwdev->rf->pcidev; in irdma_query_device()
51 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; in irdma_query_device()
53 if (udata->inlen || udata->outlen) in irdma_query_device()
54 return -EINVAL; in irdma_query_device()
57 addrconf_addr_eui48((u8 *)&props->sys_image_guid, in irdma_query_device()
58 if_getlladdr(iwdev->netdev)); in irdma_query_device()
59 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | in irdma_query_device()
60 irdma_fw_minor_ver(&rf->sc_dev); in irdma_query_device()
61 props->device_cap_flags = IB_DEVICE_MEM_WINDOW | in irdma_query_device()
63 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; in irdma_query_device()
64 props->vendor_id = pcidev->vendor; in irdma_query_device()
65 props->vendor_part_id = pcidev->device; in irdma_query_device()
66 props->hw_ver = pcidev->revision; in irdma_query_device()
67 props->page_size_cap = hw_attrs->page_size_cap; in irdma_query_device()
68 props->max_mr_size = hw_attrs->max_mr_size; in irdma_query_device()
69 props->max_qp = rf->max_qp - rf->used_qps; in irdma_query_device()
70 props->max_qp_wr = hw_attrs->max_qp_wr; in irdma_query_device()
72 props->max_cq = rf->max_cq - rf->used_cqs; in irdma_query_device()
73 props->max_cqe = rf->max_cqe - 1; in irdma_query_device()
74 props->max_mr = rf->max_mr - rf->used_mrs; in irdma_query_device()
75 props->max_pd = rf->max_pd - rf->used_pds; in irdma_query_device()
76 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; in irdma_query_device()
77 props->max_qp_rd_atom = hw_attrs->max_hw_ird; in irdma_query_device()
78 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; in irdma_query_device()
80 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; in irdma_query_device()
81 props->max_pkeys = IRDMA_PKEY_TBL_SZ; in irdma_query_device()
82 props->max_ah = rf->max_ah; in irdma_query_device()
83 if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) { in irdma_query_device()
84 props->max_mcast_grp = rf->max_mcg; in irdma_query_device()
85 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; in irdma_query_device()
86 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX; in irdma_query_device()
89 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR; in irdma_query_device()
90 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2) in irdma_query_device()
91 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; in irdma_query_device()
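
Note: props->fw_ver above packs the firmware major version into the upper 32 bits of a u64 and the minor version into the lower 32. A minimal userspace sketch of the same packing and unpacking (the major/minor numbers are stand-ins, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t major = 1, minor = 14;    /* stand-ins for irdma_fw_major_ver()/irdma_fw_minor_ver() */
    uint64_t fw_ver = (uint64_t)major << 32 | minor;

    /* a consumer of props->fw_ver unpacks it the same way */
    printf("fw %u.%u\n", (uint32_t)(fw_ver >> 32), (uint32_t)(fw_ver & 0xffffffff));
    return 0;
}
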
102 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) in irdma_mmap_legacy()
103 return -EINVAL; in irdma_mmap_legacy()
105 vma->vm_private_data = ucontext; in irdma_mmap_legacy()
106 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] + in irdma_mmap_legacy()
107 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; in irdma_mmap_legacy()
109 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, in irdma_mmap_legacy()
110 pgprot_noncached(vma->vm_page_prot), NULL); in irdma_mmap_legacy()
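
Note: irdma_mmap_legacy() above (and irdma_mmap() below) turns a byte offset within PCI BAR 0 into a page frame number by adding the BAR's bus start address and shifting right by PAGE_SHIFT. A hedged userspace illustration of that arithmetic (the BAR start, doorbell offset, and 4 KiB page size are assumed values, not read from a device):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12    /* assumes 4 KiB pages */

int main(void)
{
    uint64_t bar_start = 0xd0000000;    /* hypothetical pci_resource_start(pcidev, 0) */
    uint64_t db_off = 0x2000;           /* hypothetical hw_regs[IRDMA_DB_ADDR_OFFSET] value */
    uint64_t pfn = (db_off + bar_start) >> PAGE_SHIFT;

    printf("pfn = 0x%llx\n", (unsigned long long)pfn);
    return 0;
}
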
131 entry->bar_offset = bar_offset; in irdma_user_mmap_entry_insert()
132 entry->mmap_flag = mmap_flag; in irdma_user_mmap_entry_insert()
134 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, in irdma_user_mmap_entry_insert()
135 &entry->rdma_entry, PAGE_SIZE); in irdma_user_mmap_entry_insert()
140 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); in irdma_user_mmap_entry_insert()
142 return &entry->rdma_entry; in irdma_user_mmap_entry_insert()
146 * irdma_mmap - user memory map
161 /* Legacy support for libi40iw with hard-coded mmap key */ in irdma_mmap()
162 if (ucontext->legacy_mode) in irdma_mmap()
165 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); in irdma_mmap()
167 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_mmap()
169 vma->vm_pgoff); in irdma_mmap()
170 return -EINVAL; in irdma_mmap()
174 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_mmap()
175 "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset, in irdma_mmap()
176 entry->mmap_flag); in irdma_mmap()
178 pfn = (entry->bar_offset + in irdma_mmap()
179 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT; in irdma_mmap()
181 switch (entry->mmap_flag) { in irdma_mmap()
184 pgprot_noncached(vma->vm_page_prot), in irdma_mmap()
189 pgprot_writecombine(vma->vm_page_prot), in irdma_mmap()
193 ret = -EINVAL; in irdma_mmap()
197 irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_mmap()
199 entry->bar_offset, entry->mmap_flag, ret); in irdma_mmap()
206 * irdma_alloc_push_page - allocate a push page for qp
214 struct irdma_device *iwdev = iwqp->iwdev; in irdma_alloc_push_page()
215 struct irdma_sc_qp *qp = &iwqp->sc_qp; in irdma_alloc_push_page()
218 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); in irdma_alloc_push_page()
222 cqp_info = &cqp_request->info; in irdma_alloc_push_page()
223 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE; in irdma_alloc_push_page()
224 cqp_info->post_sq = 1; in irdma_alloc_push_page()
225 cqp_info->in.u.manage_push_page.info.push_idx = 0; in irdma_alloc_push_page()
226 cqp_info->in.u.manage_push_page.info.qs_handle = in irdma_alloc_push_page()
227 qp->vsi->qos[qp->user_pri].qs_handle; in irdma_alloc_push_page()
228 cqp_info->in.u.manage_push_page.info.free_page = 0; in irdma_alloc_push_page()
229 cqp_info->in.u.manage_push_page.info.push_page_type = 0; in irdma_alloc_push_page()
230 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_alloc_push_page()
231 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; in irdma_alloc_push_page()
233 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_alloc_push_page()
234 if (!status && cqp_request->compl_info.op_ret_val < in irdma_alloc_push_page()
235 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) { in irdma_alloc_push_page()
236 qp->push_idx = cqp_request->compl_info.op_ret_val; in irdma_alloc_push_page()
237 qp->push_offset = 0; in irdma_alloc_push_page()
240 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_alloc_push_page()
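
Note: irdma_alloc_push_page() is one instance of the driver's recurring control-QP (CQP) pattern: allocate and take a reference on a request, fill in cqp_info, post the command, then always drop the reference regardless of the outcome. A compact userspace mock of that get/fill/post/put discipline (the types and helpers here are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct cqp_request { int refcnt; int cmd; };

static struct cqp_request *alloc_and_get_request(int cmd)
{
    struct cqp_request *r = calloc(1, sizeof(*r));

    if (r) { r->refcnt = 1; r->cmd = cmd; }
    return r;
}

static void put_request(struct cqp_request *r)
{
    if (--r->refcnt == 0)
        free(r);
}

int main(void)
{
    struct cqp_request *req = alloc_and_get_request(42);

    if (!req)
        return 1;                             /* -ENOMEM in the driver */
    printf("posting cmd %d\n", req->cmd);     /* stands in for irdma_handle_cqp_op() */
    put_request(req);                         /* dropped on success and failure alike */
    return 0;
}
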
244 * irdma_get_pbl - Retrieve pbl from a list given a virtual
256 if (iwpbl->user_base == va) { in irdma_get_pbl()
257 list_del(&iwpbl->list); in irdma_get_pbl()
258 iwpbl->on_list = false; in irdma_get_pbl()
267 * irdma_clean_cqes - clean cq entries for qp
274 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; in irdma_clean_cqes()
277 spin_lock_irqsave(&iwcq->lock, flags); in irdma_clean_cqes()
278 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); in irdma_clean_cqes()
279 spin_unlock_irqrestore(&iwcq->lock, flags); in irdma_clean_cqes()
283 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; in irdma_compute_push_wqe_offset()
285 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) { in irdma_compute_push_wqe_offset()
301 if (iwqp->push_db_mmap_entry) { in irdma_remove_push_mmap_entries()
302 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry); in irdma_remove_push_mmap_entries()
303 iwqp->push_db_mmap_entry = NULL; in irdma_remove_push_mmap_entries()
305 if (iwqp->push_wqe_mmap_entry) { in irdma_remove_push_mmap_entries()
306 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); in irdma_remove_push_mmap_entries()
307 iwqp->push_wqe_mmap_entry = NULL; in irdma_remove_push_mmap_entries()
317 struct irdma_device *iwdev = ucontext->iwdev; in irdma_setup_push_mmap_entries()
320 WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2); in irdma_setup_push_mmap_entries()
322 bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx); in irdma_setup_push_mmap_entries()
324 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext, in irdma_setup_push_mmap_entries()
327 if (!iwqp->push_wqe_mmap_entry) in irdma_setup_push_mmap_entries()
328 return -ENOMEM; in irdma_setup_push_mmap_entries()
332 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext, in irdma_setup_push_mmap_entries()
335 if (!iwqp->push_db_mmap_entry) { in irdma_setup_push_mmap_entries()
336 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry); in irdma_setup_push_mmap_entries()
337 return -ENOMEM; in irdma_setup_push_mmap_entries()
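
Note: irdma_setup_push_mmap_entries() illustrates the unwind rule for paired resources: when the second mmap entry fails to insert, the first must be removed before returning -ENOMEM. A small sketch of the same shape with stand-in allocators:

#include <stdlib.h>

static void *insert_entry(void) { return malloc(1); }    /* stand-in for irdma_user_mmap_entry_insert() */
static void remove_entry(void *e) { free(e); }           /* stand-in for rdma_user_mmap_entry_remove() */

static int setup_pair(void **wqe, void **db)
{
    *wqe = insert_entry();
    if (!*wqe)
        return -1;               /* -ENOMEM in the driver */
    *db = insert_entry();
    if (!*db) {
        remove_entry(*wqe);      /* undo the first insert when the second fails */
        *wqe = NULL;
        return -1;
    }
    return 0;
}

int main(void)
{
    void *wqe, *db;

    return setup_pair(&wqe, &db) ? 1 : 0;
}
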
344 * irdma_setup_virt_qp - setup for allocation of virtual qp
354 struct irdma_pbl *iwpbl = iwqp->iwpbl; in irdma_setup_virt_qp()
355 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; in irdma_setup_virt_qp()
357 iwqp->page = qpmr->sq_page; in irdma_setup_virt_qp()
358 init_info->shadow_area_pa = qpmr->shadow; in irdma_setup_virt_qp()
359 if (iwpbl->pbl_allocated) { in irdma_setup_virt_qp()
360 init_info->virtual_map = true; in irdma_setup_virt_qp()
361 init_info->sq_pa = qpmr->sq_pbl.idx; in irdma_setup_virt_qp()
362 init_info->rq_pa = qpmr->rq_pbl.idx; in irdma_setup_virt_qp()
364 init_info->sq_pa = qpmr->sq_pbl.addr; in irdma_setup_virt_qp()
365 init_info->rq_pa = qpmr->rq_pbl.addr; in irdma_setup_virt_qp()
370 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
385 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; in irdma_setup_umode_qp()
391 min(sizeof(req), udata->inlen)); in irdma_setup_umode_qp()
393 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_setup_umode_qp()
398 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; in irdma_setup_umode_qp()
399 iwqp->user_mode = 1; in irdma_setup_umode_qp()
401 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode; in irdma_setup_umode_qp()
402 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); in irdma_setup_umode_qp()
403 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs, in irdma_setup_umode_qp()
404 &ucontext->qp_reg_mem_list); in irdma_setup_umode_qp()
405 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); in irdma_setup_umode_qp()
407 if (!iwqp->iwpbl) { in irdma_setup_umode_qp()
408 ret = -ENODATA; in irdma_setup_umode_qp()
409 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_setup_umode_qp()
415 if (!ucontext->use_raw_attrs) { in irdma_setup_umode_qp()
420 * iwqp->max_send_wr/max_recv_wr in the kernel. in irdma_setup_umode_qp()
422 iwqp->max_send_wr = init_attr->cap.max_send_wr; in irdma_setup_umode_qp()
423 iwqp->max_recv_wr = init_attr->cap.max_recv_wr; in irdma_setup_umode_qp()
424 ukinfo->sq_size = init_attr->cap.max_send_wr; in irdma_setup_umode_qp()
425 ukinfo->rq_size = init_attr->cap.max_recv_wr; in irdma_setup_umode_qp()
426 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift); in irdma_setup_umode_qp()
428 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, in irdma_setup_umode_qp()
429 &ukinfo->sq_shift); in irdma_setup_umode_qp()
433 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, in irdma_setup_umode_qp()
434 &ukinfo->rq_shift); in irdma_setup_umode_qp()
438 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; in irdma_setup_umode_qp()
439 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; in irdma_setup_umode_qp()
440 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; in irdma_setup_umode_qp()
441 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; in irdma_setup_umode_qp()
444 iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) in irdma_setup_umode_qp()
445 ukinfo->start_wqe_idx = 4; in irdma_setup_umode_qp()
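
Note: the user-mode path derives the advertised work-request counts from the computed ring depth and per-WQE shift: usable WRs are the depth minus the reserved quanta, scaled down by the shift, while the ring size is the full depth scaled by the shift. A worked example with assumed numbers (the depth, shift, and reserved count are not the driver's real values):

#include <stdio.h>

#define SQ_RSVD 1    /* assumed stand-in for IRDMA_SQ_RSVD */

int main(void)
{
    unsigned int sq_depth = 256, sq_shift = 2;    /* hypothetical results of the depth/shift calc */
    unsigned int max_send_wr = (sq_depth - SQ_RSVD) >> sq_shift;    /* 63 */
    unsigned int sq_size = sq_depth >> sq_shift;                    /* 64 */

    printf("max_send_wr=%u sq_size=%u\n", max_send_wr, sq_size);
    return 0;
}
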
452 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
464 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem; in irdma_setup_kmode_qp()
467 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; in irdma_setup_kmode_qp()
469 status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth, in irdma_setup_kmode_qp()
470 &ukinfo->sq_shift); in irdma_setup_kmode_qp()
474 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth, in irdma_setup_kmode_qp()
475 &ukinfo->rq_shift); in irdma_setup_kmode_qp()
479 iwqp->kqp.sq_wrid_mem = in irdma_setup_kmode_qp()
480 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); in irdma_setup_kmode_qp()
481 if (!iwqp->kqp.sq_wrid_mem) in irdma_setup_kmode_qp()
482 return -ENOMEM; in irdma_setup_kmode_qp()
484 iwqp->kqp.rq_wrid_mem = in irdma_setup_kmode_qp()
485 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL); in irdma_setup_kmode_qp()
486 if (!iwqp->kqp.rq_wrid_mem) { in irdma_setup_kmode_qp()
487 kfree(iwqp->kqp.sq_wrid_mem); in irdma_setup_kmode_qp()
488 iwqp->kqp.sq_wrid_mem = NULL; in irdma_setup_kmode_qp()
489 return -ENOMEM; in irdma_setup_kmode_qp()
492 iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL); in irdma_setup_kmode_qp()
494 if (!iwqp->kqp.sig_trk_mem) { in irdma_setup_kmode_qp()
495 kfree(iwqp->kqp.sq_wrid_mem); in irdma_setup_kmode_qp()
496 iwqp->kqp.sq_wrid_mem = NULL; in irdma_setup_kmode_qp()
497 kfree(iwqp->kqp.rq_wrid_mem); in irdma_setup_kmode_qp()
498 iwqp->kqp.rq_wrid_mem = NULL; in irdma_setup_kmode_qp()
499 return -ENOMEM; in irdma_setup_kmode_qp()
501 ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem; in irdma_setup_kmode_qp()
502 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; in irdma_setup_kmode_qp()
503 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; in irdma_setup_kmode_qp()
505 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE; in irdma_setup_kmode_qp()
508 mem->size = size; in irdma_setup_kmode_qp()
509 mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size, in irdma_setup_kmode_qp()
511 if (!mem->va) { in irdma_setup_kmode_qp()
512 kfree(iwqp->kqp.sq_wrid_mem); in irdma_setup_kmode_qp()
513 iwqp->kqp.sq_wrid_mem = NULL; in irdma_setup_kmode_qp()
514 kfree(iwqp->kqp.rq_wrid_mem); in irdma_setup_kmode_qp()
515 iwqp->kqp.rq_wrid_mem = NULL; in irdma_setup_kmode_qp()
516 return -ENOMEM; in irdma_setup_kmode_qp()
519 ukinfo->sq = mem->va; in irdma_setup_kmode_qp()
520 info->sq_pa = mem->pa; in irdma_setup_kmode_qp()
521 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth]; in irdma_setup_kmode_qp()
522 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE); in irdma_setup_kmode_qp()
523 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem; in irdma_setup_kmode_qp()
524 info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE); in irdma_setup_kmode_qp()
525 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift; in irdma_setup_kmode_qp()
526 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift; in irdma_setup_kmode_qp()
527 ukinfo->qp_id = iwqp->ibqp.qp_num; in irdma_setup_kmode_qp()
529 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift; in irdma_setup_kmode_qp()
530 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift; in irdma_setup_kmode_qp()
531 init_attr->cap.max_send_wr = iwqp->max_send_wr; in irdma_setup_kmode_qp()
532 init_attr->cap.max_recv_wr = iwqp->max_recv_wr; in irdma_setup_kmode_qp()
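
Note: irdma_setup_kmode_qp() repeats the same kfree pairs at each failure point. A common alternative shape for multi-step allocations is a single goto-based unwind ladder, shown here as a minimal sketch rather than a drop-in refactor of the driver function (free(NULL) being a no-op is what lets one ladder cover every exit):

#include <stdlib.h>

static int setup(void)
{
    void *sq_wrid = NULL, *rq_wrid = NULL, *sig_trk = NULL;

    sq_wrid = calloc(256, 8);
    if (!sq_wrid)
        goto err;
    rq_wrid = calloc(256, 8);
    if (!rq_wrid)
        goto err;
    sig_trk = calloc(256, 4);
    if (!sig_trk)
        goto err;
    /* success: in the driver these live on in iwqp->kqp; freed at process exit here */
    return 0;
err:
    free(sig_trk);
    free(rq_wrid);
    free(sq_wrid);
    return -1;    /* -ENOMEM in the driver */
}

int main(void) { return setup() ? 1 : 0; }
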
540 struct irdma_pci_f *rf = iwqp->iwdev->rf; in irdma_cqp_create_qp_cmd()
546 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_cqp_create_qp_cmd()
548 return -ENOMEM; in irdma_cqp_create_qp_cmd()
550 cqp_info = &cqp_request->info; in irdma_cqp_create_qp_cmd()
551 qp_info = &cqp_request->info.in.u.qp_create.info; in irdma_cqp_create_qp_cmd()
553 qp_info->mac_valid = true; in irdma_cqp_create_qp_cmd()
554 qp_info->cq_num_valid = true; in irdma_cqp_create_qp_cmd()
555 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE; in irdma_cqp_create_qp_cmd()
557 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE; in irdma_cqp_create_qp_cmd()
558 cqp_info->post_sq = 1; in irdma_cqp_create_qp_cmd()
559 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; in irdma_cqp_create_qp_cmd()
560 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; in irdma_cqp_create_qp_cmd()
562 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_create_qp_cmd()
571 struct irdma_device *iwdev = iwqp->iwdev; in irdma_roce_fill_and_set_qpctx_info()
572 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_roce_fill_and_set_qpctx_info()
576 udp_info = &iwqp->udp_info; in irdma_roce_fill_and_set_qpctx_info()
577 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu)); in irdma_roce_fill_and_set_qpctx_info()
578 udp_info->cwnd = iwdev->roce_cwnd; in irdma_roce_fill_and_set_qpctx_info()
579 udp_info->rexmit_thresh = 2; in irdma_roce_fill_and_set_qpctx_info()
580 udp_info->rnr_nak_thresh = 2; in irdma_roce_fill_and_set_qpctx_info()
581 udp_info->src_port = 0xc000; in irdma_roce_fill_and_set_qpctx_info()
582 udp_info->dst_port = ROCE_V2_UDP_DPORT; in irdma_roce_fill_and_set_qpctx_info()
583 roce_info = &iwqp->roce_info; in irdma_roce_fill_and_set_qpctx_info()
584 ether_addr_copy(roce_info->mac_addr, if_getlladdr(iwdev->netdev)); in irdma_roce_fill_and_set_qpctx_info()
586 roce_info->rd_en = true; in irdma_roce_fill_and_set_qpctx_info()
587 roce_info->wr_rdresp_en = true; in irdma_roce_fill_and_set_qpctx_info()
588 roce_info->dcqcn_en = false; in irdma_roce_fill_and_set_qpctx_info()
589 roce_info->rtomin = iwdev->roce_rtomin; in irdma_roce_fill_and_set_qpctx_info()
591 roce_info->ack_credits = iwdev->roce_ackcreds; in irdma_roce_fill_and_set_qpctx_info()
592 roce_info->ird_size = dev->hw_attrs.max_hw_ird; in irdma_roce_fill_and_set_qpctx_info()
593 roce_info->ord_size = dev->hw_attrs.max_hw_ord; in irdma_roce_fill_and_set_qpctx_info()
595 if (!iwqp->user_mode) { in irdma_roce_fill_and_set_qpctx_info()
596 roce_info->priv_mode_en = true; in irdma_roce_fill_and_set_qpctx_info()
597 roce_info->fast_reg_en = true; in irdma_roce_fill_and_set_qpctx_info()
598 roce_info->udprivcq_en = true; in irdma_roce_fill_and_set_qpctx_info()
600 roce_info->roce_tver = 0; in irdma_roce_fill_and_set_qpctx_info()
602 ctx_info->roce_info = &iwqp->roce_info; in irdma_roce_fill_and_set_qpctx_info()
603 ctx_info->udp_info = &iwqp->udp_info; in irdma_roce_fill_and_set_qpctx_info()
604 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); in irdma_roce_fill_and_set_qpctx_info()
611 struct irdma_device *iwdev = iwqp->iwdev; in irdma_iw_fill_and_set_qpctx_info()
612 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_iw_fill_and_set_qpctx_info()
615 iwarp_info = &iwqp->iwarp_info; in irdma_iw_fill_and_set_qpctx_info()
616 ether_addr_copy(iwarp_info->mac_addr, if_getlladdr(iwdev->netdev)); in irdma_iw_fill_and_set_qpctx_info()
617 iwarp_info->rd_en = true; in irdma_iw_fill_and_set_qpctx_info()
618 iwarp_info->wr_rdresp_en = true; in irdma_iw_fill_and_set_qpctx_info()
619 iwarp_info->ecn_en = true; in irdma_iw_fill_and_set_qpctx_info()
620 iwarp_info->rtomin = 5; in irdma_iw_fill_and_set_qpctx_info()
622 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) in irdma_iw_fill_and_set_qpctx_info()
623 iwarp_info->ib_rd_en = true; in irdma_iw_fill_and_set_qpctx_info()
624 if (!iwqp->user_mode) { in irdma_iw_fill_and_set_qpctx_info()
625 iwarp_info->priv_mode_en = true; in irdma_iw_fill_and_set_qpctx_info()
626 iwarp_info->fast_reg_en = true; in irdma_iw_fill_and_set_qpctx_info()
628 iwarp_info->ddp_ver = 1; in irdma_iw_fill_and_set_qpctx_info()
629 iwarp_info->rdmap_ver = 1; in irdma_iw_fill_and_set_qpctx_info()
631 ctx_info->iwarp_info = &iwqp->iwarp_info; in irdma_iw_fill_and_set_qpctx_info()
632 ctx_info->iwarp_info_valid = true; in irdma_iw_fill_and_set_qpctx_info()
633 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); in irdma_iw_fill_and_set_qpctx_info()
634 ctx_info->iwarp_info_valid = false; in irdma_iw_fill_and_set_qpctx_info()
641 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_validate_qp_attrs()
642 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; in irdma_validate_qp_attrs()
644 if (init_attr->create_flags) in irdma_validate_qp_attrs()
645 return -EOPNOTSUPP; in irdma_validate_qp_attrs()
647 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || in irdma_validate_qp_attrs()
648 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || in irdma_validate_qp_attrs()
649 init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta || in irdma_validate_qp_attrs()
650 init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta || in irdma_validate_qp_attrs()
651 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) in irdma_validate_qp_attrs()
652 return -EINVAL; in irdma_validate_qp_attrs()
654 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { in irdma_validate_qp_attrs()
655 if (init_attr->qp_type != IB_QPT_RC && in irdma_validate_qp_attrs()
656 init_attr->qp_type != IB_QPT_UD && in irdma_validate_qp_attrs()
657 init_attr->qp_type != IB_QPT_GSI) in irdma_validate_qp_attrs()
658 return -EOPNOTSUPP; in irdma_validate_qp_attrs()
660 if (init_attr->qp_type != IB_QPT_RC) in irdma_validate_qp_attrs()
661 return -EOPNOTSUPP; in irdma_validate_qp_attrs()
672 if (iwqp->sc_qp.qp_uk.destroy_pending) in irdma_sched_qp_flush_work()
674 irdma_qp_add_ref(&iwqp->ibqp); in irdma_sched_qp_flush_work()
675 spin_lock_irqsave(&iwqp->dwork_flush_lock, flags); in irdma_sched_qp_flush_work()
676 if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, in irdma_sched_qp_flush_work()
678 irdma_qp_rem_ref(&iwqp->ibqp); in irdma_sched_qp_flush_work()
679 spin_unlock_irqrestore(&iwqp->dwork_flush_lock, flags); in irdma_sched_qp_flush_work()
690 irdma_qp_rem_ref(&iwqp->ibqp); in irdma_flush_worker()
698 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { in irdma_get_ib_acc_flags()
699 if (iwqp->roce_info.wr_rdresp_en) { in irdma_get_ib_acc_flags()
703 if (iwqp->roce_info.rd_en) in irdma_get_ib_acc_flags()
706 if (iwqp->iwarp_info.wr_rdresp_en) { in irdma_get_ib_acc_flags()
710 if (iwqp->iwarp_info.rd_en) in irdma_get_ib_acc_flags()
717 * irdma_query_qp - query qp attributes
728 struct irdma_sc_qp *qp = &iwqp->sc_qp; in irdma_query_qp()
733 attr->qp_state = iwqp->ibqp_state; in irdma_query_qp()
734 attr->cur_qp_state = iwqp->ibqp_state; in irdma_query_qp()
735 attr->cap.max_send_wr = iwqp->max_send_wr; in irdma_query_qp()
736 attr->cap.max_recv_wr = iwqp->max_recv_wr; in irdma_query_qp()
737 attr->cap.max_inline_data = qp->qp_uk.max_inline_data; in irdma_query_qp()
738 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; in irdma_query_qp()
739 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; in irdma_query_qp()
740 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp); in irdma_query_qp()
741 attr->port_num = 1; in irdma_query_qp()
742 if (rdma_protocol_roce(ibqp->device, 1)) { in irdma_query_qp()
743 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss); in irdma_query_qp()
744 attr->qkey = iwqp->roce_info.qkey; in irdma_query_qp()
745 attr->rq_psn = iwqp->udp_info.epsn; in irdma_query_qp()
746 attr->sq_psn = iwqp->udp_info.psn_nxt; in irdma_query_qp()
747 attr->dest_qp_num = iwqp->roce_info.dest_qp; in irdma_query_qp()
748 attr->pkey_index = iwqp->roce_info.p_key; in irdma_query_qp()
749 attr->retry_cnt = iwqp->udp_info.rexmit_thresh; in irdma_query_qp()
750 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; in irdma_query_qp()
751 attr->max_rd_atomic = iwqp->roce_info.ord_size; in irdma_query_qp()
752 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; in irdma_query_qp()
755 init_attr->event_handler = iwqp->ibqp.event_handler; in irdma_query_qp()
756 init_attr->qp_context = iwqp->ibqp.qp_context; in irdma_query_qp()
757 init_attr->send_cq = iwqp->ibqp.send_cq; in irdma_query_qp()
758 init_attr->recv_cq = iwqp->ibqp.recv_cq; in irdma_query_qp()
759 init_attr->cap = attr->cap; in irdma_query_qp()
767 if (!wait_event_timeout(iwqp->iwdev->suspend_wq, in irdma_wait_for_suspend()
768 !iwqp->suspend_pending, in irdma_wait_for_suspend()
770 iwqp->suspend_pending = false; in irdma_wait_for_suspend()
771 irdma_dev_warn(&iwqp->iwdev->ibdev, in irdma_wait_for_suspend()
773 iwqp->ibqp.qp_num, iwqp->last_aeq); in irdma_wait_for_suspend()
774 return -EBUSY; in irdma_wait_for_suspend()
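
Note: irdma_wait_for_suspend() (partially shown above) waits for a flag to clear with a timeout and treats expiry as -EBUSY after clearing the pending state itself. A userspace analogue of that bounded-wait contract using POSIX condition variables, with all names illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int suspend_pending = 1;    /* never cleared here, so the timeout path runs */

static int wait_for_suspend(int timeout_sec)
{
    struct timespec ts;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += timeout_sec;
    pthread_mutex_lock(&lock);
    while (suspend_pending && rc != ETIMEDOUT)
        rc = pthread_cond_timedwait(&cond, &lock, &ts);
    if (rc == ETIMEDOUT)
        suspend_pending = 0;    /* mirrors the driver clearing the flag on expiry */
    pthread_mutex_unlock(&lock);
    return rc == ETIMEDOUT ? -EBUSY : 0;
}

int main(void)
{
    printf("wait returned %d\n", wait_for_suspend(1));
    return 0;
}
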
781 * irdma_modify_qp_roce - modify qp request
793 struct irdma_pd *iwpd = to_iwpd(ibqp->pd); in irdma_modify_qp_roce()
795 struct irdma_device *iwdev = iwqp->iwdev; in irdma_modify_qp_roce()
796 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_modify_qp_roce()
807 ctx_info = &iwqp->ctx_info; in irdma_modify_qp_roce()
808 roce_info = &iwqp->roce_info; in irdma_modify_qp_roce()
809 udp_info = &iwqp->udp_info; in irdma_modify_qp_roce()
812 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || in irdma_modify_qp_roce()
813 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) in irdma_modify_qp_roce()
814 return -EINVAL; in irdma_modify_qp_roce()
818 return -EOPNOTSUPP; in irdma_modify_qp_roce()
821 roce_info->dest_qp = attr->dest_qp_num; in irdma_modify_qp_roce()
824 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index, in irdma_modify_qp_roce()
825 &roce_info->p_key); in irdma_modify_qp_roce()
831 roce_info->qkey = attr->qkey; in irdma_modify_qp_roce()
834 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu); in irdma_modify_qp_roce()
837 udp_info->psn_nxt = attr->sq_psn; in irdma_modify_qp_roce()
838 udp_info->lsn = 0xffff; in irdma_modify_qp_roce()
839 udp_info->psn_una = attr->sq_psn; in irdma_modify_qp_roce()
840 udp_info->psn_max = attr->sq_psn; in irdma_modify_qp_roce()
844 udp_info->epsn = attr->rq_psn; in irdma_modify_qp_roce()
847 udp_info->rnr_nak_thresh = attr->rnr_retry; in irdma_modify_qp_roce()
850 udp_info->rexmit_thresh = attr->retry_cnt; in irdma_modify_qp_roce()
852 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; in irdma_modify_qp_roce()
855 struct irdma_av *av = &iwqp->roce_ah.av; in irdma_modify_qp_roce()
859 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah)); in irdma_modify_qp_roce()
860 if (attr->ah_attr.ah_flags & IB_AH_GRH) { in irdma_modify_qp_roce()
861 udp_info->ttl = attr->ah_attr.grh.hop_limit; in irdma_modify_qp_roce()
862 udp_info->flow_label = attr->ah_attr.grh.flow_label; in irdma_modify_qp_roce()
863 udp_info->tos = attr->ah_attr.grh.traffic_class; in irdma_modify_qp_roce()
865 udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label, in irdma_modify_qp_roce()
866 ibqp->qp_num, in irdma_modify_qp_roce()
867 roce_info->dest_qp); in irdma_modify_qp_roce()
869 irdma_qp_rem_qos(&iwqp->sc_qp); in irdma_modify_qp_roce()
870 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri); in irdma_modify_qp_roce()
871 if (iwqp->sc_qp.vsi->dscp_mode) in irdma_modify_qp_roce()
872 ctx_info->user_pri = in irdma_modify_qp_roce()
873 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)]; in irdma_modify_qp_roce()
875 ctx_info->user_pri = rt_tos2priority(udp_info->tos); in irdma_modify_qp_roce()
880 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri)) in irdma_modify_qp_roce()
881 return -ENOMEM; in irdma_modify_qp_roce()
882 iwqp->sc_qp.user_pri = ctx_info->user_pri; in irdma_modify_qp_roce()
883 irdma_qp_add_qos(&iwqp->sc_qp); in irdma_modify_qp_roce()
885 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) in irdma_modify_qp_roce()
888 udp_info->insert_vlan_tag = true; in irdma_modify_qp_roce()
889 udp_info->vlan_tag = vlan_id | in irdma_modify_qp_roce()
890 ctx_info->user_pri << VLAN_PRIO_SHIFT; in irdma_modify_qp_roce()
892 udp_info->insert_vlan_tag = false; in irdma_modify_qp_roce()
895 av->attrs = attr->ah_attr; in irdma_modify_qp_roce()
896 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid); in irdma_modify_qp_roce()
897 if (av->net_type == RDMA_NETWORK_IPV6) { in irdma_modify_qp_roce()
899 av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32; in irdma_modify_qp_roce()
901 av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32; in irdma_modify_qp_roce()
903 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr); in irdma_modify_qp_roce()
904 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr); in irdma_modify_qp_roce()
906 udp_info->ipv4 = false; in irdma_modify_qp_roce()
908 } else if (av->net_type == RDMA_NETWORK_IPV4) { in irdma_modify_qp_roce()
909 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; in irdma_modify_qp_roce()
910 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; in irdma_modify_qp_roce()
914 udp_info->ipv4 = true; in irdma_modify_qp_roce()
915 udp_info->dest_ip_addr[0] = 0; in irdma_modify_qp_roce()
916 udp_info->dest_ip_addr[1] = 0; in irdma_modify_qp_roce()
917 udp_info->dest_ip_addr[2] = 0; in irdma_modify_qp_roce()
918 udp_info->dest_ip_addr[3] = local_ip[0]; in irdma_modify_qp_roce()
920 udp_info->local_ipaddr[0] = 0; in irdma_modify_qp_roce()
921 udp_info->local_ipaddr[1] = 0; in irdma_modify_qp_roce()
922 udp_info->local_ipaddr[2] = 0; in irdma_modify_qp_roce()
923 udp_info->local_ipaddr[3] = ntohl(saddr); in irdma_modify_qp_roce()
925 return -EINVAL; in irdma_modify_qp_roce()
927 udp_info->arp_idx = in irdma_modify_qp_roce()
928 irdma_add_arp(iwdev->rf, local_ip, in irdma_modify_qp_roce()
929 ah_attr_to_dmac(attr->ah_attr)); in irdma_modify_qp_roce()
933 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) { in irdma_modify_qp_roce()
934 irdma_dev_err(&iwdev->ibdev, in irdma_modify_qp_roce()
936 attr->max_rd_atomic, in irdma_modify_qp_roce()
937 dev->hw_attrs.max_hw_ord); in irdma_modify_qp_roce()
938 return -EINVAL; in irdma_modify_qp_roce()
940 if (attr->max_rd_atomic) in irdma_modify_qp_roce()
941 roce_info->ord_size = attr->max_rd_atomic; in irdma_modify_qp_roce()
946 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { in irdma_modify_qp_roce()
947 irdma_dev_err(&iwdev->ibdev, in irdma_modify_qp_roce()
949 attr->max_dest_rd_atomic, in irdma_modify_qp_roce()
950 dev->hw_attrs.max_hw_ird); in irdma_modify_qp_roce()
951 return -EINVAL; in irdma_modify_qp_roce()
953 if (attr->max_dest_rd_atomic) in irdma_modify_qp_roce()
954 roce_info->ird_size = attr->max_dest_rd_atomic; in irdma_modify_qp_roce()
958 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) in irdma_modify_qp_roce()
959 roce_info->wr_rdresp_en = true; in irdma_modify_qp_roce()
960 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) in irdma_modify_qp_roce()
961 roce_info->wr_rdresp_en = true; in irdma_modify_qp_roce()
962 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) in irdma_modify_qp_roce()
963 roce_info->rd_en = true; in irdma_modify_qp_roce()
966 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); in irdma_modify_qp_roce()
968 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_modify_qp_roce()
970 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, in irdma_modify_qp_roce()
971 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask); in irdma_modify_qp_roce()
973 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp_roce()
975 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, in irdma_modify_qp_roce()
976 iwqp->ibqp.qp_type, attr_mask)) { in irdma_modify_qp_roce()
977 irdma_dev_warn(&iwdev->ibdev, in irdma_modify_qp_roce()
979 iwqp->ibqp.qp_num, iwqp->ibqp_state, in irdma_modify_qp_roce()
980 attr->qp_state); in irdma_modify_qp_roce()
981 ret = -EINVAL; in irdma_modify_qp_roce()
984 info.curr_iwarp_state = iwqp->iwarp_state; in irdma_modify_qp_roce()
986 switch (attr->qp_state) { in irdma_modify_qp_roce()
988 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { in irdma_modify_qp_roce()
989 ret = -EINVAL; in irdma_modify_qp_roce()
993 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { in irdma_modify_qp_roce()
999 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { in irdma_modify_qp_roce()
1000 ret = -EINVAL; in irdma_modify_qp_roce()
1009 if (iwqp->ibqp_state < IB_QPS_RTR || in irdma_modify_qp_roce()
1010 iwqp->ibqp_state == IB_QPS_ERR) { in irdma_modify_qp_roce()
1011 ret = -EINVAL; in irdma_modify_qp_roce()
1020 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) in irdma_modify_qp_roce()
1021 iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp); in irdma_modify_qp_roce()
1022 udp_info->cwnd = iwdev->roce_cwnd; in irdma_modify_qp_roce()
1023 roce_info->ack_credits = iwdev->roce_ackcreds; in irdma_modify_qp_roce()
1024 if (iwdev->push_mode && udata && in irdma_modify_qp_roce()
1025 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && in irdma_modify_qp_roce()
1026 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { in irdma_modify_qp_roce()
1027 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
1029 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp_roce()
1033 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD) in irdma_modify_qp_roce()
1036 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) { in irdma_modify_qp_roce()
1037 ret = -EINVAL; in irdma_modify_qp_roce()
1043 iwqp->suspend_pending = true; in irdma_modify_qp_roce()
1048 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { in irdma_modify_qp_roce()
1049 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
1050 if (udata && udata->inlen) { in irdma_modify_qp_roce()
1052 min(sizeof(ureq), udata->inlen))) in irdma_modify_qp_roce()
1053 return -EINVAL; in irdma_modify_qp_roce()
1067 ret = -EINVAL; in irdma_modify_qp_roce()
1071 iwqp->ibqp_state = attr->qp_state; in irdma_modify_qp_roce()
1074 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; in irdma_modify_qp_roce()
1075 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; in irdma_modify_qp_roce()
1076 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); in irdma_modify_qp_roce()
1077 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
1081 ctx_info->rem_endpoint_idx = udp_info->arp_idx; in irdma_modify_qp_roce()
1083 return -EINVAL; in irdma_modify_qp_roce()
1089 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp_roce()
1090 if (iwqp->iwarp_state == info.curr_iwarp_state) { in irdma_modify_qp_roce()
1091 iwqp->iwarp_state = info.next_iwarp_state; in irdma_modify_qp_roce()
1092 iwqp->ibqp_state = attr->qp_state; in irdma_modify_qp_roce()
1094 if (iwqp->ibqp_state > IB_QPS_RTS && in irdma_modify_qp_roce()
1095 !iwqp->flush_issued) { in irdma_modify_qp_roce()
1096 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
1100 iwqp->flush_issued = 1; in irdma_modify_qp_roce()
1103 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
1106 iwqp->ibqp_state = attr->qp_state; in irdma_modify_qp_roce()
1108 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { in irdma_modify_qp_roce()
1112 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && in irdma_modify_qp_roce()
1113 !iwqp->push_wqe_mmap_entry && in irdma_modify_qp_roce()
1117 uresp.push_offset = iwqp->sc_qp.push_offset; in irdma_modify_qp_roce()
1119 uresp.rd_fence_rate = iwdev->rd_fence_rate; in irdma_modify_qp_roce()
1121 udata->outlen)); in irdma_modify_qp_roce()
1124 irdma_debug(&iwdev->rf->sc_dev, in irdma_modify_qp_roce()
1134 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp_roce()
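
Note: when the destination has a VLAN, the RoCE modify path above composes the 16-bit tag as the 12-bit VLAN ID OR'd with the user priority shifted into the PCP field. A one-line worked example (VLAN_PRIO_SHIFT is 13, PCP occupying the top three bits of the TCI):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13    /* PCP lives in the top 3 bits of the TCI */

int main(void)
{
    uint16_t vlan_id = 100, user_pri = 5;
    uint16_t vlan_tag = vlan_id | user_pri << VLAN_PRIO_SHIFT;

    printf("tci = 0x%04x\n", vlan_tag);    /* 0xa064: PCP=5, VID=100 */
    return 0;
}
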
1140 * irdma_modify_qp - modify qp request
1153 struct irdma_device *iwdev = iwqp->iwdev; in irdma_modify_qp()
1154 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_modify_qp()
1167 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) || in irdma_modify_qp()
1168 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN)) in irdma_modify_qp()
1169 return -EINVAL; in irdma_modify_qp()
1173 return -EOPNOTSUPP; in irdma_modify_qp()
1175 ctx_info = &iwqp->ctx_info; in irdma_modify_qp()
1176 offload_info = &iwqp->iwarp_info; in irdma_modify_qp()
1177 tcp_info = &iwqp->tcp_info; in irdma_modify_qp()
1178 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); in irdma_modify_qp()
1179 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_modify_qp()
1181 __builtin_return_address(0), ibqp->qp_num, attr->qp_state, in irdma_modify_qp()
1182 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq, in irdma_modify_qp()
1183 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask); in irdma_modify_qp()
1185 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp()
1187 info.curr_iwarp_state = iwqp->iwarp_state; in irdma_modify_qp()
1188 switch (attr->qp_state) { in irdma_modify_qp()
1191 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) { in irdma_modify_qp()
1192 err = -EINVAL; in irdma_modify_qp()
1196 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) { in irdma_modify_qp()
1200 if (iwdev->push_mode && udata && in irdma_modify_qp()
1201 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX && in irdma_modify_qp()
1202 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { in irdma_modify_qp()
1203 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1205 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp()
1209 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS || in irdma_modify_qp()
1210 !iwqp->cm_id) { in irdma_modify_qp()
1211 err = -EINVAL; in irdma_modify_qp()
1216 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED; in irdma_modify_qp()
1217 iwqp->hte_added = 1; in irdma_modify_qp()
1225 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) { in irdma_modify_qp()
1230 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING || in irdma_modify_qp()
1231 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) { in irdma_modify_qp()
1236 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) { in irdma_modify_qp()
1237 err = -EINVAL; in irdma_modify_qp()
1245 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) { in irdma_modify_qp()
1246 err = -EINVAL; in irdma_modify_qp()
1255 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) { in irdma_modify_qp()
1256 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1257 if (udata && udata->inlen) { in irdma_modify_qp()
1259 min(sizeof(ureq), udata->inlen))) in irdma_modify_qp()
1260 return -EINVAL; in irdma_modify_qp()
1270 if (iwqp->sc_qp.term_flags) { in irdma_modify_qp()
1271 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1272 irdma_terminate_del_timer(&iwqp->sc_qp); in irdma_modify_qp()
1273 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp()
1276 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED && in irdma_modify_qp()
1277 iwdev->iw_status && in irdma_modify_qp()
1278 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT) in irdma_modify_qp()
1287 err = -EINVAL; in irdma_modify_qp()
1291 iwqp->ibqp_state = attr->qp_state; in irdma_modify_qp()
1294 ctx_info->iwarp_info_valid = true; in irdma_modify_qp()
1295 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) in irdma_modify_qp()
1296 offload_info->wr_rdresp_en = true; in irdma_modify_qp()
1297 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) in irdma_modify_qp()
1298 offload_info->wr_rdresp_en = true; in irdma_modify_qp()
1299 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) in irdma_modify_qp()
1300 offload_info->rd_en = true; in irdma_modify_qp()
1303 if (ctx_info->iwarp_info_valid) { in irdma_modify_qp()
1304 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; in irdma_modify_qp()
1305 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; in irdma_modify_qp()
1306 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); in irdma_modify_qp()
1308 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1312 ctx_info->rem_endpoint_idx = tcp_info->arp_idx; in irdma_modify_qp()
1314 return -EINVAL; in irdma_modify_qp()
1317 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp()
1318 if (iwqp->iwarp_state == info.curr_iwarp_state) { in irdma_modify_qp()
1319 iwqp->iwarp_state = info.next_iwarp_state; in irdma_modify_qp()
1320 iwqp->ibqp_state = attr->qp_state; in irdma_modify_qp()
1322 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1325 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { in irdma_modify_qp()
1327 if (iwqp->hw_tcp_state) { in irdma_modify_qp()
1328 spin_lock_irqsave(&iwqp->lock, flags); in irdma_modify_qp()
1329 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; in irdma_modify_qp()
1330 iwqp->last_aeq = IRDMA_AE_RESET_SENT; in irdma_modify_qp()
1331 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1337 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags); in irdma_modify_qp()
1339 if (iwqp->cm_node) { in irdma_modify_qp()
1340 atomic_inc(&iwqp->cm_node->refcnt); in irdma_modify_qp()
1341 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); in irdma_modify_qp()
1342 close_timer_started = atomic_inc_return(&iwqp->close_timer_started); in irdma_modify_qp()
1343 if (iwqp->cm_id && close_timer_started == 1) in irdma_modify_qp()
1344 irdma_schedule_cm_timer(iwqp->cm_node, in irdma_modify_qp()
1348 irdma_rem_ref_cm_node(iwqp->cm_node); in irdma_modify_qp()
1350 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags); in irdma_modify_qp()
1354 if (attr_mask & IB_QP_STATE && udata && udata->outlen && in irdma_modify_qp()
1355 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { in irdma_modify_qp()
1359 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX && in irdma_modify_qp()
1360 !iwqp->push_wqe_mmap_entry && in irdma_modify_qp()
1364 uresp.push_offset = iwqp->sc_qp.push_offset; in irdma_modify_qp()
1366 uresp.rd_fence_rate = iwdev->rd_fence_rate; in irdma_modify_qp()
1369 udata->outlen)); in irdma_modify_qp()
1372 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_modify_qp()
1380 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_modify_qp()
1386 * irdma_cq_free_rsrc - free up resources for cq
1393 struct irdma_sc_cq *cq = &iwcq->sc_cq; in irdma_cq_free_rsrc()
1395 if (!iwcq->user_mode) { in irdma_cq_free_rsrc()
1396 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem); in irdma_cq_free_rsrc()
1397 irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow); in irdma_cq_free_rsrc()
1400 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id); in irdma_cq_free_rsrc()
1404 * irdma_free_cqbuf - worker to free a cq buffer
1412 irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf); in irdma_free_cqbuf()
1417 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1431 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { in irdma_process_resize_list()
1436 list_del(&cq_buf->list); in irdma_process_resize_list()
1437 queue_work(iwdev->cleanup_wq, &cq_buf->work); in irdma_process_resize_list()
1445 * irdma_resize_cq - resize cq
1456 struct irdma_sc_dev *dev = iwcq->sc_cq.dev; in irdma_resize_cq()
1470 iwdev = to_iwdev(ibcq->device); in irdma_resize_cq()
1471 rf = iwdev->rf; in irdma_resize_cq()
1473 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & in irdma_resize_cq()
1475 return -EOPNOTSUPP; in irdma_resize_cq()
1477 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN) in irdma_resize_cq()
1478 return -EINVAL; in irdma_resize_cq()
1480 if (entries > rf->max_cqe) in irdma_resize_cq()
1481 return -EINVAL; in irdma_resize_cq()
1483 if (!iwcq->user_mode) { in irdma_resize_cq()
1485 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) in irdma_resize_cq()
1491 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) in irdma_resize_cq()
1500 if (ucontext->legacy_mode) in irdma_resize_cq()
1501 return -EOPNOTSUPP; in irdma_resize_cq()
1504 min(sizeof(req), udata->inlen))) in irdma_resize_cq()
1505 return -EINVAL; in irdma_resize_cq()
1507 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); in irdma_resize_cq()
1509 &ucontext->cq_reg_mem_list); in irdma_resize_cq()
1510 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); in irdma_resize_cq()
1513 return -ENOMEM; in irdma_resize_cq()
1515 cqmr_buf = &iwpbl_buf->cq_mr; in irdma_resize_cq()
1516 if (iwpbl_buf->pbl_allocated) { in irdma_resize_cq()
1519 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; in irdma_resize_cq()
1521 info.cq_pa = cqmr_buf->cq_pbl.addr; in irdma_resize_cq()
1529 kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf, in irdma_resize_cq()
1532 return -ENOMEM; in irdma_resize_cq()
1538 ret = -ENOMEM; in irdma_resize_cq()
1543 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_resize_cq()
1545 ret = -ENOMEM; in irdma_resize_cq()
1549 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; in irdma_resize_cq()
1552 cqp_info = &cqp_request->info; in irdma_resize_cq()
1553 m_info = &cqp_info->in.u.cq_modify.info; in irdma_resize_cq()
1556 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY; in irdma_resize_cq()
1557 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq; in irdma_resize_cq()
1558 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request; in irdma_resize_cq()
1559 cqp_info->post_sq = 1; in irdma_resize_cq()
1561 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_resize_cq()
1565 spin_lock_irqsave(&iwcq->lock, flags); in irdma_resize_cq()
1567 cq_buf->kmem_buf = iwcq->kmem; in irdma_resize_cq()
1568 cq_buf->hw = dev->hw; in irdma_resize_cq()
1569 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk)); in irdma_resize_cq()
1570 INIT_WORK(&cq_buf->work, irdma_free_cqbuf); in irdma_resize_cq()
1571 list_add_tail(&cq_buf->list, &iwcq->resize_list); in irdma_resize_cq()
1572 iwcq->kmem = kmem_buf; in irdma_resize_cq()
1575 irdma_sc_cq_resize(&iwcq->sc_cq, &info); in irdma_resize_cq()
1576 ibcq->cqe = info.cq_size - 1; in irdma_resize_cq()
1577 spin_unlock_irqrestore(&iwcq->lock, flags); in irdma_resize_cq()
1582 irdma_free_dma_mem(dev->hw, &kmem_buf); in irdma_resize_cq()
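
Note: irdma_resize_cq() never frees the old CQ buffer inline. Under the CQ lock it snapshots the old kmem into a cq_buf node, queues the node on resize_list, and a work item (irdma_free_cqbuf) frees it later, once hardware has moved to the new ring. A compact sketch of that swap-then-defer shape with a plain linked list (single-threaded, so locking is elided):

#include <stdlib.h>

struct buf { void *mem; struct buf *next; };

static struct buf *retired;    /* stands in for iwcq->resize_list */

/* swap in the new ring; park the old one for deferred freeing */
static int swap_buf(void **cur, void *new_mem)
{
    struct buf *old = malloc(sizeof(*old));

    if (!old)
        return -1;
    old->mem = *cur;        /* snapshot, like cq_buf->kmem_buf = iwcq->kmem */
    old->next = retired;
    retired = old;
    *cur = new_mem;
    return 0;
}

/* the "work item": drain and free retired rings later */
static void drain_retired(void)
{
    while (retired) {
        struct buf *b = retired;

        retired = b->next;
        free(b->mem);
        free(b);
    }
}

int main(void)
{
    void *cq = malloc(64);

    if (swap_buf(&cq, malloc(128)))
        return 1;
    drain_retired();
    free(cq);
    return 0;
}
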
1589 * irdma_get_mr_access - get hw MR access permissions from IB access flags
1611 * irdma_free_stag - free stag resource
1620 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S; in irdma_free_stag()
1621 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx); in irdma_free_stag()
1625 * irdma_create_stag - create random stag
1642 driver_key = random & ~iwdev->rf->mr_stagmask; in irdma_create_stag()
1643 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8; in irdma_create_stag()
1644 next_stag_index %= iwdev->rf->max_mr; in irdma_create_stag()
1646 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, in irdma_create_stag()
1647 iwdev->rf->max_mr, &stag_index, in irdma_create_stag()
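
Note: irdma_create_stag() splits one random 32-bit value against the device's stag mask: the bits outside the mask become the consumer key embedded in the returned stag, and the bits inside the mask, shifted down by 8, seed the search for a free index. A worked sketch with an assumed mask (the real mr_stagmask comes from device configuration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mr_stagmask = 0x00ffff00;    /* assumed value for illustration */
    uint32_t rnd = 0x12345678;
    uint32_t driver_key = rnd & ~mr_stagmask;               /* 0x12000078 */
    uint32_t next_stag_index = (rnd & mr_stagmask) >> 8;    /* 0x3456; the driver then takes it mod max_mr */

    printf("key=0x%08x index=%u\n", driver_key, next_stag_index);
    return 0;
}
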
1659 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
1679 * irdma_check_mr_contiguous - check if MR is physically contiguous
1687 struct irdma_pble_level2 *lvl2 = &palloc->level2; in irdma_check_mr_contiguous()
1688 struct irdma_pble_info *leaf = lvl2->leaf; in irdma_check_mr_contiguous()
1694 if (palloc->level == PBLE_LEVEL_1) { in irdma_check_mr_contiguous()
1695 arr = palloc->level1.addr; in irdma_check_mr_contiguous()
1696 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt, in irdma_check_mr_contiguous()
1701 start_addr = leaf->addr; in irdma_check_mr_contiguous()
1703 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { in irdma_check_mr_contiguous()
1704 arr = leaf->addr; in irdma_check_mr_contiguous()
1707 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size); in irdma_check_mr_contiguous()
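
Note: the per-level predicate applied above, irdma_check_mem_contiguous(), verifies that every page address equals the first address plus its index times the page size. A minimal standalone version of that check:

#include <stdint.h>
#include <stdio.h>

/* returns 1 if arr[0..npages) describes one physically contiguous range */
static int check_contiguous(const uint64_t *arr, uint32_t npages, uint32_t pg_size)
{
    uint32_t i;

    for (i = 0; i < npages; i++)
        if (arr[i] != arr[0] + (uint64_t)i * pg_size)
            return 0;
    return 1;
}

int main(void)
{
    uint64_t pages[] = { 0x10000, 0x11000, 0x12000 };

    printf("%d\n", check_contiguous(pages, 3, 0x1000));    /* prints 1 */
    return 0;
}
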
1716 * irdma_setup_pbles - copy user pg address to pble's
1725 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_setup_pbles()
1726 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; in irdma_setup_pbles()
1733 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, in irdma_setup_pbles()
1738 iwpbl->pbl_allocated = true; in irdma_setup_pbles()
1739 level = palloc->level; in irdma_setup_pbles()
1740 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 : in irdma_setup_pbles()
1741 palloc->level2.leaf; in irdma_setup_pbles()
1742 pbl = pinfo->addr; in irdma_setup_pbles()
1744 pbl = iwmr->pgaddrmem; in irdma_setup_pbles()
1750 iwmr->pgaddrmem[0] = *pbl; in irdma_setup_pbles()
1756 * irdma_handle_q_mem - handle memory for qp and cq
1767 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; in irdma_handle_q_mem()
1768 struct irdma_mr *iwmr = iwpbl->iwmr; in irdma_handle_q_mem()
1769 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; in irdma_handle_q_mem()
1770 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; in irdma_handle_q_mem()
1772 u64 *arr = iwmr->pgaddrmem; in irdma_handle_q_mem()
1777 pg_size = iwmr->page_size; in irdma_handle_q_mem()
1778 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); in irdma_handle_q_mem()
1783 arr = palloc->level1.addr; in irdma_handle_q_mem()
1785 switch (iwmr->type) { in irdma_handle_q_mem()
1787 total = req->sq_pages + req->rq_pages; in irdma_handle_q_mem()
1788 hmc_p = &qpmr->sq_pbl; in irdma_handle_q_mem()
1789 qpmr->shadow = (dma_addr_t) arr[total]; in irdma_handle_q_mem()
1791 ret = irdma_check_mem_contiguous(arr, req->sq_pages, in irdma_handle_q_mem()
1794 ret = irdma_check_mem_contiguous(&arr[req->sq_pages], in irdma_handle_q_mem()
1795 req->rq_pages, in irdma_handle_q_mem()
1800 hmc_p->idx = palloc->level1.idx; in irdma_handle_q_mem()
1801 hmc_p = &qpmr->rq_pbl; in irdma_handle_q_mem()
1802 hmc_p->idx = palloc->level1.idx + req->sq_pages; in irdma_handle_q_mem()
1804 hmc_p->addr = arr[0]; in irdma_handle_q_mem()
1805 hmc_p = &qpmr->rq_pbl; in irdma_handle_q_mem()
1806 hmc_p->addr = arr[req->sq_pages]; in irdma_handle_q_mem()
1810 hmc_p = &cqmr->cq_pbl; in irdma_handle_q_mem()
1812 if (!cqmr->split) in irdma_handle_q_mem()
1813 cqmr->shadow = (dma_addr_t) arr[req->cq_pages]; in irdma_handle_q_mem()
1816 ret = irdma_check_mem_contiguous(arr, req->cq_pages, in irdma_handle_q_mem()
1820 hmc_p->idx = palloc->level1.idx; in irdma_handle_q_mem()
1822 hmc_p->addr = arr[0]; in irdma_handle_q_mem()
1825 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "MR type error\n"); in irdma_handle_q_mem()
1826 err = -EINVAL; in irdma_handle_q_mem()
1830 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); in irdma_handle_q_mem()
1831 iwpbl->pbl_allocated = false; in irdma_handle_q_mem()
1838 * irdma_hw_alloc_stag - cqp command to allocate stag
1847 struct ib_pd *pd = iwmr->ibmr.pd; in irdma_hw_alloc_stag() local
1848 struct irdma_pd *iwpd = to_iwpd(pd); in irdma_hw_alloc_stag()
1853 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); in irdma_hw_alloc_stag()
1855 return -ENOMEM; in irdma_hw_alloc_stag()
1857 cqp_info = &cqp_request->info; in irdma_hw_alloc_stag()
1858 info = &cqp_info->in.u.alloc_stag.info; in irdma_hw_alloc_stag()
1860 info->page_size = PAGE_SIZE; in irdma_hw_alloc_stag()
1861 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; in irdma_hw_alloc_stag()
1862 info->pd_id = iwpd->sc_pd.pd_id; in irdma_hw_alloc_stag()
1863 info->total_len = iwmr->len; in irdma_hw_alloc_stag()
1864 info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false; in irdma_hw_alloc_stag()
1865 info->remote_access = true; in irdma_hw_alloc_stag()
1866 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; in irdma_hw_alloc_stag()
1867 cqp_info->post_sq = 1; in irdma_hw_alloc_stag()
1868 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; in irdma_hw_alloc_stag()
1869 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; in irdma_hw_alloc_stag()
1870 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_hw_alloc_stag()
1871 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_hw_alloc_stag()
1873 iwmr->is_hwreg = 1; in irdma_hw_alloc_stag()
1879 * irdma_set_page - populate pbl list for fmr
1887 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_set_page()
1888 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; in irdma_set_page()
1891 if (unlikely(iwmr->npages == iwmr->page_cnt)) in irdma_set_page()
1892 return -ENOMEM; in irdma_set_page()
1894 if (palloc->level == PBLE_LEVEL_2) { in irdma_set_page()
1896 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT); in irdma_set_page()
1898 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr; in irdma_set_page()
1900 pbl = palloc->level1.addr; in irdma_set_page()
1901 pbl[iwmr->npages] = addr; in irdma_set_page()
1904 iwmr->npages++; in irdma_set_page()
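
Note: for a level-2 PBLE tree, irdma_set_page() locates the leaf page by dividing the running page count by the entries a leaf holds and indexes within the leaf by the remainder; the shift/mask form works because PBLE_PER_PAGE is a power of two (512 here, consistent with PBLE_512_SHIFT == 9). A worked sketch of that two-level addressing:

#include <stdio.h>

#define PBLE_512_SHIFT 9
#define PBLE_PER_PAGE  512    /* 4 KiB page / 8-byte PBL entries */

int main(void)
{
    unsigned int npages = 1500;
    unsigned int leaf = npages >> PBLE_512_SHIFT;        /* leaf 2 */
    unsigned int slot = npages & (PBLE_PER_PAGE - 1);    /* entry 476 */

    printf("leaf=%u slot=%u\n", leaf, slot);
    return 0;
}
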
1909 * irdma_map_mr_sg - map of sg list for fmr
1921 iwmr->npages = 0; in irdma_map_mr_sg()
1927 * irdma_hwreg_mr - send cqp command for memory registration
1936 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_hwreg_mr()
1938 struct ib_pd *pd = iwmr->ibmr.pd; in irdma_hwreg_mr() local
1939 struct irdma_pd *iwpd = to_iwpd(pd); in irdma_hwreg_mr()
1940 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; in irdma_hwreg_mr()
1945 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); in irdma_hwreg_mr()
1947 return -ENOMEM; in irdma_hwreg_mr()
1949 cqp_info = &cqp_request->info; in irdma_hwreg_mr()
1950 stag_info = &cqp_info->in.u.mr_reg_non_shared.info; in irdma_hwreg_mr()
1952 stag_info->va = iwpbl->user_base; in irdma_hwreg_mr()
1953 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; in irdma_hwreg_mr()
1954 stag_info->stag_key = (u8)iwmr->stag; in irdma_hwreg_mr()
1955 stag_info->total_len = iwmr->len; in irdma_hwreg_mr()
1956 stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false; in irdma_hwreg_mr()
1957 stag_info->access_rights = irdma_get_mr_access(access, in irdma_hwreg_mr()
1958 iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev); in irdma_hwreg_mr()
1959 stag_info->pd_id = iwpd->sc_pd.pd_id; in irdma_hwreg_mr()
1960 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) in irdma_hwreg_mr()
1961 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; in irdma_hwreg_mr()
1963 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; in irdma_hwreg_mr()
1964 stag_info->page_size = iwmr->page_size; in irdma_hwreg_mr()
1966 if (iwpbl->pbl_allocated) { in irdma_hwreg_mr()
1967 if (palloc->level == PBLE_LEVEL_1) { in irdma_hwreg_mr()
1968 stag_info->first_pm_pbl_index = palloc->level1.idx; in irdma_hwreg_mr()
1969 stag_info->chunk_size = 1; in irdma_hwreg_mr()
1971 stag_info->first_pm_pbl_index = palloc->level2.root.idx; in irdma_hwreg_mr()
1972 stag_info->chunk_size = 3; in irdma_hwreg_mr()
1975 stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; in irdma_hwreg_mr()
1978 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; in irdma_hwreg_mr()
1979 cqp_info->post_sq = 1; in irdma_hwreg_mr()
1980 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; in irdma_hwreg_mr()
1981 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; in irdma_hwreg_mr()
1982 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_hwreg_mr()
1983 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_hwreg_mr()
1986 iwmr->is_hwreg = 1; in irdma_hwreg_mr()
1992 * irdma_alloc_iwmr - Allocate iwmr
     * @region - memory region
     * @pd - protection domain
     * @virt - virtual address
     * @reg_type - registration type
1997 struct ib_pd *pd, u64 virt, in irdma_alloc_iwmr() argument
2005 return ERR_PTR(-ENOMEM); in irdma_alloc_iwmr()
2007 iwpbl = &iwmr->iwpbl; in irdma_alloc_iwmr()
2008 iwpbl->iwmr = iwmr; in irdma_alloc_iwmr()
2009 iwmr->region = region; in irdma_alloc_iwmr()
2010 iwmr->ibmr.pd = pd; in irdma_alloc_iwmr()
2011 iwmr->ibmr.device = pd->device; in irdma_alloc_iwmr()
2012 iwmr->ibmr.iova = virt; in irdma_alloc_iwmr()
2013 iwmr->type = reg_type; in irdma_alloc_iwmr()
2016 iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1); in irdma_alloc_iwmr()
2017 iwmr->page_size = IRDMA_HW_PAGE_SIZE; in irdma_alloc_iwmr()
2018 iwmr->len = region->length; in irdma_alloc_iwmr()
2019 iwpbl->user_base = virt; in irdma_alloc_iwmr()
2020 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); in irdma_alloc_iwmr()
2032 * irdma_reg_user_mr_type_mem - Handle memory registration
2033 * @iwmr - irdma mr
2034 * @access - access rights
2035 * @create_stag - flag to create stag or not
2041 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); in irdma_reg_user_mr_type_mem()
2042 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_reg_user_mr_type_mem()
2047 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; in irdma_reg_user_mr_type_mem()
2049 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); in irdma_reg_user_mr_type_mem()
2054 err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, in irdma_reg_user_mr_type_mem()
2055 iwmr->page_size); in irdma_reg_user_mr_type_mem()
2057 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); in irdma_reg_user_mr_type_mem()
2058 iwpbl->pbl_allocated = false; in irdma_reg_user_mr_type_mem()
2065 err = -ENOMEM; in irdma_reg_user_mr_type_mem()
2069 iwmr->stag = stag; in irdma_reg_user_mr_type_mem()
2070 iwmr->ibmr.rkey = stag; in irdma_reg_user_mr_type_mem()
2071 iwmr->ibmr.lkey = stag; in irdma_reg_user_mr_type_mem()
2073 iwmr->access = access; in irdma_reg_user_mr_type_mem()
2085 if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) in irdma_reg_user_mr_type_mem()
2086 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); in irdma_reg_user_mr_type_mem()
2092 * irdma_reg_user_mr_type_qp - Handle QP memory registration
     * @req - memory reg req
     * @udata - user info
2099 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); in irdma_reg_user_mr_type_qp()
2100 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_reg_user_mr_type_qp()
2108 if (total > iwmr->page_cnt) in irdma_reg_user_mr_type_qp()
2109 return -EINVAL; in irdma_reg_user_mr_type_qp()
2118 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); in irdma_reg_user_mr_type_qp()
2119 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); in irdma_reg_user_mr_type_qp()
2120 iwpbl->on_list = true; in irdma_reg_user_mr_type_qp()
2121 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); in irdma_reg_user_mr_type_qp()
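/*
 * The iwpbl is parked on the ucontext's qp_reg_mem_list under the list
 * lock, presumably so QP creation can later locate the pinned queue memory
 * by user base address; irdma_del_memlist() unlinks it on teardown.
 */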
2127 /**
      * irdma_reg_user_mr_type_cq - Handle CQ memory registration
      * @req - memory reg req
      * @udata - user info
      * @iwmr - irdma mr
      */
2134 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); in irdma_reg_user_mr_type_cq()
2135 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_reg_user_mr_type_cq()
2143 …((iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) ? 0 : IRDMA_SHADOW_… in irdma_reg_user_mr_type_cq()
2144 if (total > iwmr->page_cnt) in irdma_reg_user_mr_type_cq()
2145 return -EINVAL; in irdma_reg_user_mr_type_cq()
2153 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); in irdma_reg_user_mr_type_cq()
2154 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); in irdma_reg_user_mr_type_cq()
2155 iwpbl->on_list = true; in irdma_reg_user_mr_type_cq()
2156 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); in irdma_reg_user_mr_type_cq()
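/*
 * Note the page budget checked above: on HW without IRDMA_FEATURE_CQ_RESIZE
 * the registration must also cover the CQ shadow area, so a umem that only
 * spans the CQ pages themselves is rejected with -EINVAL.
 */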
2162 * irdma_reg_user_mr - Register a user memory region
2163 * @pd: ptr of pd
2171 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, in irdma_reg_user_mr() argument
2176 struct irdma_device *iwdev = to_iwdev(pd->device); in irdma_reg_user_mr()
2182 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) in irdma_reg_user_mr()
2183 return ERR_PTR(-EINVAL); in irdma_reg_user_mr()
2185 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN) in irdma_reg_user_mr()
2186 return ERR_PTR(-EINVAL); in irdma_reg_user_mr()
2188 region = ib_umem_get(pd->uobject->context, start, len, access, 0); in irdma_reg_user_mr()
2191 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_reg_user_mr()
2196 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { in irdma_reg_user_mr()
2198 return ERR_PTR(-EFAULT); in irdma_reg_user_mr()
2201 iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type); in irdma_reg_user_mr()
2227 err = -EINVAL; in irdma_reg_user_mr()
2231 return &iwmr->ibmr; in irdma_reg_user_mr()
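/*
 * Path summary: validate the length against max_mr_size and the udata
 * header size, pin the user pages with ib_umem_get(), copy in the request
 * to learn reg_type, allocate the iwmr wrapper, then branch per
 * registration type (MEM/QP/CQ); unknown types unwind with -EINVAL.
 */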
2243 struct irdma_device *iwdev = to_iwdev(ib_mr->device); in irdma_hwdereg_mr()
2245 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd); in irdma_hwdereg_mr()
2247 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_hwdereg_mr()
2253 /*
      * Skip HW MR de-register when it is already de-registered during an
      * MR re-register and the re-registration fails.
      */
2256 if (!iwmr->is_hwreg) in irdma_hwdereg_mr()
2259 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); in irdma_hwdereg_mr()
2261 return -ENOMEM; in irdma_hwdereg_mr()
2263 cqp_info = &cqp_request->info; in irdma_hwdereg_mr()
2264 info = &cqp_info->in.u.dealloc_stag.info; in irdma_hwdereg_mr()
2266 info->pd_id = iwpd->sc_pd.pd_id; in irdma_hwdereg_mr()
2267 info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S); in irdma_hwdereg_mr()
2268 info->mr = true; in irdma_hwdereg_mr()
2269 if (iwpbl->pbl_allocated) in irdma_hwdereg_mr()
2270 info->dealloc_pbl = true; in irdma_hwdereg_mr()
2272 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; in irdma_hwdereg_mr()
2273 cqp_info->post_sq = 1; in irdma_hwdereg_mr()
2274 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; in irdma_hwdereg_mr()
2275 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; in irdma_hwdereg_mr()
2276 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_hwdereg_mr()
2277 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_hwdereg_mr()
2280 iwmr->is_hwreg = 0; in irdma_hwdereg_mr()
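/*
 * De-registration mirrors the register path: the IRDMA_OP_DEALLOC_STAG CQP
 * op releases the STag (and its PBLEs when dealloc_pbl is set), and
 * clearing is_hwreg tells a later re-register that no HW state remains.
 */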
2286 /**
      * irdma_rereg_mr_trans - Re-register a user MR for a change translation.
      * @iwmr: ptr of iwmr
      * @start: virtual start address
      * @len: length of mr
      * @virt: virtual address
      *
      * Re-register a user memory region when a change translation is
      * requested. Re-register a new region, reusing the stag from the
      * original registration.
      */
2296 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); in irdma_rereg_mr_trans()
2297 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_rereg_mr_trans()
2298 struct ib_pd *pd = iwmr->ibmr.pd; in irdma_rereg_mr_trans() local
2302 region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0); in irdma_rereg_mr_trans()
2305 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_rereg_mr_trans()
2310 iwmr->region = region; in irdma_rereg_mr_trans()
2311 iwmr->ibmr.iova = virt; in irdma_rereg_mr_trans()
2312 iwmr->ibmr.pd = pd; in irdma_rereg_mr_trans()
2313 iwmr->page_size = PAGE_SIZE; in irdma_rereg_mr_trans()
2315 iwmr->len = region->length; in irdma_rereg_mr_trans()
2316 iwpbl->user_base = virt; in irdma_rereg_mr_trans()
2317 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, in irdma_rereg_mr_trans()
2320 err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false); in irdma_rereg_mr_trans()
2324 return &iwmr->ibmr; in irdma_rereg_mr_trans()
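/*
 * Note that irdma_reg_user_mr_type_mem() is invoked with
 * create_stag == false: the new translation is registered under the STag
 * of the original registration instead of allocating a fresh one.
 */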
2332 * irdma_reg_phys_mr - register kernel physical memory
2333 * @pd: ibpd pointer
2340 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, in irdma_reg_phys_mr() argument
2343 struct irdma_device *iwdev = to_iwdev(pd->device); in irdma_reg_phys_mr()
2351 return ERR_PTR(-ENOMEM); in irdma_reg_phys_mr()
2353 iwmr->ibmr.pd = pd; in irdma_reg_phys_mr()
2354 iwmr->ibmr.device = pd->device; in irdma_reg_phys_mr()
2355 iwpbl = &iwmr->iwpbl; in irdma_reg_phys_mr()
2356 iwpbl->iwmr = iwmr; in irdma_reg_phys_mr()
2357 iwmr->type = IRDMA_MEMREG_TYPE_MEM; in irdma_reg_phys_mr()
2358 iwpbl->user_base = *iova_start; in irdma_reg_phys_mr()
2361 ret = -ENOMEM; in irdma_reg_phys_mr()
2365 iwmr->stag = stag; in irdma_reg_phys_mr()
2366 iwmr->ibmr.iova = *iova_start; in irdma_reg_phys_mr()
2367 iwmr->ibmr.rkey = stag; in irdma_reg_phys_mr()
2368 iwmr->ibmr.lkey = stag; in irdma_reg_phys_mr()
2369 iwmr->page_cnt = 1; in irdma_reg_phys_mr()
2370 iwmr->pgaddrmem[0] = addr; in irdma_reg_phys_mr()
2371 iwmr->len = size; in irdma_reg_phys_mr()
2372 iwmr->page_size = SZ_4K; in irdma_reg_phys_mr()
2379 return &iwmr->ibmr; in irdma_reg_phys_mr()
2388 * irdma_get_dma_mr - register physical mem
2389 * @pd: ptr of pd
2393 irdma_get_dma_mr(struct ib_pd *pd, int acc) in irdma_get_dma_mr() argument
2397 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva); in irdma_get_dma_mr()
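/*
 * A DMA MR is a degenerate physical registration: addr and size of 0 with
 * the caller's access rights, which presumably gives the kernel consumer
 * lkey-based access without a page list (the phys path sets page_cnt to 1).
 */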
2401 * irdma_del_memlist - Delete pbl list entries for CQ/QP in irdma_del_memlist()
2409 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_del_memlist()
2412 switch (iwmr->type) { in irdma_del_memlist()
2414 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); in irdma_del_memlist()
2415 if (iwpbl->on_list) { in irdma_del_memlist()
2416 iwpbl->on_list = false; in irdma_del_memlist()
2417 list_del(&iwpbl->list); in irdma_del_memlist()
2419 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); in irdma_del_memlist()
2422 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); in irdma_del_memlist()
2423 if (iwpbl->on_list) { in irdma_del_memlist()
2424 iwpbl->on_list = false; in irdma_del_memlist()
2425 list_del(&iwpbl->list); in irdma_del_memlist()
2427 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); in irdma_del_memlist()
2435 * irdma_post_send - post send wr for kernel application in irdma_post_send()
2455 ukqp = &iwqp->sc_qp.qp_uk; in irdma_post_send()
2456 dev = &iwqp->iwdev->rf->sc_dev; in irdma_post_send()
2458 spin_lock_irqsave(&iwqp->lock, flags); in irdma_post_send()
2462 info.wr_id = ib_wr->wr_id; in irdma_post_send()
2463 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) in irdma_post_send()
2465 if (ib_wr->send_flags & IB_SEND_FENCE) in irdma_post_send()
2467 switch (ib_wr->opcode) { in irdma_post_send()
2469 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { in irdma_post_send()
2471 info.imm_data = ntohl(ib_wr->ex.imm_data); in irdma_post_send()
2473 err = -EINVAL; in irdma_post_send()
2479 if (ib_wr->opcode == IB_WR_SEND || in irdma_post_send()
2480 ib_wr->opcode == IB_WR_SEND_WITH_IMM) { in irdma_post_send()
2481 if (ib_wr->send_flags & IB_SEND_SOLICITED) in irdma_post_send()
2486 if (ib_wr->send_flags & IB_SEND_SOLICITED) in irdma_post_send()
2490 info.stag_to_inv = ib_wr->ex.invalidate_rkey; in irdma_post_send()
2493 info.op.send.num_sges = ib_wr->num_sge; in irdma_post_send()
2494 info.op.send.sg_list = ib_wr->sg_list; in irdma_post_send()
2495 if (iwqp->ibqp.qp_type == IB_QPT_UD || in irdma_post_send()
2496 iwqp->ibqp.qp_type == IB_QPT_GSI) { in irdma_post_send()
2497 ah = to_iwah(ud_wr(ib_wr)->ah); in irdma_post_send()
2498 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; in irdma_post_send()
2499 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; in irdma_post_send()
2500 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; in irdma_post_send()
2503 if (ib_wr->send_flags & IB_SEND_INLINE) in irdma_post_send()
2509 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { in irdma_post_send()
2511 info.imm_data = ntohl(ib_wr->ex.imm_data); in irdma_post_send()
2513 err = -EINVAL; in irdma_post_send()
2518 if (ib_wr->send_flags & IB_SEND_SOLICITED) in irdma_post_send()
2523 info.op.rdma_write.num_lo_sges = ib_wr->num_sge; in irdma_post_send()
2524 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; in irdma_post_send()
2525 info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; in irdma_post_send()
2526 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey; in irdma_post_send()
2527 if (ib_wr->send_flags & IB_SEND_INLINE) in irdma_post_send()
2536 if (ib_wr->num_sge > in irdma_post_send()
2537 dev->hw_attrs.uk_attrs.max_hw_read_sges) { in irdma_post_send()
2538 err = -EINVAL; in irdma_post_send()
2542 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr; in irdma_post_send()
2543 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey; in irdma_post_send()
2544 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; in irdma_post_send()
2545 info.op.rdma_read.num_lo_sges = ib_wr->num_sge; in irdma_post_send()
2551 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; in irdma_post_send()
2555 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); in irdma_post_send()
2556 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; in irdma_post_send()
2562 irdma_get_mr_access(reg_wr(ib_wr)->access, in irdma_post_send()
2563 dev->hw_attrs.uk_attrs.hw_rev); in irdma_post_send()
2564 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; in irdma_post_send()
2565 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; in irdma_post_send()
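/*
 * STag layout, as the two lines above imply: the low 8 bits hold the
 * consumer key and the bits above hold the HW STag index, so a key of
 * 0x12345678 yields stag_key 0x78 and stag_idx 0x123456.
 */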
2566 stag_info.page_size = reg_wr(ib_wr)->mr->page_size; in irdma_post_send()
2567 stag_info.wr_id = ib_wr->wr_id; in irdma_post_send()
2569 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; in irdma_post_send()
2570 stag_info.total_len = iwmr->ibmr.length; in irdma_post_send()
2571 if (palloc->level == PBLE_LEVEL_2) { in irdma_post_send()
2573 stag_info.first_pm_pbl_index = palloc->level2.root.idx; in irdma_post_send()
2576 stag_info.first_pm_pbl_index = palloc->level1.idx; in irdma_post_send()
2578 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; in irdma_post_send()
2579 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, in irdma_post_send()
2584 err = -EINVAL; in irdma_post_send()
2585 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_post_send()
2587 ib_wr->opcode); in irdma_post_send()
2593 ib_wr = ib_wr->next; in irdma_post_send()
2596 if (!iwqp->flush_issued) { in irdma_post_send()
2597 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) in irdma_post_send()
2599 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_post_send()
2601 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_post_send()
2612 * irdma_post_recv - post receive wr for kernel application
2623 struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk; in irdma_post_recv()
2628 spin_lock_irqsave(&iwqp->lock, flags); in irdma_post_recv()
2631 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) { in irdma_post_recv()
2632 err = -EINVAL; in irdma_post_recv()
2635 post_recv.num_sges = ib_wr->num_sge; in irdma_post_recv()
2636 post_recv.wr_id = ib_wr->wr_id; in irdma_post_recv()
2637 post_recv.sg_list = ib_wr->sg_list; in irdma_post_recv()
2640 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_post_recv()
2645 ib_wr = ib_wr->next; in irdma_post_recv()
2649 spin_unlock_irqrestore(&iwqp->lock, flags); in irdma_post_recv()
2650 if (iwqp->flush_issued) in irdma_post_recv()
2660 * irdma_flush_err_to_ib_wc_status - convert a flush error code to an IB WC status
2692 * irdma_process_cqe - process cqe info
2702 entry->wc_flags = 0; in irdma_process_cqe()
2703 entry->pkey_index = 0; in irdma_process_cqe()
2704 entry->wr_id = cq_poll_info->wr_id; in irdma_process_cqe()
2706 qp = cq_poll_info->qp_handle; in irdma_process_cqe()
2707 entry->qp = qp->qp_uk.back_qp; in irdma_process_cqe()
2709 if (cq_poll_info->error) { in irdma_process_cqe()
2710 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ? in irdma_process_cqe()
2711 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; in irdma_process_cqe()
2713 entry->vendor_err = cq_poll_info->major_err << 16 | in irdma_process_cqe()
2714 cq_poll_info->minor_err; in irdma_process_cqe()
2716 entry->status = IB_WC_SUCCESS; in irdma_process_cqe()
2717 if (cq_poll_info->imm_valid) { in irdma_process_cqe()
2718 entry->ex.imm_data = htonl(cq_poll_info->imm_data); in irdma_process_cqe()
2719 entry->wc_flags |= IB_WC_WITH_IMM; in irdma_process_cqe()
2721 if (cq_poll_info->ud_smac_valid) { in irdma_process_cqe()
2722 ether_addr_copy(entry->smac, cq_poll_info->ud_smac); in irdma_process_cqe()
2723 entry->wc_flags |= IB_WC_WITH_SMAC; in irdma_process_cqe()
2726 if (cq_poll_info->ud_vlan_valid) { in irdma_process_cqe()
2727 u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK; in irdma_process_cqe()
2729 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; in irdma_process_cqe()
2731 entry->vlan_id = vlan; in irdma_process_cqe()
2732 entry->wc_flags |= IB_WC_WITH_VLAN; in irdma_process_cqe()
2735 entry->sl = 0; in irdma_process_cqe()
2739 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { in irdma_process_cqe()
2743 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ? in irdma_process_cqe()
2745 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && in irdma_process_cqe()
2746 cq_poll_info->stag_invalid_set) { in irdma_process_cqe()
2747 entry->ex.invalidate_rkey = cq_poll_info->inv_stag; in irdma_process_cqe()
2748 entry->wc_flags |= IB_WC_WITH_INVALIDATE; in irdma_process_cqe()
2752 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { in irdma_process_cqe()
2753 entry->src_qp = cq_poll_info->ud_src_qpn; in irdma_process_cqe()
2754 entry->slid = 0; in irdma_process_cqe()
2755 entry->wc_flags |= in irdma_process_cqe()
2757 entry->network_hdr_type = cq_poll_info->ipv4 ? in irdma_process_cqe()
2761 entry->src_qp = cq_poll_info->qp_id; in irdma_process_cqe()
2764 entry->byte_len = cq_poll_info->bytes_xfered; in irdma_process_cqe()
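/*
 * Error reporting above packs the AE codes as
 * vendor_err = (major_err << 16) | minor_err, e.g. major 0x1 with minor
 * 0x2 surfaces as 0x00010002; only a FLUSHED completion is mapped to a
 * specific IB status, anything else becomes IB_WC_GENERAL_ERR.
 */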
2768 * irdma_poll_one - poll one entry of the CQ
2771 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ
2791 * __irdma_poll_cq - poll cq for completion (kernel apps)
2801 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; in __irdma_poll_cq()
2810 iwdev = to_iwdev(iwcq->ibcq.device); in __irdma_poll_cq()
2811 ukcq = &iwcq->sc_cq.cq_uk; in __irdma_poll_cq()
2814 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { in __irdma_poll_cq()
2817 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled); in __irdma_poll_cq()
2823 if (ret == -ENOENT) in __irdma_poll_cq()
2826 if (ret == -EFAULT) { in __irdma_poll_cq()
2842 if (ret == -ENOENT) { in __irdma_poll_cq()
2853 if (ret == -ENOENT) in __irdma_poll_cq()
2856 if (ret == -EFAULT) { in __irdma_poll_cq()
2875 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in __irdma_poll_cq()
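/*
 * Poll ordering: completions still sitting in pre-resize CQ buffers on
 * resize_list are drained before the live CQ; -ENOENT from an old buffer
 * simply advances to the next one, while -EFAULT is reported to the caller.
 */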
2882 * irdma_poll_cq - poll cq for completion (kernel apps)
2897 spin_lock_irqsave(&iwcq->lock, flags); in irdma_poll_cq()
2899 spin_unlock_irqrestore(&iwcq->lock, flags); in irdma_poll_cq()
2905 * irdma_req_notify_cq - arm cq for kernel application in irdma_req_notify_cq()
2921 ukcq = &iwcq->sc_cq.cq_uk; in irdma_req_notify_cq()
2923 spin_lock_irqsave(&iwcq->lock, flags); in irdma_req_notify_cq()
2927 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED) in irdma_req_notify_cq()
2931 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) { in irdma_req_notify_cq()
2932 iwcq->last_notify = cq_notify; in irdma_req_notify_cq()
2937 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated))) in irdma_req_notify_cq()
2939 spin_unlock_irqrestore(&iwcq->lock, flags); in irdma_req_notify_cq()
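/*
 * Arming recap: atomic_cmpxchg() arms the CQ at most once until the next
 * completion, and promo_event appears to exist so a pending
 * IRDMA_CQ_COMPL_SOLICITED arm can still be upgraded to a broader
 * notification type rather than being silently dropped.
 */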
2945 * mcast_list_add - Add a new mcast item to list
2953 list_add(&new_elem->list, &rf->mc_qht_list.list); in mcast_list_add()
2957 * mcast_list_del - Remove an mcast item from list
2964 list_del(&mc_qht_elem->list); in mcast_list_del()
2968 * mcast_list_lookup_ip - Search mcast list for address
2979 list_for_each_safe(pos, q, &rf->mc_qht_list.list) { in mcast_list_lookup_ip()
2981 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, in mcast_list_lookup_ip()
2982 sizeof(mc_qht_el->mc_info.dest_ip))) in mcast_list_lookup_ip()
2990 * irdma_mcast_cqp_op - perform a mcast cqp operation
3005 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); in irdma_mcast_cqp_op()
3007 return -ENOMEM; in irdma_mcast_cqp_op()
3009 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; in irdma_mcast_cqp_op()
3010 cqp_info = &cqp_request->info; in irdma_mcast_cqp_op()
3011 cqp_info->cqp_cmd = op; in irdma_mcast_cqp_op()
3012 cqp_info->post_sq = 1; in irdma_mcast_cqp_op()
3013 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; in irdma_mcast_cqp_op()
3014 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_mcast_cqp_op()
3015 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_mcast_cqp_op()
3016 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_mcast_cqp_op()
3022 * irdma_attach_mcast - attach a qp to a multicast group
3033 struct irdma_device *iwdev = iwqp->iwdev; in irdma_attach_mcast()
3034 struct irdma_pci_f *rf = iwdev->rf; in irdma_attach_mcast()
3052 irdma_get_vlan_mac_ipv6(iwqp->cm_id, ip_addr, &vlan_id, NULL); in irdma_attach_mcast()
3054 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_attach_mcast()
3055 "qp_id=%d, IP6address=%x:%x:%x:%x\n", ibqp->qp_num, in irdma_attach_mcast()
3061 vlan_id = irdma_get_vlan_ipv4(iwqp->cm_id, ip_addr); in irdma_attach_mcast()
3063 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_attach_mcast()
3065 ibqp->qp_num, ip_addr[0], dmac[0], dmac[1], dmac[2], in irdma_attach_mcast()
3069 spin_lock_irqsave(&rf->qh_list_lock, flags); in irdma_attach_mcast()
3074 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_attach_mcast()
3077 return -ENOMEM; in irdma_attach_mcast()
3079 mc_qht_elem->mc_info.ipv4_valid = ipv4; in irdma_attach_mcast()
3080 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, in irdma_attach_mcast()
3081 sizeof(mc_qht_elem->mc_info.dest_ip)); in irdma_attach_mcast()
3082 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, in irdma_attach_mcast()
3083 &mgn, &rf->next_mcg); in irdma_attach_mcast()
3086 return -ENOMEM; in irdma_attach_mcast()
3089 mc_qht_elem->mc_info.mgn = mgn; in irdma_attach_mcast()
3090 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; in irdma_attach_mcast()
3091 dma_mem_mc->size = sizeof(u64) * IRDMA_MAX_MGS_PER_CTX; in irdma_attach_mcast()
3092 dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc, in irdma_attach_mcast()
3093 dma_mem_mc->size, in irdma_attach_mcast()
3095 if (!dma_mem_mc->va) { in irdma_attach_mcast()
3096 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); in irdma_attach_mcast()
3098 return -ENOMEM; in irdma_attach_mcast()
3101 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; in irdma_attach_mcast()
3102 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, in irdma_attach_mcast()
3103 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); in irdma_attach_mcast()
3104 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; in irdma_attach_mcast()
3105 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; in irdma_attach_mcast()
3107 mc_qht_elem->mc_grp_ctx.vlan_valid = true; in irdma_attach_mcast()
3108 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; in irdma_attach_mcast()
3109 mc_qht_elem->mc_grp_ctx.qs_handle = in irdma_attach_mcast()
3110 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; in irdma_attach_mcast()
3111 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); in irdma_attach_mcast()
3113 spin_lock_irqsave(&rf->qh_list_lock, flags); in irdma_attach_mcast()
3116 if (mc_qht_elem->mc_grp_ctx.no_of_mgs == in irdma_attach_mcast()
3118 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_attach_mcast()
3119 return -ENOMEM; in irdma_attach_mcast()
3123 mcg_info.qp_id = iwqp->ibqp.qp_num; in irdma_attach_mcast()
3124 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; in irdma_attach_mcast()
3125 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); in irdma_attach_mcast()
3126 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_attach_mcast()
3130 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, in irdma_attach_mcast()
3132 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { in irdma_attach_mcast()
3133 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, in irdma_attach_mcast()
3145 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); in irdma_attach_mcast()
3146 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { in irdma_attach_mcast()
3148 irdma_free_dma_mem(&rf->hw, in irdma_attach_mcast()
3149 &mc_qht_elem->mc_grp_ctx.dma_mem_mc); in irdma_attach_mcast()
3150 irdma_free_rsrc(rf, rf->allocated_mcgs, in irdma_attach_mcast()
3151 mc_qht_elem->mc_grp_ctx.mg_id); in irdma_attach_mcast()
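/*
 * Group life cycle: the first attach allocates the group context, its
 * DMA-able member list and an MGN, then issues a CQP create; further
 * attaches to the same address only issue a CQP modify when the member
 * count changed. The unwind above removes the QP again and frees
 * everything once no members remain.
 */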
3159 * irdma_detach_mcast - detach a qp from a multicast group
3170 struct irdma_device *iwdev = iwqp->iwdev; in irdma_detach_mcast()
3171 struct irdma_pci_f *rf = iwdev->rf; in irdma_detach_mcast()
3186 spin_lock_irqsave(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3189 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3190 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_detach_mcast()
3195 mcg_info.qp_id = iwqp->ibqp.qp_num; in irdma_detach_mcast()
3196 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); in irdma_detach_mcast()
3197 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { in irdma_detach_mcast()
3199 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3200 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, in irdma_detach_mcast()
3203 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_detach_mcast()
3205 spin_lock_irqsave(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3207 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3208 return -EAGAIN; in irdma_detach_mcast()
3211 irdma_free_dma_mem(&rf->hw, in irdma_detach_mcast()
3212 &mc_qht_elem->mc_grp_ctx.dma_mem_mc); in irdma_detach_mcast()
3213 irdma_free_rsrc(rf, rf->allocated_mcgs, in irdma_detach_mcast()
3214 mc_qht_elem->mc_grp_ctx.mg_id); in irdma_detach_mcast()
3217 spin_unlock_irqrestore(&rf->qh_list_lock, flags); in irdma_detach_mcast()
3218 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, in irdma_detach_mcast()
3221 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_detach_mcast()
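/*
 * Detach is symmetric: the last member triggers a CQP destroy and, on
 * success, the DMA memory and MGN are released; otherwise a CQP modify
 * updates the remaining membership. A failed destroy apparently re-links
 * the element (the -EAGAIN path above) so the list stays consistent.
 */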
3231 * irdma_query_ah - Query address handle
3241 if (ah->av.attrs.ah_flags & IB_AH_GRH) { in irdma_query_ah()
3242 ah_attr->ah_flags = IB_AH_GRH; in irdma_query_ah()
3243 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; in irdma_query_ah()
3244 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; in irdma_query_ah()
3245 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; in irdma_query_ah()
3246 ah_attr->grh.sgid_index = ah->sgid_index; in irdma_query_ah()
3248 memcpy(&ah_attr->grh.dgid, &ah->dgid, in irdma_query_ah()
3249 sizeof(ah_attr->grh.dgid)); in irdma_query_ah()
3258 if (iwdev->netdev) { in irdma_get_netdev()
3259 dev_hold(iwdev->netdev); in irdma_get_netdev()
3260 return iwdev->netdev; in irdma_get_netdev()
3271 dev_ops->ops.driver_id = RDMA_DRIVER_I40IW; in irdma_set_device_ops()
3272 dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah); in irdma_set_device_ops()
3273 dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq); in irdma_set_device_ops()
3274 dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd); in irdma_set_device_ops()
3275 dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext, in irdma_set_device_ops()
3279 dev_ops->alloc_hw_stats = irdma_alloc_hw_stats; in irdma_set_device_ops()
3280 dev_ops->alloc_mr = irdma_alloc_mr; in irdma_set_device_ops()
3281 dev_ops->alloc_pd = irdma_alloc_pd; in irdma_set_device_ops()
3282 dev_ops->alloc_ucontext = irdma_alloc_ucontext; in irdma_set_device_ops()
3283 dev_ops->create_cq = irdma_create_cq; in irdma_set_device_ops()
3284 dev_ops->create_qp = irdma_create_qp; in irdma_set_device_ops()
3285 dev_ops->dealloc_pd = irdma_dealloc_pd; in irdma_set_device_ops()
3286 dev_ops->dealloc_ucontext = irdma_dealloc_ucontext; in irdma_set_device_ops()
3287 dev_ops->dereg_mr = irdma_dereg_mr; in irdma_set_device_ops()
3288 dev_ops->destroy_cq = irdma_destroy_cq; in irdma_set_device_ops()
3289 dev_ops->destroy_qp = irdma_destroy_qp; in irdma_set_device_ops()
3290 dev_ops->disassociate_ucontext = irdma_disassociate_ucontext; in irdma_set_device_ops()
3291 dev_ops->get_dev_fw_str = irdma_get_dev_fw_str; in irdma_set_device_ops()
3292 dev_ops->get_dma_mr = irdma_get_dma_mr; in irdma_set_device_ops()
3293 dev_ops->get_hw_stats = irdma_get_hw_stats; in irdma_set_device_ops()
3294 dev_ops->get_netdev = irdma_get_netdev; in irdma_set_device_ops()
3295 dev_ops->map_mr_sg = irdma_map_mr_sg; in irdma_set_device_ops()
3296 dev_ops->mmap = irdma_mmap; in irdma_set_device_ops()
3297 dev_ops->mmap_free = irdma_mmap_free; in irdma_set_device_ops()
3298 dev_ops->poll_cq = irdma_poll_cq; in irdma_set_device_ops()
3299 dev_ops->post_recv = irdma_post_recv; in irdma_set_device_ops()
3300 dev_ops->post_send = irdma_post_send; in irdma_set_device_ops()
3301 dev_ops->query_device = irdma_query_device; in irdma_set_device_ops()
3302 dev_ops->query_port = irdma_query_port; in irdma_set_device_ops()
3303 dev_ops->modify_port = irdma_modify_port; in irdma_set_device_ops()
3304 dev_ops->query_qp = irdma_query_qp; in irdma_set_device_ops()
3305 dev_ops->reg_user_mr = irdma_reg_user_mr; in irdma_set_device_ops()
3306 dev_ops->rereg_user_mr = irdma_rereg_user_mr; in irdma_set_device_ops()
3307 dev_ops->req_notify_cq = irdma_req_notify_cq; in irdma_set_device_ops()
3308 dev_ops->resize_cq = irdma_resize_cq; in irdma_set_device_ops()
3316 dev_ops->attach_mcast = irdma_attach_mcast; in irdma_set_device_mcast_ops()
3317 dev_ops->detach_mcast = irdma_detach_mcast; in irdma_set_device_mcast_ops()
3325 dev_ops->create_ah = irdma_create_ah; in irdma_set_device_roce_ops()
3326 dev_ops->destroy_ah = irdma_destroy_ah; in irdma_set_device_roce_ops()
3327 dev_ops->get_link_layer = irdma_get_link_layer; in irdma_set_device_roce_ops()
3328 dev_ops->get_port_immutable = irdma_roce_port_immutable; in irdma_set_device_roce_ops()
3329 dev_ops->modify_qp = irdma_modify_qp_roce; in irdma_set_device_roce_ops()
3330 dev_ops->query_ah = irdma_query_ah; in irdma_set_device_roce_ops()
3331 dev_ops->query_gid = irdma_query_gid_roce; in irdma_set_device_roce_ops()
3332 dev_ops->query_pkey = irdma_query_pkey; in irdma_set_device_roce_ops()
3333 ibdev->add_gid = irdma_add_gid; in irdma_set_device_roce_ops()
3334 ibdev->del_gid = irdma_del_gid; in irdma_set_device_roce_ops()
3342 ibdev->uverbs_cmd_mask |= in irdma_set_device_iw_ops()
3346 dev_ops->create_ah = irdma_create_ah_stub; in irdma_set_device_iw_ops()
3347 dev_ops->destroy_ah = irdma_destroy_ah_stub; in irdma_set_device_iw_ops()
3348 dev_ops->get_port_immutable = irdma_iw_port_immutable; in irdma_set_device_iw_ops()
3349 dev_ops->modify_qp = irdma_modify_qp; in irdma_set_device_iw_ops()
3350 dev_ops->query_gid = irdma_query_gid; in irdma_set_device_iw_ops()
3351 dev_ops->query_pkey = irdma_iw_query_pkey; in irdma_set_device_iw_ops()
3360 * irdma_init_roce_device - initialization of roce rdma device
3367 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; in irdma_init_roce_device()
3368 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, in irdma_init_roce_device()
3369 if_getlladdr(iwdev->netdev)); in irdma_init_roce_device()
3370 irdma_set_device_roce_ops(&iwdev->ibdev); in irdma_init_roce_device()
3371 if (iwdev->rf->rdma_ver == IRDMA_GEN_2) in irdma_init_roce_device()
3372 irdma_set_device_mcast_ops(&iwdev->ibdev); in irdma_init_roce_device()
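/*
 * Multicast verbs are wired up only for GEN_2 devices in RoCE mode; GEN_1
 * and iWARP mode never expose attach/detach_mcast.
 */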
3376 * irdma_init_iw_device - initialization of iwarp rdma device
3382 if_t netdev = iwdev->netdev; in irdma_init_iw_device()
3384 iwdev->ibdev.node_type = RDMA_NODE_RNIC; in irdma_init_iw_device()
3385 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, in irdma_init_iw_device()
3387 iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL); in irdma_init_iw_device()
3388 if (!iwdev->ibdev.iwcm) in irdma_init_iw_device()
3389 return -ENOMEM; in irdma_init_iw_device()
3391 iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref; in irdma_init_iw_device()
3392 iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref; in irdma_init_iw_device()
3393 iwdev->ibdev.iwcm->get_qp = irdma_get_qp; in irdma_init_iw_device()
3394 iwdev->ibdev.iwcm->connect = irdma_connect; in irdma_init_iw_device()
3395 iwdev->ibdev.iwcm->accept = irdma_accept; in irdma_init_iw_device()
3396 iwdev->ibdev.iwcm->reject = irdma_reject; in irdma_init_iw_device()
3397 iwdev->ibdev.iwcm->create_listen = irdma_create_listen; in irdma_init_iw_device()
3398 iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen; in irdma_init_iw_device()
3399 memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev), in irdma_init_iw_device()
3400 sizeof(iwdev->ibdev.iwcm->ifname)); in irdma_init_iw_device()
3401 irdma_set_device_iw_ops(&iwdev->ibdev); in irdma_init_iw_device()
3407 * irdma_init_rdma_device - initialization of rdma device
3415 iwdev->ibdev.owner = THIS_MODULE; in irdma_init_rdma_device()
3416 iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER; in irdma_init_rdma_device()
3419 if (iwdev->roce_mode) { in irdma_init_rdma_device()
3427 iwdev->ibdev.phys_port_cnt = 1; in irdma_init_rdma_device()
3428 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; in irdma_init_rdma_device()
3429 iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev; in irdma_init_rdma_device()
3430 set_ibdev_dma_device(iwdev->ibdev, &iwdev->rf->pcidev->dev); in irdma_init_rdma_device()
3431 irdma_set_device_ops(&iwdev->ibdev); in irdma_init_rdma_device()
3432 if (iwdev->rf->rdma_ver == IRDMA_GEN_1) in irdma_init_rdma_device()
3433 irdma_set_device_gen1_ops(&iwdev->ibdev); in irdma_init_rdma_device()
3439 * irdma_port_ibevent - indicate port event
3447 event.device = &iwdev->ibdev; in irdma_port_ibevent()
3450 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; in irdma_port_ibevent()
3455 * irdma_ib_unregister_device - unregister rdma device from IB
3462 iwdev->iw_status = 0; in irdma_ib_unregister_device()
3464 ib_unregister_device(&iwdev->ibdev); in irdma_ib_unregister_device()
3465 dev_put(iwdev->netdev); in irdma_ib_unregister_device()
3466 kfree(iwdev->ibdev.iwcm); in irdma_ib_unregister_device()
3467 iwdev->ibdev.iwcm = NULL; in irdma_ib_unregister_device()
3471 * irdma_ib_register_device - register irdma device to IB core
3483 dev_hold(iwdev->netdev); in irdma_ib_register_device()
3484 sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev)); in irdma_ib_register_device()
3485 ret = ib_register_device(&iwdev->ibdev, NULL); in irdma_ib_register_device()
3489 iwdev->iw_status = 1; in irdma_ib_register_device()
3495 kfree(iwdev->ibdev.iwcm); in irdma_ib_register_device()
3496 iwdev->ibdev.iwcm = NULL; in irdma_ib_register_device()
3497 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "Register RDMA device failed\n"); in irdma_ib_register_device()