Lines Matching +full:pa +full:- +full:stats

1 /*-
2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
4 * Copyright (c) 2018 - 2023 Intel Corporation
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
75 irdma_fw_major_ver(&iwdev->rf->sc_dev), in irdma_get_dev_fw_str()
76 irdma_fw_minor_ver(&iwdev->rf->sc_dev)); in irdma_get_dev_fw_str()
100 * irdma_alloc_mr - register stag for fast memory registration
110 struct irdma_device *iwdev = to_iwdev(pd->device); in irdma_alloc_mr()
116 int err_code = -ENOMEM; in irdma_alloc_mr()
120 return ERR_PTR(-ENOMEM); in irdma_alloc_mr()
124 err_code = -ENOMEM; in irdma_alloc_mr()
128 iwmr->stag = stag; in irdma_alloc_mr()
129 iwmr->ibmr.rkey = stag; in irdma_alloc_mr()
130 iwmr->ibmr.lkey = stag; in irdma_alloc_mr()
131 iwmr->ibmr.pd = pd; in irdma_alloc_mr()
132 iwmr->ibmr.device = pd->device; in irdma_alloc_mr()
133 iwpbl = &iwmr->iwpbl; in irdma_alloc_mr()
134 iwpbl->iwmr = iwmr; in irdma_alloc_mr()
135 iwmr->type = IRDMA_MEMREG_TYPE_MEM; in irdma_alloc_mr()
136 palloc = &iwpbl->pble_alloc; in irdma_alloc_mr()
137 iwmr->page_cnt = max_num_sg; in irdma_alloc_mr()
139 iwmr->len = max_num_sg * PAGE_SIZE; in irdma_alloc_mr()
140 status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt, in irdma_alloc_mr()
149 iwpbl->pbl_allocated = true; in irdma_alloc_mr()
151 return &iwmr->ibmr; in irdma_alloc_mr()
153 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); in irdma_alloc_mr()
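
A kernel consumer reaches irdma_alloc_mr() through the core ib_alloc_mr()/ib_map_mr_sg() verbs. A minimal caller-side sketch, assuming the Linux-style kernel verbs API (the helper name build_frmr() and its arguments are illustrative, not from this file):

    #include <rdma/ib_verbs.h>

    /* Allocate a fast-reg MR and pack a scatterlist into its page list;
     * ib_alloc_mr() is what lands in the driver's irdma_alloc_mr() above. */
    static struct ib_mr *build_frmr(struct ib_pd *pd, struct scatterlist *sgl,
                                    int nents)
    {
            struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
            int mapped;

            if (IS_ERR(mr))
                    return mr;

            mapped = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
            if (mapped != nents) {
                    ib_dereg_mr(mr);
                    return ERR_PTR(mapped < 0 ? mapped : -EINVAL);
            }
            return mr; /* activate later with an IB_WR_REG_MR work request */
    }
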
165 * irdma_alloc_ucontext - Allocate the user context data structure
170 * user-mode client.
175 struct ib_device *ibdev = uctx->device; in irdma_alloc_ucontext()
180 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; in irdma_alloc_ucontext()
182 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || in irdma_alloc_ucontext()
183 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) in irdma_alloc_ucontext()
184 return -EINVAL; in irdma_alloc_ucontext()
186 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) in irdma_alloc_ucontext()
187 return -EINVAL; in irdma_alloc_ucontext()
192 ucontext->iwdev = iwdev; in irdma_alloc_ucontext()
193 ucontext->abi_ver = req.userspace_ver; in irdma_alloc_ucontext()
196 ucontext->use_raw_attrs = true; in irdma_alloc_ucontext()
199 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { in irdma_alloc_ucontext()
200 if (uk_attrs->hw_rev != IRDMA_GEN_1) in irdma_alloc_ucontext()
201 return -EOPNOTSUPP; in irdma_alloc_ucontext()
203 ucontext->legacy_mode = true; in irdma_alloc_ucontext()
204 uresp.max_qps = iwdev->rf->max_qp; in irdma_alloc_ucontext()
205 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; in irdma_alloc_ucontext()
206 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; in irdma_alloc_ucontext()
208 if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) in irdma_alloc_ucontext()
209 return -EFAULT; in irdma_alloc_ucontext()
214 uresp.feature_flags = uk_attrs->feature_flags; in irdma_alloc_ucontext()
215 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; in irdma_alloc_ucontext()
216 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; in irdma_alloc_ucontext()
217 uresp.max_hw_inline = uk_attrs->max_hw_inline; in irdma_alloc_ucontext()
218 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; in irdma_alloc_ucontext()
219 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; in irdma_alloc_ucontext()
220 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; in irdma_alloc_ucontext()
221 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; in irdma_alloc_ucontext()
222 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; in irdma_alloc_ucontext()
223 uresp.hw_rev = uk_attrs->hw_rev; in irdma_alloc_ucontext()
225 uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size; in irdma_alloc_ucontext()
229 (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; in irdma_alloc_ucontext()
230 ucontext->db_mmap_entry = in irdma_alloc_ucontext()
234 if (!ucontext->db_mmap_entry) { in irdma_alloc_ucontext()
235 return -ENOMEM; in irdma_alloc_ucontext()
239 min(sizeof(uresp), udata->outlen))) { in irdma_alloc_ucontext()
240 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); in irdma_alloc_ucontext()
241 return -EFAULT; in irdma_alloc_ucontext()
245 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); in irdma_alloc_ucontext()
246 spin_lock_init(&ucontext->cq_reg_mem_list_lock); in irdma_alloc_ucontext()
247 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); in irdma_alloc_ucontext()
248 spin_lock_init(&ucontext->qp_reg_mem_list_lock); in irdma_alloc_ucontext()
249 INIT_LIST_HEAD(&ucontext->vma_list); in irdma_alloc_ucontext()
250 mutex_init(&ucontext->vma_list_mutex); in irdma_alloc_ucontext()
255 irdma_dev_err(&iwdev->ibdev, in irdma_alloc_ucontext()
258 return -EINVAL; in irdma_alloc_ucontext()
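
On the user side this verb runs when a process opens the device; a minimal libibverbs sketch of that trigger (standard API, nothing irdma-specific assumed):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    int main(void)
    {
            int num;
            struct ibv_device **list = ibv_get_device_list(&num);
            struct ibv_context *ctx;

            if (!list || !num)
                    return 1;
            /* ibv_open_device() invokes the kernel alloc_ucontext verb,
             * which fills in the ABI/feature response built above. */
            ctx = ibv_open_device(list[0]);
            if (ctx) {
                    printf("opened %s\n", ibv_get_device_name(list[0]));
                    ibv_close_device(ctx);
            }
            ibv_free_device_list(list);
            return 0;
    }
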
263 * irdma_dealloc_ucontext - deallocate the user context data structure
271 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); in irdma_dealloc_ucontext()
279 * irdma_alloc_pd - allocate protection domain
287 struct irdma_device *iwdev = to_iwdev(pd->device); in irdma_alloc_pd()
288 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_alloc_pd()
289 struct irdma_pci_f *rf = iwdev->rf; in irdma_alloc_pd()
295 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) in irdma_alloc_pd()
296 return -EINVAL; in irdma_alloc_pd()
298 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, in irdma_alloc_pd()
299 &rf->next_pd); in irdma_alloc_pd()
303 sc_pd = &iwpd->sc_pd; in irdma_alloc_pd()
308 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); in irdma_alloc_pd()
311 min(sizeof(uresp), udata->outlen))) { in irdma_alloc_pd()
312 err = -EFAULT; in irdma_alloc_pd()
319 spin_lock_init(&iwpd->udqp_list_lock); in irdma_alloc_pd()
320 INIT_LIST_HEAD(&iwpd->udqp_list); in irdma_alloc_pd()
326 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); in irdma_alloc_pd()
336 struct irdma_device *iwdev = to_iwdev(ibpd->device); in irdma_dealloc_pd()
338 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); in irdma_dealloc_pd()
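
The matching userspace lifecycle, for reference; a short sketch assuming an already-open ibv_context (see the open-device sketch above):

    #include <infiniband/verbs.h>

    /* Drives irdma_alloc_pd()/irdma_dealloc_pd() via the uverbs channel. */
    static int pd_roundtrip(struct ibv_context *ctx)
    {
            struct ibv_pd *pd = ibv_alloc_pd(ctx);

            if (!pd)
                    return -1;
            /* ... create QPs/CQs/MRs under this PD ... */
            return ibv_dealloc_pd(pd); /* nonzero while objects still hold it */
    }
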
344 * irdma_find_qp_update_qs - update QS handle for UD QPs
359 spin_lock_irqsave(&pd->udqp_list_lock, flags); in irdma_find_qp_update_qs()
360 list_for_each_safe(list_node, tmp_node, &pd->udqp_list) { in irdma_find_qp_update_qs()
363 irdma_qp_add_ref(&iwqp->ibqp); in irdma_find_qp_update_qs()
365 if (iwqp->sc_qp.qs_handle == iwqp->sc_qp.vsi->qos[user_pri].qs_handle) { in irdma_find_qp_update_qs()
366 if (iwqp->ctx_info.user_pri == user_pri) { in irdma_find_qp_update_qs()
368 irdma_qp_rem_ref(&iwqp->ibqp); in irdma_find_qp_update_qs()
376 irdma_qp_rem_ref(&iwqp->ibqp); in irdma_find_qp_update_qs()
377 spin_unlock_irqrestore(&pd->udqp_list_lock, flags); in irdma_find_qp_update_qs()
380 work->iwqp = iwqp; in irdma_find_qp_update_qs()
381 work->user_prio = user_pri; in irdma_find_qp_update_qs()
382 work->qs_change = qs_change; in irdma_find_qp_update_qs()
383 INIT_WORK(&work->work, irdma_udqp_qs_worker); in irdma_find_qp_update_qs()
385 irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND); in irdma_find_qp_update_qs()
386 queue_work(rf->iwdev->cleanup_wq, &work->work); in irdma_find_qp_update_qs()
388 spin_unlock_irqrestore(&pd->udqp_list_lock, flags); in irdma_find_qp_update_qs()
399 ah_info->ipv4_valid = true; in irdma_fill_ah_info()
400 ah_info->dest_ip_addr[0] = in irdma_fill_ah_info()
401 ntohl(dgid_addr->saddr_in.sin_addr.s_addr); in irdma_fill_ah_info()
402 ah_info->src_ip_addr[0] = in irdma_fill_ah_info()
403 ntohl(sgid_addr->saddr_in.sin_addr.s_addr); in irdma_fill_ah_info()
405 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], in irdma_fill_ah_info()
406 ah_info->dest_ip_addr[0]); in irdma_fill_ah_info()
408 if (ipv4_is_multicast(dgid_addr->saddr_in.sin_addr.s_addr)) { in irdma_fill_ah_info()
409 irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac); in irdma_fill_ah_info()
412 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, in irdma_fill_ah_info()
413 dgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32); in irdma_fill_ah_info()
414 irdma_copy_ip_ntohl(ah_info->src_ip_addr, in irdma_fill_ah_info()
415 sgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32); in irdma_fill_ah_info()
416 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, in irdma_fill_ah_info()
417 ah_info->dest_ip_addr); in irdma_fill_ah_info()
418 if (rdma_is_multicast_addr(&dgid_addr->saddr_in6.sin6_addr)) { in irdma_fill_ah_info()
419 irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac); in irdma_fill_ah_info()
438 if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev)) in irdma_create_ah_vlan_tag()
439 ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev); in irdma_create_ah_vlan_tag()
441 ah_info->vlan_tag = VLAN_N_VID; in irdma_create_ah_vlan_tag()
443 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac); in irdma_create_ah_vlan_tag()
445 if (ah_info->dst_arpindex == -1) in irdma_create_ah_vlan_tag()
446 return -EINVAL; in irdma_create_ah_vlan_tag()
448 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) in irdma_create_ah_vlan_tag()
449 ah_info->vlan_tag = 0; in irdma_create_ah_vlan_tag()
451 if (ah_info->vlan_tag < VLAN_N_VID) { in irdma_create_ah_vlan_tag()
452 ah_info->insert_vlan_tag = true; in irdma_create_ah_vlan_tag()
453 vlan_prio = (u16)irdma_roce_get_vlan_prio(sgid_attr->ndev, in irdma_create_ah_vlan_tag()
454 rt_tos2priority(ah_info->tc_tos)); in irdma_create_ah_vlan_tag()
455 ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT; in irdma_create_ah_vlan_tag()
456 irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio); in irdma_create_ah_vlan_tag()
458 if (iwdev->roce_dcqcn_en) { in irdma_create_ah_vlan_tag()
459 ah_info->tc_tos &= ~ECN_CODE_PT_MASK; in irdma_create_ah_vlan_tag()
460 ah_info->tc_tos |= ECN_CODE_PT_VAL; in irdma_create_ah_vlan_tag()
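
The vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT line above packs the 3-bit 802.1p priority (PCP) into bits 15:13 of the 16-bit VLAN TCI, above the 12-bit VLAN ID. A self-contained illustration (make_tci() is a hypothetical helper; the shift and mask values match the usual if_vlan.h definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_VID_MASK   0x0fff  /* bits 11:0  - VLAN ID */
    #define VLAN_PRIO_SHIFT 13      /* bits 15:13 - PCP     */

    static uint16_t make_tci(uint16_t vid, uint16_t prio)
    {
            return (vid & VLAN_VID_MASK) | (uint16_t)(prio << VLAN_PRIO_SHIFT);
    }

    int main(void)
    {
            printf("tci=0x%04x\n", make_tci(100, 5)); /* prints tci=0xa064 */
            return 0;
    }
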
473 int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * in irdma_create_ah_wait()
476 sc_ah->ah_info.cqp_request; in irdma_create_ah_wait()
479 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); in irdma_create_ah_wait()
481 } while (!READ_ONCE(cqp_request->request_done) && --cnt); in irdma_create_ah_wait()
483 if (cnt && !cqp_request->compl_info.op_ret_val) { in irdma_create_ah_wait()
484 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_create_ah_wait()
485 sc_ah->ah_info.ah_valid = true; in irdma_create_ah_wait()
487 ret = !cnt ? -ETIMEDOUT : -EINVAL; in irdma_create_ah_wait()
488 irdma_dev_err(&rf->iwdev->ibdev, "CQP create AH error ret = %d opt_ret_val = %d", in irdma_create_ah_wait()
489 ret, cqp_request->compl_info.op_ret_val); in irdma_create_ah_wait()
490 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_create_ah_wait()
491 if (!cnt && !rf->reset) { in irdma_create_ah_wait()
492 rf->reset = true; in irdma_create_ah_wait()
493 rf->gen_ops.request_reset(rf); in irdma_create_ah_wait()
505 * irdma_create_ah - create address handle
518 struct irdma_pd *pd = to_iwpd(ib_ah->pd); in irdma_create_ah()
520 struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device); in irdma_create_ah()
523 struct irdma_pci_f *rf = iwdev->rf; in irdma_create_ah()
533 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) in irdma_create_ah()
534 return -EINVAL; in irdma_create_ah()
536 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, in irdma_create_ah()
537 rf->max_ah, &ah_id, &rf->next_ah); in irdma_create_ah()
542 ah->pd = pd; in irdma_create_ah()
543 sc_ah = &ah->sc_ah; in irdma_create_ah()
544 sc_ah->ah_info.ah_idx = ah_id; in irdma_create_ah()
545 sc_ah->ah_info.vsi = &iwdev->vsi; in irdma_create_ah()
546 irdma_sc_init_ah(&rf->sc_dev, sc_ah); in irdma_create_ah()
547 ah->sgid_index = attr->grh.sgid_index; in irdma_create_ah()
548 memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); in irdma_create_ah()
550 err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, in irdma_create_ah()
551 attr->grh.sgid_index, &sgid, &sgid_attr); in irdma_create_ah()
554 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_create_ah()
556 attr->grh.sgid_index, attr->port_num); in irdma_create_ah()
557 err = -EINVAL; in irdma_create_ah()
561 rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); in irdma_create_ah()
562 ah->av.attrs = *attr; in irdma_create_ah()
563 ah->av.net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid); in irdma_create_ah()
568 ah_info = &sc_ah->ah_info; in irdma_create_ah()
569 ah_info->ah_idx = ah_id; in irdma_create_ah()
570 ah_info->pd_idx = pd->sc_pd.pd_id; in irdma_create_ah()
571 ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev)); in irdma_create_ah()
573 if (attr->ah_flags & IB_AH_GRH) { in irdma_create_ah()
574 ah_info->flow_label = attr->grh.flow_label; in irdma_create_ah()
575 ah_info->hop_ttl = attr->grh.hop_limit; in irdma_create_ah()
576 ah_info->tc_tos = attr->grh.traffic_class; in irdma_create_ah()
579 ether_addr_copy(dmac, attr->dmac); in irdma_create_ah()
581 irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr, &dgid_addr, in irdma_create_ah()
582 dmac, ah->av.net_type); in irdma_create_ah()
588 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE, in irdma_create_ah()
591 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail"); in irdma_create_ah()
600 uresp.ah_id = ah->sc_ah.ah_info.ah_idx; in irdma_create_ah()
601 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)); in irdma_create_ah()
603 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, in irdma_create_ah()
611 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); in irdma_create_ah()
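
From userspace, irdma_create_ah() is reached via ibv_create_ah(); a minimal sketch with illustrative GRH values (sgid_index 0 and hop_limit 64 are assumptions, not requirements):

    #include <string.h>
    #include <infiniband/verbs.h>

    static struct ibv_ah *make_ah(struct ibv_pd *pd, const union ibv_gid *dgid)
    {
            struct ibv_ah_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.is_global = 1;          /* RoCE needs a GRH */
            attr.port_num = 1;
            attr.grh.dgid = *dgid;
            attr.grh.sgid_index = 0;     /* picks the source GID queried above */
            attr.grh.hop_limit = 64;     /* -> ah_info->hop_ttl */
            attr.grh.traffic_class = 0;  /* -> ah_info->tc_tos */
            return ibv_create_ah(pd, &attr);
    }
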
619 ether_addr_copy(dmac, attr->dmac); in irdma_ether_copy()
627 return -ENOSYS; in irdma_create_ah_stub()
638 * irdma_free_qp_rsrc - free up memory resources for qp
644 struct irdma_device *iwdev = iwqp->iwdev; in irdma_free_qp_rsrc()
645 struct irdma_pci_f *rf = iwdev->rf; in irdma_free_qp_rsrc()
646 u32 qp_num = iwqp->ibqp.qp_num; in irdma_free_qp_rsrc()
648 irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); in irdma_free_qp_rsrc()
650 if (iwqp->sc_qp.vsi) { in irdma_free_qp_rsrc()
651 irdma_qp_rem_qos(&iwqp->sc_qp); in irdma_free_qp_rsrc()
652 iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, in irdma_free_qp_rsrc()
653 iwqp->sc_qp.user_pri); in irdma_free_qp_rsrc()
657 irdma_free_rsrc(rf, rf->allocated_qps, qp_num); in irdma_free_qp_rsrc()
658 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem); in irdma_free_qp_rsrc()
659 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem); in irdma_free_qp_rsrc()
660 kfree(iwqp->kqp.sig_trk_mem); in irdma_free_qp_rsrc()
661 iwqp->kqp.sig_trk_mem = NULL; in irdma_free_qp_rsrc()
662 kfree(iwqp->kqp.sq_wrid_mem); in irdma_free_qp_rsrc()
663 kfree(iwqp->kqp.rq_wrid_mem); in irdma_free_qp_rsrc()
664 kfree(iwqp->sg_list); in irdma_free_qp_rsrc()
669 * irdma_create_qp - create qp
682 struct irdma_device *iwdev = to_iwdev(ibpd->device); in irdma_create_qp()
683 struct irdma_pci_f *rf = iwdev->rf; in irdma_create_qp()
690 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_qp()
691 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; in irdma_create_qp()
700 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN || in irdma_create_qp()
701 udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN)) in irdma_create_qp()
702 return ERR_PTR(-EINVAL); in irdma_create_qp()
704 init_info.vsi = &iwdev->vsi; in irdma_create_qp()
706 init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; in irdma_create_qp()
707 init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; in irdma_create_qp()
708 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; in irdma_create_qp()
709 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; in irdma_create_qp()
710 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; in irdma_create_qp()
714 return ERR_PTR(-ENOMEM); in irdma_create_qp()
716 iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list), in irdma_create_qp()
718 if (!iwqp->sg_list) { in irdma_create_qp()
720 return ERR_PTR(-ENOMEM); in irdma_create_qp()
723 qp = &iwqp->sc_qp; in irdma_create_qp()
724 qp->qp_uk.back_qp = iwqp; in irdma_create_qp()
725 qp->qp_uk.lock = &iwqp->lock; in irdma_create_qp()
726 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; in irdma_create_qp()
728 iwqp->iwdev = iwdev; in irdma_create_qp()
729 iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE; in irdma_create_qp()
730 iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem, in irdma_create_qp()
731 iwqp->q2_ctx_mem.size, in irdma_create_qp()
733 if (!iwqp->q2_ctx_mem.va) { in irdma_create_qp()
734 kfree(iwqp->sg_list); in irdma_create_qp()
736 return ERR_PTR(-ENOMEM); in irdma_create_qp()
739 init_info.q2 = iwqp->q2_ctx_mem.va; in irdma_create_qp()
740 init_info.q2_pa = iwqp->q2_ctx_mem.pa; in irdma_create_qp()
744 if (init_attr->qp_type == IB_QPT_GSI) in irdma_create_qp()
747 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, in irdma_create_qp()
748 &qp_num, &rf->next_qp); in irdma_create_qp()
752 iwqp->iwpd = iwpd; in irdma_create_qp()
753 iwqp->ibqp.qp_num = qp_num; in irdma_create_qp()
754 qp = &iwqp->sc_qp; in irdma_create_qp()
755 iwqp->iwscq = to_iwcq(init_attr->send_cq); in irdma_create_qp()
756 iwqp->iwrcq = to_iwcq(init_attr->recv_cq); in irdma_create_qp()
757 iwqp->host_ctx.va = init_info.host_ctx; in irdma_create_qp()
758 iwqp->host_ctx.pa = init_info.host_ctx_pa; in irdma_create_qp()
759 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; in irdma_create_qp()
761 init_info.pd = &iwpd->sc_pd; in irdma_create_qp()
762 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; in irdma_create_qp()
763 if (!rdma_protocol_roce(&iwdev->ibdev, 1)) in irdma_create_qp()
765 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; in irdma_create_qp()
766 init_waitqueue_head(&iwqp->waitq); in irdma_create_qp()
767 init_waitqueue_head(&iwqp->mod_qp_waitq); in irdma_create_qp()
769 spin_lock_init(&iwqp->dwork_flush_lock); in irdma_create_qp()
772 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; in irdma_create_qp()
775 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); in irdma_create_qp()
781 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "setup qp failed\n"); in irdma_create_qp()
785 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { in irdma_create_qp()
786 if (init_attr->qp_type == IB_QPT_RC) { in irdma_create_qp()
803 err_code = -EPROTO; in irdma_create_qp()
804 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "qp_init fail\n"); in irdma_create_qp()
808 ctx_info = &iwqp->ctx_info; in irdma_create_qp()
809 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; in irdma_create_qp()
810 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; in irdma_create_qp()
812 if (rdma_protocol_roce(&iwdev->ibdev, 1)) in irdma_create_qp()
821 atomic_set(&iwqp->refcnt, 1); in irdma_create_qp()
822 spin_lock_init(&iwqp->lock); in irdma_create_qp()
823 spin_lock_init(&iwqp->sc_qp.pfpdu.lock); in irdma_create_qp()
824 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; in irdma_create_qp()
825 rf->qp_table[qp_num] = iwqp; in irdma_create_qp()
827 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { in irdma_create_qp()
828 if (dev->ws_add(&iwdev->vsi, 0)) { in irdma_create_qp()
829 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); in irdma_create_qp()
830 err_code = -EINVAL; in irdma_create_qp()
834 irdma_qp_add_qos(&iwqp->sc_qp); in irdma_create_qp()
835 spin_lock_irqsave(&iwpd->udqp_list_lock, flags); in irdma_create_qp()
836 if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) in irdma_create_qp()
837 list_add_tail(&iwqp->ud_list_elem, &iwpd->udqp_list); in irdma_create_qp()
838 spin_unlock_irqrestore(&iwpd->udqp_list_lock, flags); in irdma_create_qp()
843 if (udata->outlen == IRDMA_CREATE_QP_MIN_RESP_LEN) { in irdma_create_qp()
847 if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) { in irdma_create_qp()
849 if (qp->qp_uk.start_wqe_idx) { in irdma_create_qp()
851 uresp.start_wqe_idx = qp->qp_uk.start_wqe_idx; in irdma_create_qp()
858 uresp.qp_caps = qp->qp_uk.qp_caps; in irdma_create_qp()
861 min(sizeof(uresp), udata->outlen)); in irdma_create_qp()
863 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n"); in irdma_create_qp()
864 irdma_destroy_qp(&iwqp->ibqp, udata); in irdma_create_qp()
869 init_completion(&iwqp->free_qp); in irdma_create_qp()
870 return &iwqp->ibqp; in irdma_create_qp()
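
For orientation, the init_attr->cap fields copied at the top of irdma_create_qp() come straight from the userspace ibv_qp_init_attr caps; a short sketch (queue depths are arbitrary example values):

    #include <string.h>
    #include <infiniband/verbs.h>

    static struct ibv_qp *make_rc_qp(struct ibv_pd *pd, struct ibv_cq *cq)
    {
            struct ibv_qp_init_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.qp_type = IBV_QPT_RC;
            attr.send_cq = cq;
            attr.recv_cq = cq;
            attr.cap.max_send_wr = 64;     /* -> sq_size */
            attr.cap.max_recv_wr = 64;     /* -> rq_size */
            attr.cap.max_send_sge = 4;     /* -> max_sq_frag_cnt */
            attr.cap.max_recv_sge = 4;     /* -> max_rq_frag_cnt */
            attr.cap.max_inline_data = 48; /* -> max_inline_data */
            attr.sq_sig_all = 1;           /* -> iwqp->sig_all */
            return ibv_create_qp(pd, &attr);
    }
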
879 * irdma_destroy_qp - destroy qp
887 struct irdma_device *iwdev = iwqp->iwdev; in irdma_destroy_qp()
890 if (iwqp->sc_qp.qp_uk.destroy_pending) in irdma_destroy_qp()
892 iwqp->sc_qp.qp_uk.destroy_pending = true; in irdma_destroy_qp()
894 spin_lock_irqsave(&iwqp->iwpd->udqp_list_lock, flags); in irdma_destroy_qp()
895 if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) in irdma_destroy_qp()
896 list_del(&iwqp->ud_list_elem); in irdma_destroy_qp()
897 spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags); in irdma_destroy_qp()
899 if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE) in irdma_destroy_qp()
900 irdma_modify_qp_to_err(&iwqp->sc_qp); in irdma_destroy_qp()
902 if (!iwqp->user_mode) { in irdma_destroy_qp()
903 if (iwqp->iwscq) { in irdma_destroy_qp()
904 irdma_clean_cqes(iwqp, iwqp->iwscq); in irdma_destroy_qp()
905 if (iwqp->iwrcq != iwqp->iwscq) in irdma_destroy_qp()
906 irdma_clean_cqes(iwqp, iwqp->iwrcq); in irdma_destroy_qp()
909 irdma_qp_rem_ref(&iwqp->ibqp); in irdma_destroy_qp()
910 wait_for_completion(&iwqp->free_qp); in irdma_destroy_qp()
912 if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp)) in irdma_destroy_qp()
913 return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE; in irdma_destroy_qp()
922 * irdma_create_cq - create cq
934 struct ib_device *ibdev = ibcq->device; in irdma_create_cq()
936 struct irdma_pci_f *rf = iwdev->rf; in irdma_create_cq()
940 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_cq()
948 int entries = attr->cqe; in irdma_create_cq()
951 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); in irdma_create_cq()
955 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || in irdma_create_cq()
956 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) in irdma_create_cq()
957 return -EINVAL; in irdma_create_cq()
958 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, in irdma_create_cq()
959 &rf->next_cq); in irdma_create_cq()
962 cq = &iwcq->sc_cq; in irdma_create_cq()
963 cq->back_cq = iwcq; in irdma_create_cq()
964 atomic_set(&iwcq->refcnt, 1); in irdma_create_cq()
965 spin_lock_init(&iwcq->lock); in irdma_create_cq()
966 INIT_LIST_HEAD(&iwcq->resize_list); in irdma_create_cq()
967 INIT_LIST_HEAD(&iwcq->cmpl_generated); in irdma_create_cq()
969 ukinfo->cq_size = max(entries, 4); in irdma_create_cq()
970 ukinfo->cq_id = cq_num; in irdma_create_cq()
971 cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false; in irdma_create_cq()
972 ukinfo->avoid_mem_cflct = cqe_64byte_ena; in irdma_create_cq()
973 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; in irdma_create_cq()
974 atomic_set(&iwcq->armed, 0); in irdma_create_cq()
975 if (attr->comp_vector < rf->ceqs_count) in irdma_create_cq()
976 info.ceq_id = attr->comp_vector; in irdma_create_cq()
980 info.vsi = &iwdev->vsi; in irdma_create_cq()
990 iwcq->user_mode = true; in irdma_create_cq()
994 min(sizeof(req), udata->inlen))) { in irdma_create_cq()
995 err_code = -EFAULT; in irdma_create_cq()
999 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); in irdma_create_cq()
1001 &ucontext->cq_reg_mem_list); in irdma_create_cq()
1002 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); in irdma_create_cq()
1004 err_code = -EPROTO; in irdma_create_cq()
1007 iwcq->iwpbl = iwpbl; in irdma_create_cq()
1008 iwcq->cq_mem_size = 0; in irdma_create_cq()
1009 cqmr = &iwpbl->cq_mr; in irdma_create_cq()
1011 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & in irdma_create_cq()
1012 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { in irdma_create_cq()
1013 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); in irdma_create_cq()
1015 &ucontext->cq_reg_mem_list); in irdma_create_cq()
1016 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); in irdma_create_cq()
1019 err_code = -EPROTO; in irdma_create_cq()
1022 iwcq->iwpbl_shadow = iwpbl_shadow; in irdma_create_cq()
1023 cqmr_shadow = &iwpbl_shadow->cq_mr; in irdma_create_cq()
1024 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; in irdma_create_cq()
1025 cqmr->split = true; in irdma_create_cq()
1027 info.shadow_area_pa = cqmr->shadow; in irdma_create_cq()
1029 if (iwpbl->pbl_allocated) { in irdma_create_cq()
1032 info.first_pm_pbl_idx = cqmr->cq_pbl.idx; in irdma_create_cq()
1034 info.cq_base_pa = cqmr->cq_pbl.addr; in irdma_create_cq()
1040 if (entries < 1 || entries > rf->max_cqe) { in irdma_create_cq()
1041 err_code = -EINVAL; in irdma_create_cq()
1046 if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) in irdma_create_cq()
1048 ukinfo->cq_size = entries; in irdma_create_cq()
1054 iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE); in irdma_create_cq()
1055 iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem, in irdma_create_cq()
1056 iwcq->kmem.size, IRDMA_HW_PAGE_SIZE); in irdma_create_cq()
1057 if (!iwcq->kmem.va) { in irdma_create_cq()
1058 err_code = -ENOMEM; in irdma_create_cq()
1062 iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3; in irdma_create_cq()
1063 iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw, in irdma_create_cq()
1064 &iwcq->kmem_shadow, in irdma_create_cq()
1065 iwcq->kmem_shadow.size, in irdma_create_cq()
1068 if (!iwcq->kmem_shadow.va) { in irdma_create_cq()
1069 err_code = -ENOMEM; in irdma_create_cq()
1072 info.shadow_area_pa = iwcq->kmem_shadow.pa; in irdma_create_cq()
1073 ukinfo->shadow_area = iwcq->kmem_shadow.va; in irdma_create_cq()
1074 ukinfo->cq_base = iwcq->kmem.va; in irdma_create_cq()
1075 info.cq_base_pa = iwcq->kmem.pa; in irdma_create_cq()
1081 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n"); in irdma_create_cq()
1082 err_code = -EPROTO; in irdma_create_cq()
1086 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_create_cq()
1088 err_code = -ENOMEM; in irdma_create_cq()
1091 cqp_info = &cqp_request->info; in irdma_create_cq()
1092 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; in irdma_create_cq()
1093 cqp_info->post_sq = 1; in irdma_create_cq()
1094 cqp_info->in.u.cq_create.cq = cq; in irdma_create_cq()
1095 cqp_info->in.u.cq_create.check_overflow = true; in irdma_create_cq()
1096 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; in irdma_create_cq()
1098 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_create_cq()
1100 err_code = -ENOMEM; in irdma_create_cq()
1110 min(sizeof(resp), udata->outlen))) { in irdma_create_cq()
1111 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy to user data\n"); in irdma_create_cq()
1112 err_code = -EPROTO; in irdma_create_cq()
1117 rf->cq_table[cq_num] = iwcq; in irdma_create_cq()
1118 init_completion(&iwcq->free_cq); in irdma_create_cq()
1124 if (!iwcq->user_mode) { in irdma_create_cq()
1125 irdma_free_dma_mem(dev->hw, &iwcq->kmem); in irdma_create_cq()
1126 irdma_free_dma_mem(dev->hw, &iwcq->kmem_shadow); in irdma_create_cq()
1129 irdma_free_rsrc(rf, rf->allocated_cqs, cq_num); in irdma_create_cq()
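
The corresponding userspace entry point is ibv_create_cq(); note from the validation above that entries must stay within 1..rf->max_cqe, and comp_vector only selects a CEQ when it is below rf->ceqs_count. A minimal sketch:

    #include <infiniband/verbs.h>

    static struct ibv_cq *make_cq(struct ibv_context *ctx, int entries,
                                  int comp_vector)
    {
            return ibv_create_cq(ctx, entries, NULL /* cq_context */,
                                 NULL /* completion channel */, comp_vector);
    }
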
1134 * irdma_copy_user_pgaddrs - copy user page addresses into the PBLEs locally
1144 struct ib_umem *region = iwmr->region; in irdma_copy_user_pgaddrs()
1145 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_copy_user_pgaddrs()
1149 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; in irdma_copy_user_pgaddrs()
1154 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf; in irdma_copy_user_pgaddrs()
1155 for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { in irdma_copy_user_pgaddrs()
1156 chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size); in irdma_copy_user_pgaddrs()
1157 if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page) in irdma_copy_user_pgaddrs()
1158 iwpbl->qp_mr.sq_page = sg_page(sg); in irdma_copy_user_pgaddrs()
1160 pg_addr = sg_dma_address(sg) + (i * iwmr->page_size); in irdma_copy_user_pgaddrs()
1162 *pbl = pg_addr & iwmr->page_msk; in irdma_copy_user_pgaddrs()
1163 else if (!(pg_addr & ~iwmr->page_msk)) in irdma_copy_user_pgaddrs()
1167 if (++pbl_cnt == palloc->total_cnt) in irdma_copy_user_pgaddrs()
1175 * irdma_destroy_ah - Destroy address handle
1183 struct irdma_device *iwdev = to_iwdev(ibah->device); in irdma_destroy_ah()
1186 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, in irdma_destroy_ah()
1189 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, in irdma_destroy_ah()
1190 ah->sc_ah.ah_info.ah_idx); in irdma_destroy_ah()
1198 struct irdma_device *iwdev = to_iwdev(ib_mr->device); in irdma_dereg_mr()
1199 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_dereg_mr()
1202 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { in irdma_dereg_mr()
1203 if (iwmr->region) { in irdma_dereg_mr()
1217 irdma_free_stag(iwdev, iwmr->stag); in irdma_dereg_mr()
1219 if (iwpbl->pbl_allocated) in irdma_dereg_mr()
1220 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); in irdma_dereg_mr()
1222 if (iwmr->region) in irdma_dereg_mr()
1223 ib_umem_release(iwmr->region); in irdma_dereg_mr()
1231 * irdma_rereg_user_mr - Re-Register a user memory region
1232 * @ibmr: ib mem to access iwarp mr pointer
1240 struct irdma_device *iwdev = to_iwdev(ib_mr->device); in irdma_rereg_user_mr()
1242 struct irdma_pbl *iwpbl = &iwmr->iwpbl; in irdma_rereg_user_mr()
1245 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) in irdma_rereg_user_mr()
1246 return -EINVAL; in irdma_rereg_user_mr()
1249 return -EOPNOTSUPP; in irdma_rereg_user_mr()
1256 iwmr->access = new_access; in irdma_rereg_user_mr()
1259 iwmr->ibmr.pd = new_pd; in irdma_rereg_user_mr()
1260 iwmr->ibmr.device = new_pd->device; in irdma_rereg_user_mr()
1264 if (iwpbl->pbl_allocated) { in irdma_rereg_user_mr()
1265 irdma_free_pble(iwdev->rf->pble_rsrc, in irdma_rereg_user_mr()
1266 &iwpbl->pble_alloc); in irdma_rereg_user_mr()
1267 iwpbl->pbl_allocated = false; in irdma_rereg_user_mr()
1269 if (iwmr->region) { in irdma_rereg_user_mr()
1270 ib_umem_release(iwmr->region); in irdma_rereg_user_mr()
1271 iwmr->region = NULL; in irdma_rereg_user_mr()
1279 ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); in irdma_rereg_user_mr()
1294 struct irdma_av *av = &iwqp->roce_ah.av; in kc_irdma_set_roce_cm_info()
1296 ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num, in kc_irdma_set_roce_cm_info()
1297 attr->ah_attr.grh.sgid_index, &sgid, in kc_irdma_set_roce_cm_info()
1304 ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, if_getlladdr(sgid_attr.ndev)); in kc_irdma_set_roce_cm_info()
1307 av->net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid); in kc_irdma_set_roce_cm_info()
1308 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid); in kc_irdma_set_roce_cm_info()
1310 iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri; in kc_irdma_set_roce_cm_info()
1316 * irdma_destroy_cq - destroy cq
1323 struct irdma_device *iwdev = to_iwdev(ib_cq->device); in irdma_destroy_cq()
1325 struct irdma_sc_cq *cq = &iwcq->sc_cq; in irdma_destroy_cq()
1326 struct irdma_sc_dev *dev = cq->dev; in irdma_destroy_cq()
1327 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; in irdma_destroy_cq()
1331 spin_lock_irqsave(&iwcq->lock, flags); in irdma_destroy_cq()
1332 if (!list_empty(&iwcq->cmpl_generated)) in irdma_destroy_cq()
1334 if (!list_empty(&iwcq->resize_list)) in irdma_destroy_cq()
1336 spin_unlock_irqrestore(&iwcq->lock, flags); in irdma_destroy_cq()
1339 wait_for_completion(&iwcq->free_cq); in irdma_destroy_cq()
1341 irdma_cq_wq_destroy(iwdev->rf, cq); in irdma_destroy_cq()
1343 spin_lock_irqsave(&iwceq->ce_lock, flags); in irdma_destroy_cq()
1345 spin_unlock_irqrestore(&iwceq->ce_lock, flags); in irdma_destroy_cq()
1346 irdma_cq_free_rsrc(iwdev->rf, iwcq); in irdma_destroy_cq()
1350 * kc_set_loc_seq_num_mss - Set local seq number and mss
1359 cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec; in kc_set_loc_seq_num_mss()
1360 if (cm_node->iwdev->vsi.mtu > 1500 && in kc_set_loc_seq_num_mss()
1361 2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd) in kc_set_loc_seq_num_mss()
1362 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? in kc_set_loc_seq_num_mss()
1363 (1500 - IRDMA_MTU_TO_MSS_IPV4) : in kc_set_loc_seq_num_mss()
1364 (1500 - IRDMA_MTU_TO_MSS_IPV6); in kc_set_loc_seq_num_mss()
1366 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? in kc_set_loc_seq_num_mss()
1367 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) : in kc_set_loc_seq_num_mss()
1368 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6); in kc_set_loc_seq_num_mss()
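
The MSS rule above subtracts the IP+TCP header overhead from the MTU, but first clamps to a 1500-byte MTU when jumbo frames would exceed half the receive window. A standalone restatement, assuming the usual 40/60-byte IPv4/IPv6-plus-TCP overheads behind the IRDMA_MTU_TO_MSS_* constants:

    #include <stdbool.h>
    #include <stdint.h>

    #define MTU_TO_MSS_IPV4 40 /* assumed: 20B IPv4 + 20B TCP headers */
    #define MTU_TO_MSS_IPV6 60 /* assumed: 40B IPv6 + 20B TCP headers */

    static uint16_t calc_mss(uint32_t mtu, uint32_t rcv_wnd, bool ipv4)
    {
            uint32_t eff_mtu = (mtu > 1500 && 2 * mtu > rcv_wnd) ? 1500 : mtu;

            return (uint16_t)(eff_mtu - (ipv4 ? MTU_TO_MSS_IPV4
                                              : MTU_TO_MSS_IPV6));
    }
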
1372 * irdma_disassociate_ucontext - Disassociate user context
1389 iwdev = hdl->iwdev; in ib_device_get_by_netdev()
1390 if (netdev == iwdev->netdev) { in ib_device_get_by_netdev()
1393 return &iwdev->ibdev; in ib_device_get_by_netdev()
1408 * irdma_query_gid_roce - Query port GID for Roce
1421 if (ret == -EAGAIN) { in irdma_query_gid_roce()
1430 * irdma_modify_port - modify port attributes
1441 return -EINVAL; in irdma_modify_port()
1447 * irdma_query_pkey - Query partition key
1458 return -EINVAL; in irdma_query_pkey()
1471 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; in irdma_roce_port_immutable()
1476 immutable->max_mad_size = IB_MGMT_MAD_SIZE; in irdma_roce_port_immutable()
1477 immutable->pkey_tbl_len = attr.pkey_tbl_len; in irdma_roce_port_immutable()
1478 immutable->gid_tbl_len = attr.gid_tbl_len; in irdma_roce_port_immutable()
1490 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; in irdma_iw_port_immutable()
1494 immutable->gid_tbl_len = 1; in irdma_iw_port_immutable()
1500 * irdma_query_port - get port attributes
1510 if_t netdev = iwdev->netdev; in irdma_query_port()
1514 props->max_mtu = IB_MTU_4096; in irdma_query_port()
1515 props->active_mtu = ib_mtu_int_to_enum(if_getmtu(netdev)); in irdma_query_port()
1516 props->lid = 1; in irdma_query_port()
1517 props->lmc = 0; in irdma_query_port()
1518 props->sm_lid = 0; in irdma_query_port()
1519 props->sm_sl = 0; in irdma_query_port()
1522 props->state = IB_PORT_ACTIVE; in irdma_query_port()
1523 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; in irdma_query_port()
1525 props->state = IB_PORT_DOWN; in irdma_query_port()
1526 props->phys_state = IB_PORT_PHYS_STATE_DISABLED; in irdma_query_port()
1528 ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width); in irdma_query_port()
1531 props->gid_tbl_len = 32; in irdma_query_port()
1532 props->port_cap_flags |= IB_PORT_IP_BASED_GIDS; in irdma_query_port()
1533 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ; in irdma_query_port()
1535 props->gid_tbl_len = 1; in irdma_query_port()
1537 props->qkey_viol_cntr = 0; in irdma_query_port()
1538 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP; in irdma_query_port()
1539 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; in irdma_query_port()
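
These attributes are what ibv_query_port() hands back to applications; a short sketch:

    #include <stdio.h>
    #include <infiniband/verbs.h>

    static void show_port(struct ibv_context *ctx, uint8_t port)
    {
            struct ibv_port_attr attr;

            if (ibv_query_port(ctx, port, &attr))
                    return;
            printf("state=%d active_mtu=%d gid_tbl_len=%d\n",
                   attr.state, attr.active_mtu, attr.gid_tbl_len);
    }
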
1545 /* gen1 - 32-bit */
1553 /* gen1 - 64-bit */
1579 /* gen2 - 32-bit */
1583 /* gen2 - 64-bit */
1599 * irdma_alloc_hw_stats - Allocate a hw stats structure
1608 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; in irdma_alloc_hw_stats()
1610 int num_counters = dev->hw_attrs.max_stat_idx; in irdma_alloc_hw_stats()
1618 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
1620 * @stats: stats pointer from stack
1626 struct rdma_hw_stats *stats, u8 port_num, in irdma_get_hw_stats() argument
1630 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats; in irdma_get_hw_stats()
1632 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) in irdma_get_hw_stats()
1633 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true); in irdma_get_hw_stats()
1635 memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters); in irdma_get_hw_stats()
1637 return stats->num_counters; in irdma_get_hw_stats()
1641 * irdma_query_gid - Query port GID
1653 memset(gid->raw, 0, sizeof(gid->raw)); in irdma_query_gid()
1654 ether_addr_copy(gid->raw, if_getlladdr(iwdev->netdev)); in irdma_query_gid()
1684 iwdev->ibdev.uverbs_cmd_mask |= in kc_set_roce_uverbs_cmd_mask()
1694 iwdev->ibdev.uverbs_cmd_mask = in kc_set_rdma_uverbs_cmd_mask()
1715 iwdev->ibdev.uverbs_ex_cmd_mask = in kc_set_rdma_uverbs_cmd_mask()
1719 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2) in kc_set_rdma_uverbs_cmd_mask()
1720 iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ); in kc_set_rdma_uverbs_cmd_mask()
1726 if_t netdev = ibdev->get_netdev(ibdev, port_num); in ib_get_eth_speed()
1730 return -ENODEV; in ib_get_eth_speed()