Lines Matching refs:rdev

55 struct bnxt_re_dev *rdev; in bnxt_re_resolve_dmac_task() local
60 rdev = dmac_work->rdev; in bnxt_re_resolve_dmac_task()
62 rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr); in bnxt_re_resolve_dmac_task()
64 dev_err(rdev_to_dev(dmac_work->rdev), in bnxt_re_resolve_dmac_task()
111 static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data, in bnxt_re_copy_to_udata() argument
118 dev_err(rdev_to_dev(rdev), in bnxt_re_copy_to_udata()
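
The bnxt_re_copy_to_udata() fragments above show a thin wrapper that copies a response structure out to user space and logs once on failure, so every verb can just check the return code. A minimal user-space sketch of that pattern; struct fake_udata and fake_copy_to_udata() are hypothetical stand-ins for struct ib_udata and ib_copy_to_udata(), not the driver's definitions:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct ib_udata (output side only). */
struct fake_udata {
	void  *outbuf;
	size_t outlen;
};

/* Model of ib_copy_to_udata(): copy min(len, outlen) to the user buffer. */
static int fake_copy_to_udata(struct fake_udata *udata, void *src, size_t len)
{
	size_t n = len < udata->outlen ? len : udata->outlen;

	if (!udata->outbuf)
		return -1;		/* would be -EFAULT in the kernel */
	memcpy(udata->outbuf, src, n);
	return 0;
}

/* Pattern from the listing: wrap the copy, log the failure in one place. */
static int copy_to_udata(struct fake_udata *udata, void *data, size_t len)
{
	int rc = fake_copy_to_udata(udata, data, len);

	if (rc)
		fprintf(stderr, "ucontext copy failed\n");
	return rc;
}

int main(void)
{
	unsigned int resp = 0xbeef, out = 0;
	struct fake_udata udata = { &out, sizeof(out) };

	return copy_to_udata(&udata, &resp, sizeof(resp));
}
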
128 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_get_netdev() local
133 if (!rdev || !rdev->netdev) in bnxt_re_get_netdev()
136 netdev = rdev->netdev; in bnxt_re_get_netdev()
151 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_query_device() local
152 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; in bnxt_re_query_device()
157 bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid); in bnxt_re_query_device()
160 ib_attr->vendor_id = rdev->en_dev->pdev->vendor; in bnxt_re_query_device()
161 ib_attr->vendor_part_id = rdev->en_dev->pdev->device; in bnxt_re_query_device()
162 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device; in bnxt_re_query_device()
169 if (rdev->min_tx_depth == 1 && in bnxt_re_query_device()
171 rdev->min_tx_depth = min_tx_depth; in bnxt_re_query_device()
227 dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n", in bnxt_re_modify_device()
290 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_query_port() local
291 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; in bnxt_re_query_port()
294 dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num); in bnxt_re_query_port()
298 port_attr->state = bnxt_re_get_link_state(rdev); in bnxt_re_query_port()
302 port_attr->active_mtu = iboe_get_mtu(if_getmtu(rdev->netdev)); in bnxt_re_query_port()
320 rdev->espeed = rdev->en_dev->espeed; in bnxt_re_query_port()
322 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) in bnxt_re_query_port()
323 __to_ib_speed_width(rdev->espeed, &active_speed, in bnxt_re_query_port()
336 dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n", in bnxt_re_modify_port()
355 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_get_port_immutable() local
363 if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP) in bnxt_re_get_port_immutable()
365 else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP) in bnxt_re_get_port_immutable()
376 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_compat_qfwstr() local
378 sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0], in bnxt_re_compat_qfwstr()
379 rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2], in bnxt_re_compat_qfwstr()
380 rdev->dev_attr->fw_ver[3]); in bnxt_re_compat_qfwstr()
397 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_query_gid() local
402 rc = bnxt_qplib_get_sgid(&rdev->qplib_res, in bnxt_re_query_gid()
403 &rdev->qplib_res.sgid_tbl, index, in bnxt_re_query_gid()
413 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_del_gid() local
414 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; in bnxt_re_del_gid()
421 dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!\n"); in bnxt_re_del_gid()
426 dev_dbg(rdev_to_dev(rdev), "GID index out of range?!\n"); in bnxt_re_del_gid()
442 (rdev->gsi_ctx.gsi_sqp || in bnxt_re_del_gid()
443 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) { in bnxt_re_del_gid()
444 dev_dbg(rdev_to_dev(rdev), in bnxt_re_del_gid()
447 rdev->gid_map[index] = -1; in bnxt_re_del_gid()
454 rdev->gid_map[index] = -1; in bnxt_re_del_gid()
459 dev_dbg(rdev_to_dev(rdev), "GID remove success\n"); in bnxt_re_del_gid()
464 dev_err(rdev_to_dev(rdev), in bnxt_re_del_gid()
469 dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!\n"); in bnxt_re_del_gid()
483 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_add_gid() local
484 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; in bnxt_re_add_gid()
489 rdev->dev_addr, in bnxt_re_add_gid()
492 dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present\n", gid); in bnxt_re_add_gid()
506 rdev->gid_map[index] = tbl_idx; in bnxt_re_add_gid()
509 dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc); in bnxt_re_add_gid()
514 dev_err(rdev_to_dev(rdev), "Add GID ctx failed\n"); in bnxt_re_add_gid()
522 rdev->gid_map[index] = tbl_idx; in bnxt_re_add_gid()
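
The bnxt_re_add_gid()/bnxt_re_del_gid() fragments show a per-device gid_map[] that translates the stack's GID index into the hardware SGID-table index, with -1 marking an unmapped slot (rdev->gid_map[index] = tbl_idx on add, = -1 on delete). A self-contained model of that bookkeeping; the table size and helper names here are invented for illustration:

#include <stdio.h>

#define GID_TBL_SIZE 16

/* sw index (what the RDMA stack hands us) -> hw SGID-table index */
static int gid_map[GID_TBL_SIZE];
static int hw_tbl_used[GID_TBL_SIZE];	/* model of the qplib sgid table */

static void gid_map_init(void)
{
	for (int i = 0; i < GID_TBL_SIZE; i++) {
		gid_map[i] = -1;	/* -1 == unmapped, as in the listing */
		hw_tbl_used[i] = 0;
	}
}

/* Model of add_gid: claim a hw slot, remember it under the sw index. */
static int add_gid(int sw_index)
{
	for (int t = 0; t < GID_TBL_SIZE; t++) {
		if (!hw_tbl_used[t]) {
			hw_tbl_used[t] = 1;
			gid_map[sw_index] = t;	/* gid_map[index] = tbl_idx */
			return 0;
		}
	}
	return -1;			/* table full */
}

/* Model of del_gid: release the hw slot, unmap the sw index. */
static int del_gid(int sw_index)
{
	int t = gid_map[sw_index];

	if (t < 0)
		return -1;		/* "GID index out of range?!" case */
	hw_tbl_used[t] = 0;
	gid_map[sw_index] = -1;
	return 0;
}

int main(void)
{
	gid_map_init();
	add_gid(3);
	printf("sw 3 -> hw %d\n", gid_map[3]);
	return del_gid(3);
}
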
539 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_legacy_create_fence_wqe() local
541 if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_legacy_create_fence_wqe()
580 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_re_legacy_bind_fence_mw()
585 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); in bnxt_re_legacy_bind_fence_mw()
597 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_legacy_create_fence_mr() local
606 if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_legacy_create_fence_mr()
614 dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va, in bnxt_re_legacy_create_fence_mr()
617 rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr); in bnxt_re_legacy_create_fence_mr()
619 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); in bnxt_re_legacy_create_fence_mr()
631 mr->rdev = rdev; in bnxt_re_legacy_create_fence_mr()
635 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) { in bnxt_re_legacy_create_fence_mr()
636 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_legacy_create_fence_mr()
638 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); in bnxt_re_legacy_create_fence_mr()
656 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); in bnxt_re_legacy_create_fence_mr()
658 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); in bnxt_re_legacy_create_fence_mr()
663 atomic_inc(&rdev->stats.rsors.mr_count); in bnxt_re_legacy_create_fence_mr()
664 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); in bnxt_re_legacy_create_fence_mr()
665 if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count))) in bnxt_re_legacy_create_fence_mr()
666 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); in bnxt_re_legacy_create_fence_mr()
671 dev_err(rdev_to_dev(rdev), in bnxt_re_legacy_create_fence_mr()
683 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_legacy_create_fence_mr()
684 atomic_dec(&rdev->stats.rsors.mr_count); in bnxt_re_legacy_create_fence_mr()
690 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr, in bnxt_re_legacy_create_fence_mr()
703 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_legacy_destroy_fence_mr() local
706 if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_legacy_destroy_fence_mr()
715 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, in bnxt_re_legacy_destroy_fence_mr()
718 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_legacy_destroy_fence_mr()
721 atomic_dec(&rdev->stats.rsors.mr_count); in bnxt_re_legacy_destroy_fence_mr()
724 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr, in bnxt_re_legacy_destroy_fence_mr()
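
The fence-MR fragments trace the usual kernel acquire/unwind shape: DMA-map the fence buffer, allocate the HW MR, register it, and on any failure release in reverse order (and bnxt_re_legacy_destroy_fence_mr() repeats the same release sequence). A compact model of that goto ladder; every driver call is replaced by a stub so the shape compiles on its own:

#include <stdio.h>

/* Stubs standing in for ib_dma_map_single(), bnxt_qplib_alloc_mrw() and
 * bnxt_qplib_reg_mr(); each returns 0 on success. */
static int  dma_map(void)      { return 0; }
static void dma_unmap(void)    { puts("dma unmapped"); }
static int  alloc_hw_mr(void)  { return 0; }
static void free_hw_mr(void)   { puts("hw mr freed"); }
static int  register_mr(void)  { return -1; }	/* force the unwind path */

/* Pattern from the listing: acquire in order, release in reverse order. */
static int create_fence_mr(void)
{
	int rc;

	rc = dma_map();
	if (rc)
		goto fail;
	rc = alloc_hw_mr();
	if (rc)
		goto fail_unmap;
	rc = register_mr();
	if (rc)
		goto fail_free;	/* "Failed to register fence-MR" case */
	return 0;

fail_free:
	free_hw_mr();
fail_unmap:
	dma_unmap();
fail:
	return rc;
}

int main(void)
{
	return create_fence_mr() ? 1 : 0;
}
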
734 static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev, in bnxt_re_get_user_dpi() argument
737 struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx; in bnxt_re_get_user_dpi()
744 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type); in bnxt_re_get_user_dpi()
746 dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!\n"); in bnxt_re_get_user_dpi()
752 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi, in bnxt_re_get_user_dpi()
755 dev_err(rdev_to_dev(rdev), "push dp alloc failed\n"); in bnxt_re_get_user_dpi()
765 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_dealloc_pd() local
770 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, in bnxt_re_dealloc_pd()
771 &rdev->qplib_res.pd_tbl, in bnxt_re_dealloc_pd()
774 dev_err_ratelimited(rdev_to_dev(rdev), in bnxt_re_dealloc_pd()
776 atomic_dec(&rdev->stats.rsors.pd_count); in bnxt_re_dealloc_pd()
786 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_alloc_pd() local
794 pd->rdev = rdev; in bnxt_re_alloc_pd()
795 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) { in bnxt_re_alloc_pd()
796 dev_err(rdev_to_dev(rdev), in bnxt_re_alloc_pd()
806 rc = bnxt_re_get_user_dpi(rdev, ucntx); in bnxt_re_alloc_pd()
820 if (rdev->dbr_pacing) { in bnxt_re_alloc_pd()
821 WARN_ON(!rdev->dbr_bar_addr); in bnxt_re_alloc_pd()
822 resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr; in bnxt_re_alloc_pd()
826 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_alloc_pd()
835 dev_warn(rdev_to_dev(rdev), in bnxt_re_alloc_pd()
838 atomic_inc(&rdev->stats.rsors.pd_count); in bnxt_re_alloc_pd()
839 max_pd_count = atomic_read(&rdev->stats.rsors.pd_count); in bnxt_re_alloc_pd()
840 if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count)) in bnxt_re_alloc_pd()
841 atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count); in bnxt_re_alloc_pd()
845 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, in bnxt_re_alloc_pd()
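
Nearly every verb in this listing ends with the same accounting: atomically bump a resource counter, then ratchet a max_* high-water mark (the mr_count, pd_count, ah_count, srq_count, qp_count, cq_count and mw_count sites all follow it). A standalone model using C11 atomics; note the read-then-set form shown in the fragments is only approximate under concurrency, which is acceptable for statistics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pd_count;
static atomic_int max_pd_count;

/* Mirror of the listing's pattern: inc, read, conditionally set the max.
 * Two racing callers can publish a slightly stale max; for stats that is
 * tolerated, and it avoids a CAS loop on every allocation. */
static void account_alloc(void)
{
	int cur;

	atomic_fetch_add(&pd_count, 1);
	cur = atomic_load(&pd_count);
	if (cur > atomic_load(&max_pd_count))
		atomic_store(&max_pd_count, cur);
}

static void account_free(void)
{
	atomic_fetch_sub(&pd_count, 1);
}

int main(void)
{
	account_alloc();
	account_alloc();
	account_free();
	printf("cur=%d max=%d\n", atomic_load(&pd_count),
	       atomic_load(&max_pd_count));
	return 0;
}
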
855 struct bnxt_re_dev *rdev = ah->rdev; in bnxt_re_destroy_ah() local
861 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block); in bnxt_re_destroy_ah()
863 dev_err_ratelimited(rdev_to_dev(rdev), in bnxt_re_destroy_ah()
866 atomic_dec(&rdev->stats.rsors.ah_count); in bnxt_re_destroy_ah()
906 static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev, in bnxt_re_get_ah_info() argument
919 rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index, in bnxt_re_get_ah_info()
940 static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx) in _get_sgid_index() argument
942 gindx = rdev->gid_map[gindx]; in _get_sgid_index()
946 static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, struct ib_ah_attr *ah_attr, in bnxt_re_init_dmac() argument
963 resolve_dmac_work->rdev = rdev; in bnxt_re_init_dmac()
969 queue_work(rdev->resolve_wq, &resolve_dmac_work->work); in bnxt_re_init_dmac()
980 &rdev->mac_wq_list); in bnxt_re_init_dmac()
999 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_create_ah() local
1009 dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n"); in bnxt_re_create_ah()
1011 ah->rdev = rdev; in bnxt_re_create_ah()
1018 ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index); in bnxt_re_create_ah()
1020 dev_err(rdev_to_dev(rdev), "invalid sgid_index!\n"); in bnxt_re_create_ah()
1029 rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info); in bnxt_re_create_ah()
1034 rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah); in bnxt_re_create_ah()
1038 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block); in bnxt_re_create_ah()
1040 dev_err(rdev_to_dev(rdev), in bnxt_re_create_ah()
1059 atomic_inc(&rdev->stats.rsors.ah_count); in bnxt_re_create_ah()
1060 max_ah_count = atomic_read(&rdev->stats.rsors.ah_count); in bnxt_re_create_ah()
1061 if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count)) in bnxt_re_create_ah()
1062 atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count); in bnxt_re_create_ah()
1096 struct bnxt_re_dev *rdev = srq->rdev; in bnxt_re_destroy_srq() local
1101 rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); in bnxt_re_destroy_srq()
1103 dev_err_ratelimited(rdev_to_dev(rdev), in bnxt_re_destroy_srq()
1110 atomic_dec(&rdev->stats.rsors.srq_count); in bnxt_re_destroy_srq()
1130 struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev, in ib_umem_get_compat() argument
1140 struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev, in ib_umem_get_flags_compat() argument
1146 return ib_umem_get_compat(rdev, ucontext, udata, addr, size, in ib_umem_get_flags_compat()
1155 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev, in bnxt_re_init_user_srq() argument
1174 dev_warn(rdev_to_dev(rdev), in bnxt_re_init_user_srq()
1186 umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes, in bnxt_re_init_user_srq()
1189 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n", in bnxt_re_init_user_srq()
1210 struct bnxt_re_dev *rdev; in bnxt_re_create_srq() local
1220 rdev = pd->rdev; in bnxt_re_create_srq()
1221 dev_attr = rdev->dev_attr; in bnxt_re_create_srq()
1223 if (rdev->mod_exit) { in bnxt_re_create_srq()
1224 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__); in bnxt_re_create_srq()
1230 dev_err(rdev_to_dev(rdev), "SRQ type not supported\n"); in bnxt_re_create_srq()
1240 if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) { in bnxt_re_create_srq()
1241 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n"); in bnxt_re_create_srq()
1247 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n"); in bnxt_re_create_srq()
1252 srq->rdev = rdev; in bnxt_re_create_srq()
1254 srq->qplib_srq.dpi = &rdev->dpi_privileged; in bnxt_re_create_srq()
1268 srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id; in bnxt_re_create_srq()
1273 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata); in bnxt_re_create_srq()
1278 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); in bnxt_re_create_srq()
1280 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!\n"); in bnxt_re_create_srq()
1288 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_create_srq()
1292 bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq); in bnxt_re_create_srq()
1296 atomic_inc(&rdev->stats.rsors.srq_count); in bnxt_re_create_srq()
1297 max_srq_count = atomic_read(&rdev->stats.rsors.srq_count); in bnxt_re_create_srq()
1298 if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count)) in bnxt_re_create_srq()
1299 atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count); in bnxt_re_create_srq()
1318 struct bnxt_re_dev *rdev = srq->rdev; in bnxt_re_modify_srq() local
1331 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); in bnxt_re_modify_srq()
1333 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!\n"); in bnxt_re_modify_srq()
1341 rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata); in bnxt_re_modify_srq()
1347 dev_err(rdev_to_dev(rdev), in bnxt_re_modify_srq()
1358 struct bnxt_re_dev *rdev = srq->rdev; in bnxt_re_query_srq() local
1361 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq); in bnxt_re_query_srq()
1363 dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n", in bnxt_re_query_srq()
1426 struct bnxt_re_dev *rdev; in bnxt_re_destroy_gsi_sqp() local
1430 rdev = qp->rdev; in bnxt_re_destroy_gsi_sqp()
1431 gsi_sqp = rdev->gsi_ctx.gsi_sqp; in bnxt_re_destroy_gsi_sqp()
1432 gsi_sah = rdev->gsi_ctx.gsi_sah; in bnxt_re_destroy_gsi_sqp()
1435 mutex_lock(&rdev->qp_lock); in bnxt_re_destroy_gsi_sqp()
1437 mutex_unlock(&rdev->qp_lock); in bnxt_re_destroy_gsi_sqp()
1440 dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n"); in bnxt_re_destroy_gsi_sqp()
1441 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, in bnxt_re_destroy_gsi_sqp()
1444 dev_err(rdev_to_dev(rdev), in bnxt_re_destroy_gsi_sqp()
1446 atomic_dec(&rdev->stats.rsors.ah_count); in bnxt_re_destroy_gsi_sqp()
1449 dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); in bnxt_re_destroy_gsi_sqp()
1450 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); in bnxt_re_destroy_gsi_sqp()
1452 dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n"); in bnxt_re_destroy_gsi_sqp()
1459 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); in bnxt_re_destroy_gsi_sqp()
1460 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp); in bnxt_re_destroy_gsi_sqp()
1461 kfree(rdev->gsi_ctx.sqp_tbl); in bnxt_re_destroy_gsi_sqp()
1464 rdev->gsi_ctx.gsi_sqp = NULL; in bnxt_re_destroy_gsi_sqp()
1465 rdev->gsi_ctx.gsi_sah = NULL; in bnxt_re_destroy_gsi_sqp()
1466 rdev->gsi_ctx.sqp_tbl = NULL; in bnxt_re_destroy_gsi_sqp()
1467 atomic_dec(&rdev->stats.rsors.qp_count); in bnxt_re_destroy_gsi_sqp()
1472 static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps) in bnxt_re_dump_debug_stats() argument
1478 if (!rdev->rcfw.sp_perf_stats_enabled) in bnxt_re_dump_debug_stats()
1485 if (rdev->rcfw.qp_destroy_stats[i]) { in bnxt_re_dump_debug_stats()
1487 avg_time += rdev->rcfw.qp_destroy_stats[i]; in bnxt_re_dump_debug_stats()
1491 dev_dbg(rdev_to_dev(rdev), in bnxt_re_dump_debug_stats()
1498 dev_dbg(rdev_to_dev(rdev), in bnxt_re_dump_debug_stats()
1507 dev_dbg(rdev_to_dev(rdev), in bnxt_re_dump_debug_stats()
1510 atomic_read(&rdev->stats.rsors.max_qp_count)); in bnxt_re_dump_debug_stats()
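
The bnxt_re_dump_debug_stats() fragments accumulate per-QP destroy latencies from rcfw.qp_destroy_stats[] (skipping empty slots) and report an average. A minimal model of that aggregation; the array size, sample values and units here are assumed for illustration:

#include <stdio.h>

#define NSAMPLES 64

/* Model of rcfw.qp_destroy_stats[]: zero means "slot unused". */
static unsigned long qp_destroy_stats[NSAMPLES] = { 12, 0, 7, 9 };

static void dump_destroy_latency(void)
{
	unsigned long total = 0;
	int used = 0;

	for (int i = 0; i < NSAMPLES; i++) {
		if (qp_destroy_stats[i]) {	/* cf. the listing's check */
			total += qp_destroy_stats[i];
			used++;
		}
	}
	if (used)
		printf("avg destroy time: %lu over %d QPs\n",
		       total / used, used);
}

int main(void)
{
	dump_destroy_latency();
	return 0;
}
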
1518 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_destroy_qp() local
1523 mutex_lock(&rdev->qp_lock); in bnxt_re_destroy_qp()
1525 active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count); in bnxt_re_destroy_qp()
1527 atomic_dec(&rdev->stats.rsors.rc_qp_count); in bnxt_re_destroy_qp()
1529 atomic_dec(&rdev->stats.rsors.ud_qp_count); in bnxt_re_destroy_qp()
1530 mutex_unlock(&rdev->qp_lock); in bnxt_re_destroy_qp()
1532 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1534 dev_err_ratelimited(rdev_to_dev(rdev), in bnxt_re_destroy_qp()
1544 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1546 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { in bnxt_re_destroy_qp()
1547 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL && in bnxt_re_destroy_qp()
1548 rdev->gsi_ctx.gsi_sqp) { in bnxt_re_destroy_qp()
1551 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1560 bnxt_re_dump_debug_stats(rdev, active_qps); in bnxt_re_destroy_qp()
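
The bnxt_re_destroy_qp() fragments fix the teardown order: take rdev->qp_lock, unlink the QP and drop the per-type counters, release the lock, and only then issue the (slow) HW destroy and free the resources. A threaded model of the unlink-under-mutex step; the singly linked list representation is invented for brevity:

#include <pthread.h>
#include <stdio.h>

struct qp {
	struct qp *next;
	int id;
};

static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qp *qp_list;
static int qp_count;

/* Model of create: link under the lock, bump the counter (cf. the
 * qp_list add in bnxt_re_create_qp()). */
static void qp_link(struct qp *qp)
{
	pthread_mutex_lock(&qp_lock);
	qp->next = qp_list;
	qp_list = qp;
	qp_count++;
	pthread_mutex_unlock(&qp_lock);
}

/* Model of destroy: unlink and account under the lock, then do the slow
 * HW teardown outside it, exactly the order the listing shows. */
static void qp_unlink_and_destroy(struct qp *qp)
{
	struct qp **pp;

	pthread_mutex_lock(&qp_lock);
	for (pp = &qp_list; *pp; pp = &(*pp)->next) {
		if (*pp == qp) {
			*pp = qp->next;
			qp_count--;
			break;
		}
	}
	pthread_mutex_unlock(&qp_lock);

	/* HW destroy + resource free happen here, lock not held. */
	printf("qp %d destroyed, %d left\n", qp->id, qp_count);
}

int main(void)
{
	struct qp a = { 0, 1 };

	qp_link(&a);
	qp_unlink_and_destroy(&a);
	return 0;
}
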
1604 struct bnxt_re_dev *rdev; in bnxt_re_setup_swqe_size() local
1608 rdev = qp->rdev; in bnxt_re_setup_swqe_size()
1611 dev_attr = rdev->dev_attr; in bnxt_re_setup_swqe_size()
1638 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, in bnxt_re_init_user_qp() argument
1658 dev_warn(rdev_to_dev(rdev), in bnxt_re_init_user_qp()
1671 psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ? in bnxt_re_init_user_qp()
1674 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) in bnxt_re_init_user_qp()
1680 if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) in bnxt_re_init_user_qp()
1686 umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes, in bnxt_re_init_user_qp()
1689 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n", in bnxt_re_init_user_qp()
1704 umem = ib_umem_get_compat(rdev, in bnxt_re_init_user_qp()
1708 dev_err(rdev_to_dev(rdev), in bnxt_re_init_user_qp()
1736 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_create_shadow_qp_ah() local
1743 dev_err(rdev_to_dev(rdev), "Allocate Address Handle failed!\n"); in bnxt_re_create_shadow_qp_ah()
1747 ah->rdev = rdev; in bnxt_re_create_shadow_qp_ah()
1750 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid); in bnxt_re_create_shadow_qp_ah()
1764 ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr); in bnxt_re_create_shadow_qp_ah()
1765 dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n", in bnxt_re_create_shadow_qp_ah()
1769 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true); in bnxt_re_create_shadow_qp_ah()
1771 dev_err(rdev_to_dev(rdev), in bnxt_re_create_shadow_qp_ah()
1775 dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id); in bnxt_re_create_shadow_qp_ah()
1776 atomic_inc(&rdev->stats.rsors.ah_count); in bnxt_re_create_shadow_qp_ah()
1784 void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev) in bnxt_re_update_shadow_ah() argument
1792 if (!rdev) in bnxt_re_update_shadow_ah()
1795 sah = rdev->gsi_ctx.gsi_sah; in bnxt_re_update_shadow_ah()
1797 dev_dbg(rdev_to_dev(rdev), "Updating the AH\n"); in bnxt_re_update_shadow_ah()
1800 if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) { in bnxt_re_update_shadow_ah()
1801 dev_dbg(rdev_to_dev(rdev), in bnxt_re_update_shadow_ah()
1806 gsi_qp = rdev->gsi_ctx.gsi_qp; in bnxt_re_update_shadow_ah()
1809 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, in bnxt_re_update_shadow_ah()
1812 dev_err(rdev_to_dev(rdev), in bnxt_re_update_shadow_ah()
1816 atomic_dec(&rdev->stats.rsors.ah_count); in bnxt_re_update_shadow_ah()
1818 rdev->gsi_ctx.gsi_sah = NULL; in bnxt_re_update_shadow_ah()
1820 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, in bnxt_re_update_shadow_ah()
1823 dev_err(rdev_to_dev(rdev), in bnxt_re_update_shadow_ah()
1827 rdev->gsi_ctx.gsi_sah = sah; in bnxt_re_update_shadow_ah()
1828 atomic_inc(&rdev->stats.rsors.ah_count); in bnxt_re_update_shadow_ah()
1836 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_create_shadow_qp() local
1842 dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n"); in bnxt_re_create_shadow_qp()
1846 qp->rdev = rdev; in bnxt_re_create_shadow_qp()
1849 ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr); in bnxt_re_create_shadow_qp()
1877 qp->qplib_qp.dpi = &rdev->dpi_privileged; in bnxt_re_create_shadow_qp()
1886 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n"); in bnxt_re_create_shadow_qp()
1890 dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n", in bnxt_re_create_shadow_qp()
1894 mutex_lock(&rdev->qp_lock); in bnxt_re_create_shadow_qp()
1895 list_add_tail(&qp->list, &rdev->qp_list); in bnxt_re_create_shadow_qp()
1896 atomic_inc(&rdev->stats.rsors.qp_count); in bnxt_re_create_shadow_qp()
1897 mutex_unlock(&rdev->qp_lock); in bnxt_re_create_shadow_qp()
1911 struct bnxt_re_dev *rdev; in bnxt_re_init_rq_attr() local
1915 rdev = qp->rdev; in bnxt_re_init_rq_attr()
1918 dev_attr = rdev->dev_attr; in bnxt_re_init_rq_attr()
1925 dev_err(rdev_to_dev(rdev), "SRQ not found\n"); in bnxt_re_init_rq_attr()
1955 struct bnxt_re_dev *rdev; in bnxt_re_adjust_gsi_rq_attr() local
1957 rdev = qp->rdev; in bnxt_re_adjust_gsi_rq_attr()
1959 dev_attr = rdev->dev_attr; in bnxt_re_adjust_gsi_rq_attr()
1961 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) in bnxt_re_adjust_gsi_rq_attr()
1971 struct bnxt_re_dev *rdev; in bnxt_re_init_sq_attr() local
1977 rdev = qp->rdev; in bnxt_re_init_sq_attr()
1980 dev_attr = rdev->dev_attr; in bnxt_re_init_sq_attr()
1997 if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) { in bnxt_re_init_sq_attr()
2003 if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth) in bnxt_re_init_sq_attr()
2004 entries = rdev->min_tx_depth; in bnxt_re_init_sq_attr()
2008 diff = bnxt_re_get_diff(cntx, rdev->chip_ctx); in bnxt_re_init_sq_attr()
2029 struct bnxt_re_dev *rdev; in bnxt_re_adjust_gsi_sq_attr() local
2032 rdev = qp->rdev; in bnxt_re_adjust_gsi_sq_attr()
2034 dev_attr = rdev->dev_attr; in bnxt_re_adjust_gsi_sq_attr()
2036 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { in bnxt_re_adjust_gsi_sq_attr()
2049 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, in bnxt_re_init_qp_type() argument
2056 chip_ctx = rdev->chip_ctx; in bnxt_re_init_qp_type()
2057 gsi_ctx = &rdev->gsi_ctx; in bnxt_re_init_qp_type()
2061 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n", in bnxt_re_init_qp_type()
2076 static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev) in bnxt_re_init_qp_wqe_mode() argument
2078 return rdev->chip_ctx->modes.wqe_mode; in bnxt_re_init_qp_wqe_mode()
2089 struct bnxt_re_dev *rdev; in bnxt_re_init_qp_attr() local
2093 rdev = qp->rdev; in bnxt_re_init_qp_attr()
2095 dev_attr = rdev->dev_attr; in bnxt_re_init_qp_attr()
2108 qptype = bnxt_re_init_qp_type(rdev, init_attr); in bnxt_re_init_qp_attr()
2114 qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev); in bnxt_re_init_qp_attr()
2115 ether_addr_copy(qplqp->smac, rdev->dev_addr); in bnxt_re_init_qp_attr()
2121 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(if_getmtu(rdev->netdev))); in bnxt_re_init_qp_attr()
2122 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ in bnxt_re_init_qp_attr()
2124 dev_dbg(rdev_to_dev(rdev), in bnxt_re_init_qp_attr()
2134 dev_err(rdev_to_dev(rdev), "Send CQ not found\n"); in bnxt_re_init_qp_attr()
2145 dev_err(rdev_to_dev(rdev), "Receive CQ not found\n"); in bnxt_re_init_qp_attr()
2168 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); in bnxt_re_init_qp_attr()
2177 struct bnxt_re_dev *rdev; in bnxt_re_create_shadow_gsi() local
2182 rdev = qp->rdev; in bnxt_re_create_shadow_gsi()
2188 rdev->gsi_ctx.sqp_tbl = sqp_tbl; in bnxt_re_create_shadow_gsi()
2190 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_shadow_gsi()
2193 dev_err(rdev_to_dev(rdev), in bnxt_re_create_shadow_gsi()
2197 rdev->gsi_ctx.gsi_sqp = sqp; in bnxt_re_create_shadow_gsi()
2201 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, in bnxt_re_create_shadow_gsi()
2204 bnxt_qplib_destroy_qp(&rdev->qplib_res, in bnxt_re_create_shadow_gsi()
2207 dev_err(rdev_to_dev(rdev), in bnxt_re_create_shadow_gsi()
2211 rdev->gsi_ctx.gsi_sah = sah; in bnxt_re_create_shadow_gsi()
2237 struct bnxt_re_dev *rdev; in bnxt_re_create_gsi_qp() local
2242 rdev = qp->rdev; in bnxt_re_create_gsi_qp()
2244 res = &rdev->qplib_res; in bnxt_re_create_gsi_qp()
2245 gsi_mode = rdev->gsi_ctx.gsi_qp_mode; in bnxt_re_create_gsi_qp()
2255 dev_err(rdev_to_dev(rdev), "create HW QP1 failed!\n"); in bnxt_re_create_gsi_qp()
2265 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, in bnxt_re_test_qp_limits() argument
2278 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! " in bnxt_re_test_qp_limits()
2294 struct bnxt_re_dev *rdev) in __get_qp_from_qp_in() argument
2300 dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n"); in __get_qp_from_qp_in()
2311 struct bnxt_re_dev *rdev; in bnxt_re_create_qp() local
2317 rdev = pd->rdev; in bnxt_re_create_qp()
2318 dev_attr = rdev->dev_attr; in bnxt_re_create_qp()
2319 if (rdev->mod_exit) { in bnxt_re_create_qp()
2321 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__); in bnxt_re_create_qp()
2325 if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) { in bnxt_re_create_qp()
2326 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n", in bnxt_re_create_qp()
2327 atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp); in bnxt_re_create_qp()
2332 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); in bnxt_re_create_qp()
2337 qp = __get_qp_from_qp_in(qp_in, rdev); in bnxt_re_create_qp()
2342 qp->rdev = rdev; in bnxt_re_create_qp()
2349 !_is_chip_gen_p5_p7(rdev->chip_ctx)) { in bnxt_re_create_qp()
2356 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_qp()
2358 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n"); in bnxt_re_create_qp()
2366 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_create_qp()
2376 rdev->gsi_ctx.gsi_qp = qp; in bnxt_re_create_qp()
2380 mutex_lock(&rdev->qp_lock); in bnxt_re_create_qp()
2381 list_add_tail(&qp->list, &rdev->qp_list); in bnxt_re_create_qp()
2382 mutex_unlock(&rdev->qp_lock); in bnxt_re_create_qp()
2383 atomic_inc(&rdev->stats.rsors.qp_count); in bnxt_re_create_qp()
2384 active_qps = atomic_read(&rdev->stats.rsors.qp_count); in bnxt_re_create_qp()
2385 if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count)) in bnxt_re_create_qp()
2386 atomic_set(&rdev->stats.rsors.max_qp_count, active_qps); in bnxt_re_create_qp()
2388 bnxt_re_dump_debug_stats(rdev, active_qps); in bnxt_re_create_qp()
2392 tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count); in bnxt_re_create_qp()
2393 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count)) in bnxt_re_create_qp()
2394 atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps); in bnxt_re_create_qp()
2396 tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count); in bnxt_re_create_qp()
2397 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count)) in bnxt_re_create_qp()
2398 atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps); in bnxt_re_create_qp()
2404 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_qp()
2418 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, in bnxt_re_modify_shadow_qp() argument
2422 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; in bnxt_re_modify_shadow_qp()
2444 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_modify_shadow_qp()
2446 dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n"); in bnxt_re_modify_shadow_qp()
2455 static u16 get_source_port(struct bnxt_re_dev *rdev, in get_source_port() argument
2496 static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) in bnxt_re_update_qp_info() argument
2516 qp->qp_info_entry.s_port = get_source_port(rdev, qp); in bnxt_re_update_qp_info()
2533 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_qplib_manage_flush_qp()
2548 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_qplib_manage_flush_qp()
2573 struct bnxt_re_dev *rdev; in bnxt_re_modify_qp() local
2584 rdev = qp->rdev; in bnxt_re_modify_qp()
2585 dev_attr = rdev->dev_attr; in bnxt_re_modify_qp()
2594 dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x" in bnxt_re_modify_qp()
2601 dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x " in bnxt_re_modify_qp()
2652 qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev, in bnxt_re_modify_qp()
2661 status = bnxt_re_get_cached_gid(&rdev->ibdev, 1, in bnxt_re_modify_qp()
2669 memcpy(qp->qplib_qp.smac, rdev->dev_addr, in bnxt_re_modify_qp()
2672 dev_dbg(rdev_to_dev(rdev), in bnxt_re_modify_qp()
2694 bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr, in bnxt_re_modify_qp()
2702 dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n", in bnxt_re_modify_qp()
2737 dev_dbg(rdev_to_dev(rdev), in bnxt_re_modify_qp()
2751 dev_err(rdev_to_dev(rdev), in bnxt_re_modify_qp()
2774 dev_err(rdev_to_dev(rdev), in bnxt_re_modify_qp()
2810 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_modify_qp()
2812 dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n"); in bnxt_re_modify_qp()
2826 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_modify_qp()
2834 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL && in bnxt_re_modify_qp()
2835 rdev->gsi_ctx.gsi_sqp) in bnxt_re_modify_qp()
2836 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); in bnxt_re_modify_qp()
2840 bnxt_re_update_qp_info(rdev, qp); in bnxt_re_modify_qp()
2848 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_query_qp() local
2859 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); in bnxt_re_query_qp()
2861 dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n", in bnxt_re_query_qp()
2940 rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid); in bnxt_re_build_qp1_send()
3000 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n"); in bnxt_re_build_qp1_send()
3020 struct bnxt_re_dev *rdev; in bnxt_re_build_gsi_send() local
3023 rdev = qp->rdev; in bnxt_re_build_gsi_send()
3026 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) in bnxt_re_build_gsi_send()
3052 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_build_qp1_recv() local
3069 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4) in bnxt_re_build_qp1_recv()
3071 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1) in bnxt_re_build_qp1_recv()
3086 dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n"); in bnxt_re_build_qp1_recv()
3100 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3114 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3128 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3139 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3147 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n"); in bnxt_re_build_qp1_recv()
3160 struct bnxt_re_dev *rdev; in bnxt_re_build_qp1_shadow_qp_recv() local
3164 rdev = qp->rdev; in bnxt_re_build_qp1_shadow_qp_recv()
3178 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_shadow_qp_recv()
3184 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; in bnxt_re_build_qp1_shadow_qp_recv()
3228 dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n", in bnxt_re_build_send_wqe()
3326 dev_err_ratelimited(rdev_to_dev(mr->rdev), in bnxt_re_build_reg_wqe()
3361 dev_err_ratelimited(rdev_to_dev(mr->rdev), in bnxt_re_build_reg_wqe()
3367 dev_dbg(rdev_to_dev(mr->rdev), in bnxt_re_build_reg_wqe()
3399 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, in bnxt_re_post_send_shadow_qp() argument
3413 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send_shadow_qp()
3428 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send_shadow_qp()
3459 struct bnxt_re_dev *rdev; in bnxt_re_post_send() local
3463 rdev = qp->rdev; in bnxt_re_post_send()
3470 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send()
3483 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { in bnxt_re_post_send()
3514 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send()
3526 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send()
3533 if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_post_send()
3539 dev_err(rdev_to_dev(rdev), in bnxt_re_post_send()
3547 if (!_is_chip_gen_p5_p7(rdev->chip_ctx)) in bnxt_re_post_send()
3554 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev, in bnxt_re_post_recv_shadow_qp() argument
3567 dev_err(rdev_to_dev(rdev), in bnxt_re_post_recv_shadow_qp()
3580 dev_err(rdev_to_dev(rdev), in bnxt_re_post_recv_shadow_qp()
3594 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_build_gsi_recv() local
3597 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) in bnxt_re_build_gsi_recv()
3619 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_post_recv()
3630 qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { in bnxt_re_post_recv()
3640 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_post_recv()
3664 struct bnxt_re_dev *rdev = cq->rdev; in bnxt_re_destroy_cq() local
3691 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); in bnxt_re_destroy_cq()
3693 dev_err_ratelimited(rdev_to_dev(rdev), in bnxt_re_destroy_cq()
3697 bnxt_re_put_nq(rdev, cq->qplib_cq.nq); in bnxt_re_destroy_cq()
3702 atomic_dec(&rdev->stats.rsors.cq_count); in bnxt_re_destroy_cq()
3709 struct bnxt_re_dev *rdev) in __get_cq_from_cq_in() argument
3725 struct bnxt_re_dev *rdev; in bnxt_re_create_cq() local
3734 rdev = rdev_from_cq_in(cq_in); in bnxt_re_create_cq()
3735 if (rdev->mod_exit) { in bnxt_re_create_cq()
3737 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__); in bnxt_re_create_cq()
3746 dev_attr = rdev->dev_attr; in bnxt_re_create_cq()
3748 if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) { in bnxt_re_create_cq()
3749 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n"); in bnxt_re_create_cq()
3755 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n"); in bnxt_re_create_cq()
3760 cq = __get_cq_from_cq_in(cq_in, rdev); in bnxt_re_create_cq()
3765 cq->rdev = rdev; in bnxt_re_create_cq()
3775 if (!udata && !rdev->gsi_ctx.first_cq_created && in bnxt_re_create_cq()
3776 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) { in bnxt_re_create_cq()
3777 rdev->gsi_ctx.first_cq_created = true; in bnxt_re_create_cq()
3794 dev_warn(rdev_to_dev(rdev), in bnxt_re_create_cq()
3815 dev_err(rdev_to_dev(rdev), in bnxt_re_create_cq()
3834 (rdev, context, udata, ureq.cq_va, in bnxt_re_create_cq()
3839 dev_err(rdev_to_dev(rdev), in bnxt_re_create_cq()
3848 rc = bnxt_re_get_user_dpi(rdev, uctx); in bnxt_re_create_cq()
3858 dev_err(rdev_to_dev(rdev), in bnxt_re_create_cq()
3863 qplcq->dpi = &rdev->dpi_privileged; in bnxt_re_create_cq()
3870 qplcq->nq = bnxt_re_get_nq(rdev); in bnxt_re_create_cq()
3873 rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq); in bnxt_re_create_cq()
3875 dev_err(rdev_to_dev(rdev), "Create HW CQ failed!\n"); in bnxt_re_create_cq()
3883 atomic_inc(&rdev->stats.rsors.cq_count); in bnxt_re_create_cq()
3884 max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count); in bnxt_re_create_cq()
3885 if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count)) in bnxt_re_create_cq()
3886 atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs); in bnxt_re_create_cq()
3905 if (_is_chip_p7(rdev->chip_ctx)) { in bnxt_re_create_cq()
3909 dev_err(rdev_to_dev(rdev), in bnxt_re_create_cq()
3911 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq); in bnxt_re_create_cq()
3920 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_create_cq()
3926 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq); in bnxt_re_create_cq()
3951 struct bnxt_re_dev *rdev = cq->rdev; in bnxt_re_modify_cq() local
3957 rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq); in bnxt_re_modify_cq()
3959 dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n", in bnxt_re_modify_cq()
3972 struct bnxt_re_dev *rdev = cq->rdev; in bnxt_re_resize_cq_complete() local
3974 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); in bnxt_re_resize_cq_complete()
3993 struct bnxt_re_dev *rdev; in bnxt_re_resize_cq() local
4001 rdev = cq->rdev; in bnxt_re_resize_cq()
4002 dev_attr = rdev->dev_attr; in bnxt_re_resize_cq()
4011 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n", in bnxt_re_resize_cq()
4018 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n", in bnxt_re_resize_cq()
4030 dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe); in bnxt_re_resize_cq()
4036 dev_warn(rdev_to_dev(rdev), in bnxt_re_resize_cq()
4046 dev_dbg(rdev_to_dev(rdev), "%s: va %p\n", __func__, in bnxt_re_resize_cq()
4049 (rdev, in bnxt_re_resize_cq()
4056 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n", in bnxt_re_resize_cq()
4061 dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n", in bnxt_re_resize_cq()
4077 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries); in bnxt_re_resize_cq()
4079 dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n", in bnxt_re_resize_cq()
4089 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq); in bnxt_re_resize_cq()
4091 atomic_inc(&rdev->stats.rsors.resize_count); in bnxt_re_resize_cq()
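
The bnxt_re_resize_cq() fragments show a guard ladder run before any hardware work: fail if a resize is already in flight ("Busy"), fail when the requested depth exceeds the device limit ("max exceeded"), and short-circuit when the CQ is already at the requested size. A tiny model of those preconditions, with the limit value assumed and the hardware call stubbed out:

#include <stdio.h>

#define MAX_CQ_WQES 1024

struct cq {
	int cqe;	/* current depth */
	int resizing;	/* a resize is already in flight */
};

/* Precondition ladder from the listing. */
static int resize_cq(struct cq *cq, int cqe)
{
	if (cq->resizing) {
		fprintf(stderr, "Resize CQ failed - Busy\n");
		return -1;
	}
	if (cqe < 1 || cqe > MAX_CQ_WQES) {
		fprintf(stderr, "Resize CQ failed - max exceeded\n");
		return -1;
	}
	if (cqe == cq->cqe) {
		printf("CQ is already at size %d\n", cqe);
		return 0;
	}

	cq->resizing = 1;
	/* ... allocate new ring, bnxt_qplib_resize_cq(), complete ... */
	cq->cqe = cqe;
	cq->resizing = 0;
	return 0;
}

int main(void)
{
	struct cq cq = { 256, 0 };

	return resize_cq(&cq, 512);
}
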
4258 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, in bnxt_re_is_loopback_packet() argument
4272 if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) { in bnxt_re_is_loopback_packet()
4289 static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev, in bnxt_re_is_vlan_in_packet() argument
4329 struct bnxt_re_dev *rdev; in bnxt_re_process_raw_qp_packet_receive() local
4340 rdev = gsi_qp->rdev; in bnxt_re_process_raw_qp_packet_receive()
4341 gsi_sqp = rdev->gsi_ctx.gsi_sqp; in bnxt_re_process_raw_qp_packet_receive()
4351 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; in bnxt_re_process_raw_qp_packet_receive()
4357 dev_err(rdev_to_dev(rdev), "Not handling this packet\n"); in bnxt_re_process_raw_qp_packet_receive()
4370 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf)) in bnxt_re_process_raw_qp_packet_receive()
4373 if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe)) in bnxt_re_process_raw_qp_packet_receive()
4410 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); in bnxt_re_process_raw_qp_packet_receive()
4412 dev_err(rdev_to_dev(rdev), in bnxt_re_process_raw_qp_packet_receive()
4423 gsi_sah = rdev->gsi_ctx.gsi_sah; in bnxt_re_process_raw_qp_packet_receive()
4428 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); in bnxt_re_process_raw_qp_packet_receive()
4487 struct bnxt_re_dev *rdev = gsi_sqp->rdev; in bnxt_re_process_res_shadow_qp_wc() local
4497 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; in bnxt_re_process_res_shadow_qp_wc()
4509 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { in bnxt_re_process_res_shadow_qp_wc()
4525 dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type); in bnxt_re_process_res_shadow_qp_wc()
4528 static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev, in bnxt_re_process_res_ud_wc() argument
4541 if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id && in bnxt_re_process_res_ud_wc()
4542 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) { in bnxt_re_process_res_ud_wc()
4546 if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) { in bnxt_re_process_res_ud_wc()
4556 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { in bnxt_re_process_res_ud_wc()
4588 struct bnxt_re_dev *rdev = cq->rdev; in bnxt_re_poll_cq() local
4604 bnxt_re_pacing_alert(rdev); in bnxt_re_poll_cq()
4622 dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use\n"); in bnxt_re_poll_cq()
4626 gsi_mode = rdev->gsi_ctx.gsi_qp_mode; in bnxt_re_poll_cq()
4635 dev_err(rdev_to_dev(rdev), in bnxt_re_poll_cq()
4658 dev_err(rdev_to_dev(rdev), in bnxt_re_poll_cq()
4673 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) { in bnxt_re_poll_cq()
4676 dev_dbg(rdev_to_dev(rdev), in bnxt_re_poll_cq()
4700 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; in bnxt_re_poll_cq()
4712 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) { in bnxt_re_poll_cq()
4716 dev_dbg(rdev_to_dev(rdev), in bnxt_re_poll_cq()
4728 bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe); in bnxt_re_poll_cq()
4731 dev_err(rdev_to_dev(cq->rdev), in bnxt_re_poll_cq()
4776 struct bnxt_re_dev *rdev; in bnxt_re_get_dma_mr() local
4785 rdev = pd->rdev; in bnxt_re_get_dma_mr()
4789 dev_err(rdev_to_dev(rdev), in bnxt_re_get_dma_mr()
4793 mr->rdev = rdev; in bnxt_re_get_dma_mr()
4799 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_get_dma_mr()
4801 dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!\n"); in bnxt_re_get_dma_mr()
4812 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); in bnxt_re_get_dma_mr()
4814 dev_err(rdev_to_dev(rdev), "Register DMA MR failed!\n"); in bnxt_re_get_dma_mr()
4821 atomic_inc(&rdev->stats.rsors.mr_count); in bnxt_re_get_dma_mr()
4822 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); in bnxt_re_get_dma_mr()
4823 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) in bnxt_re_get_dma_mr()
4824 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); in bnxt_re_get_dma_mr()
4829 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_get_dma_mr()
4838 struct bnxt_re_dev *rdev = mr->rdev; in bnxt_re_dereg_mr() local
4841 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_dereg_mr()
4843 dev_err(rdev_to_dev(rdev), "Dereg MR failed (%d): rc - %#x\n", in bnxt_re_dereg_mr()
4847 bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, in bnxt_re_dereg_mr()
4858 atomic_dec(&rdev->stats.rsors.mr_count); in bnxt_re_dereg_mr()
4890 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_alloc_mr() local
4895 dev_dbg(rdev_to_dev(rdev), "Alloc MR\n"); in bnxt_re_alloc_mr()
4897 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type); in bnxt_re_alloc_mr()
4901 dev_dbg(rdev_to_dev(rdev), "Max SG exceeded\n"); in bnxt_re_alloc_mr()
4906 dev_err(rdev_to_dev(rdev), "Allocate MR mem failed!\n"); in bnxt_re_alloc_mr()
4909 mr->rdev = rdev; in bnxt_re_alloc_mr()
4914 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_alloc_mr()
4916 dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n"); in bnxt_re_alloc_mr()
4923 dev_err(rdev_to_dev(rdev), in bnxt_re_alloc_mr()
4928 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, in bnxt_re_alloc_mr()
4931 dev_err(rdev_to_dev(rdev), in bnxt_re_alloc_mr()
4935 dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages); in bnxt_re_alloc_mr()
4937 atomic_inc(&rdev->stats.rsors.mr_count); in bnxt_re_alloc_mr()
4938 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); in bnxt_re_alloc_mr()
4939 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) in bnxt_re_alloc_mr()
4940 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); in bnxt_re_alloc_mr()
4946 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_alloc_mr()
4957 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_alloc_mw() local
4964 dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n"); in bnxt_re_alloc_mw()
4968 mw->rdev = rdev; in bnxt_re_alloc_mw()
4974 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); in bnxt_re_alloc_mw()
4976 dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n"); in bnxt_re_alloc_mw()
4980 atomic_inc(&rdev->stats.rsors.mw_count); in bnxt_re_alloc_mw()
4981 max_mw_count = atomic_read(&rdev->stats.rsors.mw_count); in bnxt_re_alloc_mw()
4982 if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count)) in bnxt_re_alloc_mw()
4983 atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count); in bnxt_re_alloc_mw()
4995 struct bnxt_re_dev *rdev = mw->rdev; in bnxt_re_dealloc_mw() local
4998 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); in bnxt_re_dealloc_mw()
5000 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); in bnxt_re_dealloc_mw()
5005 atomic_dec(&rdev->stats.rsors.mw_count); in bnxt_re_dealloc_mw()
5057 struct bnxt_re_dev *rdev = pd->rdev; in bnxt_re_reg_user_mr() local
5065 dev_dbg(rdev_to_dev(rdev), "Reg user MR\n"); in bnxt_re_reg_user_mr()
5067 if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr) in bnxt_re_reg_user_mr()
5070 if (rdev->mod_exit) { in bnxt_re_reg_user_mr()
5071 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__); in bnxt_re_reg_user_mr()
5076 dev_err(rdev_to_dev(rdev), "Requested MR Size: %lu " in bnxt_re_reg_user_mr()
5082 dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n"); in bnxt_re_reg_user_mr()
5085 mr->rdev = rdev; in bnxt_re_reg_user_mr()
5090 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) { in bnxt_re_reg_user_mr()
5091 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_reg_user_mr()
5093 dev_err(rdev_to_dev(rdev), "Alloc MR failed!\n"); in bnxt_re_reg_user_mr()
5100 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context, in bnxt_re_reg_user_mr()
5105 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n", in bnxt_re_reg_user_mr()
5114 dev_err(rdev_to_dev(rdev), "umem is invalid!\n"); in bnxt_re_reg_user_mr()
5120 rdev->dev_attr->page_size_cap); in bnxt_re_reg_user_mr()
5122 dev_err(rdev_to_dev(rdev), "umem page size unsupported!\n"); in bnxt_re_reg_user_mr()
5136 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); in bnxt_re_reg_user_mr()
5138 dev_err(rdev_to_dev(rdev), "Reg user MR failed!\n"); in bnxt_re_reg_user_mr()
5143 atomic_inc(&rdev->stats.rsors.mr_count); in bnxt_re_reg_user_mr()
5144 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count); in bnxt_re_reg_user_mr()
5145 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count)) in bnxt_re_reg_user_mr()
5146 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count); in bnxt_re_reg_user_mr()
5153 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) in bnxt_re_reg_user_mr()
5154 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); in bnxt_re_reg_user_mr()
5168 struct bnxt_re_dev *rdev = mr->rdev; in bnxt_re_rereg_user_mr() local
5176 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context, in bnxt_re_rereg_user_mr()
5181 dev_err(rdev_to_dev(rdev), in bnxt_re_rereg_user_mr()
5191 dev_err(rdev_to_dev(rdev), "umem is invalid!\n"); in bnxt_re_rereg_user_mr()
5197 rdev->dev_attr->page_size_cap); in bnxt_re_rereg_user_mr()
5199 dev_err(rdev_to_dev(rdev), in bnxt_re_rereg_user_mr()
5219 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false); in bnxt_re_rereg_user_mr()
5221 dev_err(rdev_to_dev(rdev), "Rereg user MR failed!\n"); in bnxt_re_rereg_user_mr()
5234 static int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev) in bnxt_re_check_abi_version() argument
5236 struct ib_device *ibdev = &rdev->ibdev; in bnxt_re_check_abi_version()
5240 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d\n", in bnxt_re_check_abi_version()
5243 dev_dbg(rdev_to_dev(rdev), " is different from the device %d \n", in bnxt_re_check_abi_version()
5258 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); in bnxt_re_alloc_ucontext() local
5259 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr; in bnxt_re_alloc_ucontext()
5267 cctx = rdev->chip_ctx; in bnxt_re_alloc_ucontext()
5268 rc = bnxt_re_check_abi_version(rdev); in bnxt_re_alloc_ucontext()
5272 uctx->rdev = rdev; in bnxt_re_alloc_ucontext()
5275 dev_err(rdev_to_dev(rdev), "shared memory allocation failed!\n"); in bnxt_re_alloc_ucontext()
5292 resp.dev_id = rdev->en_dev->pdev->devfn; in bnxt_re_alloc_ucontext()
5293 resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max; in bnxt_re_alloc_ucontext()
5299 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags)) in bnxt_re_alloc_ucontext()
5317 if (rdev->dbr_pacing) in bnxt_re_alloc_ucontext()
5320 if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov) in bnxt_re_alloc_ucontext()
5329 dev_warn(rdev_to_dev(rdev), in bnxt_re_alloc_ucontext()
5332 dev_warn(rdev_to_dev(rdev), in bnxt_re_alloc_ucontext()
5335 dev_warn(rdev_to_dev(rdev), in bnxt_re_alloc_ucontext()
5339 dev_warn(rdev_to_dev(rdev), in bnxt_re_alloc_ucontext()
5345 rc = bnxt_re_copy_to_udata(rdev, &resp, in bnxt_re_alloc_ucontext()
5367 struct bnxt_re_dev *rdev = uctx->rdev; in bnxt_re_dealloc_ucontext() local
5377 if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) { in bnxt_re_dealloc_ucontext()
5378 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_dealloc_ucontext()
5381 dev_err(rdev_to_dev(rdev), in bnxt_re_dealloc_ucontext()
5386 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, in bnxt_re_dealloc_ucontext()
5389 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!\n"); in bnxt_re_dealloc_ucontext()
5401 if (!_is_chip_p7(uctx->rdev->chip_ctx)) in is_bnxt_re_cq_page()
5421 struct bnxt_re_dev *rdev = uctx->rdev; in bnxt_re_mmap() local
5430 dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx \n", in bnxt_re_mmap()
5433 dev_err(rdev_to_dev(rdev), "Shared page mapping failed!\n"); in bnxt_re_mmap()
5449 pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT; in bnxt_re_mmap()
5464 dev_err(rdev_to_dev(rdev), in bnxt_re_mmap()
5479 dev_err(rdev_to_dev(rdev), "DPI mapping failed!\n"); in bnxt_re_mmap()
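
The bnxt_re_mmap() fragments map three distinct backings depending on what userspace asks for: the shared page (uctx->shpg, via vtophys(...) >> PAGE_SHIFT), the doorbell-recovery page (rdev->dbr_page), and the per-context DPI doorbell. A user-space model of dispatching on the requested region to choose a backing page frame; the offset keys and pfn values are illustrative only, the real driver encodes them differently:

#include <stdio.h>

/* Illustrative region keys; not the driver's actual offset encoding. */
enum mmap_key { MAP_SHARED_PAGE, MAP_DBR_PAGE, MAP_DPI_DB };

static unsigned long shpg_pfn = 0x100;
static unsigned long dbr_pfn  = 0x200;
static unsigned long dpi_pfn  = 0x300;

/* Model of the dispatch: pick the pfn for the requested region, fail if
 * it cannot be mapped (cf. "Shared page mapping failed!" and
 * "DPI mapping failed!" in the listing). */
static long mmap_pick_pfn(enum mmap_key key)
{
	switch (key) {
	case MAP_SHARED_PAGE:
		return shpg_pfn;
	case MAP_DBR_PAGE:
		return dbr_pfn;
	case MAP_DPI_DB:
		return dpi_pfn;
	}
	return -1;
}

int main(void)
{
	printf("dpi pfn = 0x%lx\n", mmap_pick_pfn(MAP_DPI_DB));
	return 0;
}
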