Lines Matching full:qp
Whole-word matches for the identifier "qp" in the Broadcom bnxt_re RDMA driver's verbs layer. Each entry shows the source line number, the matched line, and the enclosing function; lines that declare "qp" are additionally tagged "local" or "argument".
564 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, in bnxt_re_legacy_bind_fence_mw() local
566 struct ib_pd *ib_pd = qp->ib_qp.pd; in bnxt_re_legacy_bind_fence_mw()
580 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_re_legacy_bind_fence_mw()
581 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", in bnxt_re_legacy_bind_fence_mw()
582 wqe.bind.r_key, qp->qplib_qp.id, pd); in bnxt_re_legacy_bind_fence_mw()
583 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); in bnxt_re_legacy_bind_fence_mw()
585 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); in bnxt_re_legacy_bind_fence_mw()
588 bnxt_qplib_post_send_db(&qp->qplib_qp); in bnxt_re_legacy_bind_fence_mw()
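
The first match in bnxt_re_legacy_bind_fence_mw() shows only the start of a container_of() call (the rest of the statement sits on a following, non-matching source line): the qplib layer hands over a pointer to the embedded bnxt_qplib_qp and the verbs layer recovers the enclosing bnxt_re_qp before posting the bind WQE and ringing the send doorbell. A minimal userspace sketch of that idiom follows; the struct layouts and names are stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in layouts: the real driver embeds struct bnxt_qplib_qp
 * inside struct bnxt_re_qp in the same way. */
struct demo_qplib_qp { unsigned int id; };

struct demo_re_qp {
    int some_state;
    struct demo_qplib_qp qplib_qp;   /* embedded member */
};

int main(void)
{
    struct demo_re_qp qp = { .some_state = 42, .qplib_qp = { .id = 7 } };
    struct demo_qplib_qp *qplib = &qp.qplib_qp;

    /* Recover the container from a pointer to its embedded member. */
    struct demo_re_qp *outer = container_of(qplib, struct demo_re_qp, qplib_qp);

    printf("container state=%d, qplib id=%u\n", outer->some_state, outer->qplib_qp.id);
    return 0;
}
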
1402 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) in bnxt_re_lock_cqs() argument
1406 spin_lock_irqsave(&qp->scq->cq_lock, flags); in bnxt_re_lock_cqs()
1407 if (qp->rcq && qp->rcq != qp->scq) in bnxt_re_lock_cqs()
1408 spin_lock(&qp->rcq->cq_lock); in bnxt_re_lock_cqs()
1413 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, in bnxt_re_unlock_cqs() argument
1416 if (qp->rcq && qp->rcq != qp->scq) in bnxt_re_unlock_cqs()
1417 spin_unlock(&qp->rcq->cq_lock); in bnxt_re_unlock_cqs()
1418 spin_unlock_irqrestore(&qp->scq->cq_lock, flags); in bnxt_re_unlock_cqs()
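
bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() encode a simple rule: take the send-CQ lock first, take the receive-CQ lock only when it is a different CQ, and release in reverse order, so a QP whose SQ and RQ share one CQ is never double-locked. Below is a minimal userspace sketch of the same ordering rule, with pthread mutexes standing in for the driver's spinlocks and all names invented for the demo.

#include <pthread.h>
#include <stdio.h>

struct demo_cq { pthread_mutex_t lock; };
struct demo_qp { struct demo_cq *scq, *rcq; };

/* Take the send-CQ lock first; the recv-CQ lock only if it is distinct. */
static void demo_lock_cqs(struct demo_qp *qp)
{
    pthread_mutex_lock(&qp->scq->lock);
    if (qp->rcq && qp->rcq != qp->scq)
        pthread_mutex_lock(&qp->rcq->lock);
}

/* Release in reverse order. */
static void demo_unlock_cqs(struct demo_qp *qp)
{
    if (qp->rcq && qp->rcq != qp->scq)
        pthread_mutex_unlock(&qp->rcq->lock);
    pthread_mutex_unlock(&qp->scq->lock);
}

int main(void)
{
    static struct demo_cq cq = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct demo_qp qp = { .scq = &cq, .rcq = &cq };  /* SQ and RQ share one CQ */

    demo_lock_cqs(&qp);      /* only one lock is taken */
    demo_unlock_cqs(&qp);
    printf("lock/unlock ordering exercised\n");
    return 0;
}

In the driver the same pair brackets bnxt_qplib_clean_qp() and bnxt_qplib_add_flush_qp(), as the later matches show.
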
1422 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) in bnxt_re_destroy_gsi_sqp() argument
1430 rdev = qp->rdev; in bnxt_re_destroy_gsi_sqp()
1434 /* remove from active qp list */ in bnxt_re_destroy_gsi_sqp()
1445 "Destroy HW AH for shadow QP failed!\n"); in bnxt_re_destroy_gsi_sqp()
1449 dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); in bnxt_re_destroy_gsi_sqp()
1452 dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n"); in bnxt_re_destroy_gsi_sqp()
1454 /* Clean the CQ for shadow QP completions */ in bnxt_re_destroy_gsi_sqp()
1492 "Perf Debug: %ps Total (%d) QP destroyed in (%ld) msec\n", in bnxt_re_dump_debug_stats()
1503 /* Potential hint to know latency of QP destroy. in bnxt_re_dump_debug_stats()
1504 * Average time taken for 1K QP Destroy. in bnxt_re_dump_debug_stats()
1508 "Perf Debug: %ps Active QP (%d) Watermark (%d)\n", in bnxt_re_dump_debug_stats()
1517 struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); in bnxt_re_destroy_qp() local
1518 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_destroy_qp()
1524 list_del(&qp->list); in bnxt_re_destroy_qp()
1526 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC) in bnxt_re_destroy_qp()
1528 else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD) in bnxt_re_destroy_qp()
1532 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1536 __func__, qp->qplib_qp.id, rc); in bnxt_re_destroy_qp()
1539 flags = bnxt_re_lock_cqs(qp); in bnxt_re_destroy_qp()
1540 bnxt_qplib_clean_qp(&qp->qplib_qp); in bnxt_re_destroy_qp()
1541 bnxt_re_unlock_cqs(qp, flags); in bnxt_re_destroy_qp()
1544 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1549 bnxt_re_destroy_gsi_sqp(qp); in bnxt_re_destroy_qp()
1551 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_destroy_qp()
1554 if (qp->rumem && !IS_ERR(qp->rumem)) in bnxt_re_destroy_qp()
1555 ib_umem_release(qp->rumem); in bnxt_re_destroy_qp()
1556 if (qp->sumem && !IS_ERR(qp->sumem)) in bnxt_re_destroy_qp()
1557 ib_umem_release(qp->sumem); in bnxt_re_destroy_qp()
1558 kfree(qp); in bnxt_re_destroy_qp()
1599 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp, in bnxt_re_setup_swqe_size() argument
1608 rdev = qp->rdev; in bnxt_re_setup_swqe_size()
1609 qplqp = &qp->qplib_qp; in bnxt_re_setup_swqe_size()
1639 struct bnxt_re_pd *pd, struct bnxt_re_qp *qp, in bnxt_re_init_user_qp() argument
1652 qplib_qp = &qp->qplib_qp; in bnxt_re_init_user_qp()
1694 qp->sumem = umem; in bnxt_re_init_user_qp()
1700 if (!qp->qplib_qp.srq) { in bnxt_re_init_user_qp()
1713 qp->rumem = umem; in bnxt_re_init_user_qp()
1724 ib_umem_release(qp->sumem); in bnxt_re_init_user_qp()
1725 qp->sumem = NULL; in bnxt_re_init_user_qp()
1772 "Allocate HW AH for Shadow QP failed!\n"); in bnxt_re_create_shadow_qp_ah()
1837 struct bnxt_re_qp *qp; in bnxt_re_create_shadow_qp() local
1840 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in bnxt_re_create_shadow_qp()
1841 if (!qp) { in bnxt_re_create_shadow_qp()
1842 dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n"); in bnxt_re_create_shadow_qp()
1845 memset(qp, 0, sizeof(*qp)); in bnxt_re_create_shadow_qp()
1846 qp->rdev = rdev; in bnxt_re_create_shadow_qp()
1848 /* Initialize the shadow QP structure from the QP1 values */ in bnxt_re_create_shadow_qp()
1849 ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr); in bnxt_re_create_shadow_qp()
1850 qp->qplib_qp.pd = &pd->qplib_pd; in bnxt_re_create_shadow_qp()
1851 qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp; in bnxt_re_create_shadow_qp()
1852 qp->qplib_qp.type = IB_QPT_UD; in bnxt_re_create_shadow_qp()
1854 qp->qplib_qp.max_inline_data = 0; in bnxt_re_create_shadow_qp()
1855 qp->qplib_qp.sig_type = true; in bnxt_re_create_shadow_qp()
1857 /* Shadow QP SQ depth should be same as QP1 RQ depth */ in bnxt_re_create_shadow_qp()
1858 qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6); in bnxt_re_create_shadow_qp()
1859 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; in bnxt_re_create_shadow_qp()
1860 qp->qplib_qp.sq.max_sge = 2; in bnxt_re_create_shadow_qp()
1861 /* Q full delta can be 1 since it is internal QP */ in bnxt_re_create_shadow_qp()
1862 qp->qplib_qp.sq.q_full_delta = 1; in bnxt_re_create_shadow_qp()
1863 qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE; in bnxt_re_create_shadow_qp()
1864 qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT; in bnxt_re_create_shadow_qp()
1866 qp->qplib_qp.scq = qp1_qp->scq; in bnxt_re_create_shadow_qp()
1867 qp->qplib_qp.rcq = qp1_qp->rcq; in bnxt_re_create_shadow_qp()
1869 qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */ in bnxt_re_create_shadow_qp()
1870 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; in bnxt_re_create_shadow_qp()
1871 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; in bnxt_re_create_shadow_qp()
1872 qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE; in bnxt_re_create_shadow_qp()
1873 qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT; in bnxt_re_create_shadow_qp()
1874 /* Q full delta can be 1 since it is internal QP */ in bnxt_re_create_shadow_qp()
1875 qp->qplib_qp.rq.q_full_delta = 1; in bnxt_re_create_shadow_qp()
1876 qp->qplib_qp.mtu = qp1_qp->mtu; in bnxt_re_create_shadow_qp()
1877 qp->qplib_qp.dpi = &rdev->dpi_privileged; in bnxt_re_create_shadow_qp()
1879 rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0, in bnxt_re_create_shadow_qp()
1884 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp); in bnxt_re_create_shadow_qp()
1886 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n"); in bnxt_re_create_shadow_qp()
1890 dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n", in bnxt_re_create_shadow_qp()
1891 qp->qplib_qp.id); in bnxt_re_create_shadow_qp()
1892 spin_lock_init(&qp->sq_lock); in bnxt_re_create_shadow_qp()
1893 INIT_LIST_HEAD(&qp->list); in bnxt_re_create_shadow_qp()
1895 list_add_tail(&qp->list, &rdev->qp_list); in bnxt_re_create_shadow_qp()
1898 return qp; in bnxt_re_create_shadow_qp()
1900 bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp); in bnxt_re_create_shadow_qp()
1902 kfree(qp); in bnxt_re_create_shadow_qp()
1906 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, in bnxt_re_init_rq_attr() argument
1915 rdev = qp->rdev; in bnxt_re_init_rq_attr()
1916 qplqp = &qp->qplib_qp; in bnxt_re_init_rq_attr()
1951 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) in bnxt_re_adjust_gsi_rq_attr() argument
1957 rdev = qp->rdev; in bnxt_re_adjust_gsi_rq_attr()
1958 qplqp = &qp->qplib_qp; in bnxt_re_adjust_gsi_rq_attr()
1965 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, in bnxt_re_init_sq_attr() argument
1977 rdev = qp->rdev; in bnxt_re_init_sq_attr()
1978 qplqp = &qp->qplib_qp; in bnxt_re_init_sq_attr()
1987 rc = bnxt_re_setup_swqe_size(qp, init_attr); in bnxt_re_init_sq_attr()
2023 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, in bnxt_re_adjust_gsi_sq_attr() argument
2032 rdev = qp->rdev; in bnxt_re_adjust_gsi_sq_attr()
2033 qplqp = &qp->qplib_qp; in bnxt_re_adjust_gsi_sq_attr()
2061 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n", in bnxt_re_init_qp_type()
2081 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, in bnxt_re_init_qp_attr() argument
2093 rdev = qp->rdev; in bnxt_re_init_qp_attr()
2094 qplqp = &qp->qplib_qp; in bnxt_re_init_qp_attr()
2125 "QP create flags 0x%x not supported\n", in bnxt_re_init_qp_attr()
2139 qp->scq = cq; in bnxt_re_init_qp_attr()
2150 qp->rcq = cq; in bnxt_re_init_qp_attr()
2154 rc = bnxt_re_init_rq_attr(qp, init_attr, cntx); in bnxt_re_init_qp_attr()
2158 bnxt_re_adjust_gsi_rq_attr(qp); in bnxt_re_init_qp_attr()
2161 rc = bnxt_re_init_sq_attr(qp, init_attr, cntx); in bnxt_re_init_qp_attr()
2165 bnxt_re_adjust_gsi_sq_attr(qp, init_attr, cntx); in bnxt_re_init_qp_attr()
2168 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); in bnxt_re_init_qp_attr()
2173 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, in bnxt_re_create_shadow_gsi() argument
2182 rdev = qp->rdev; in bnxt_re_create_shadow_gsi()
2183 /* Create a shadow QP to handle the QP1 traffic */ in bnxt_re_create_shadow_gsi()
2190 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_shadow_gsi()
2194 "Failed to create Shadow QP for QP1\n"); in bnxt_re_create_shadow_gsi()
2199 sqp->rcq = qp->rcq; in bnxt_re_create_shadow_gsi()
2200 sqp->scq = qp->scq; in bnxt_re_create_shadow_gsi()
2202 &qp->qplib_qp); in bnxt_re_create_shadow_gsi()
2233 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd) in bnxt_re_create_gsi_qp() argument
2242 rdev = qp->rdev; in bnxt_re_create_gsi_qp()
2243 qplqp = &qp->qplib_qp; in bnxt_re_create_gsi_qp()
2260 rc = bnxt_re_create_shadow_gsi(qp, pd); in bnxt_re_create_gsi_qp()
2278 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! " in bnxt_re_test_qp_limits()
2296 struct bnxt_re_qp *qp; in __get_qp_from_qp_in() local
2298 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in __get_qp_from_qp_in()
2299 if (!qp) in __get_qp_from_qp_in()
2300 dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n"); in __get_qp_from_qp_in()
2301 return qp; in __get_qp_from_qp_in()
2313 struct bnxt_re_qp *qp; in bnxt_re_create_qp() local
2326 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n", in bnxt_re_create_qp()
2337 qp = __get_qp_from_qp_in(qp_in, rdev); in bnxt_re_create_qp()
2338 if (!qp) { in bnxt_re_create_qp()
2342 qp->rdev = rdev; in bnxt_re_create_qp()
2344 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); in bnxt_re_create_qp()
2350 rc = bnxt_re_create_gsi_qp(qp, pd); in bnxt_re_create_qp()
2356 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_qp()
2358 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n"); in bnxt_re_create_qp()
2365 resp.qpid = qp->qplib_qp.id; in bnxt_re_create_qp()
2374 qp->ib_qp.qp_num = qp->qplib_qp.id; in bnxt_re_create_qp()
2376 rdev->gsi_ctx.gsi_qp = qp; in bnxt_re_create_qp()
2377 spin_lock_init(&qp->sq_lock); in bnxt_re_create_qp()
2378 spin_lock_init(&qp->rq_lock); in bnxt_re_create_qp()
2379 INIT_LIST_HEAD(&qp->list); in bnxt_re_create_qp()
2381 list_add_tail(&qp->list, &rdev->qp_list); in bnxt_re_create_qp()
2401 return &qp->ib_qp; in bnxt_re_create_qp()
2404 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_create_qp()
2407 if (qp->rumem && !IS_ERR(qp->rumem)) in bnxt_re_create_qp()
2408 ib_umem_release(qp->rumem); in bnxt_re_create_qp()
2409 if (qp->sumem && !IS_ERR(qp->sumem)) in bnxt_re_create_qp()
2410 ib_umem_release(qp->sumem); in bnxt_re_create_qp()
2413 kfree(qp); in bnxt_re_create_qp()
2422 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; in bnxt_re_modify_shadow_qp() local
2426 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; in bnxt_re_modify_shadow_qp()
2427 qp->qplib_qp.state = qp1_qp->qplib_qp.state; in bnxt_re_modify_shadow_qp()
2430 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; in bnxt_re_modify_shadow_qp()
2431 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index; in bnxt_re_modify_shadow_qp()
2435 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; in bnxt_re_modify_shadow_qp()
2437 qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY; in bnxt_re_modify_shadow_qp()
2440 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; in bnxt_re_modify_shadow_qp()
2441 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn; in bnxt_re_modify_shadow_qp()
2444 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_modify_shadow_qp()
2446 dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n"); in bnxt_re_modify_shadow_qp()
2456 struct bnxt_re_qp *qp) in get_source_port() argument
2463 if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) { in get_source_port()
2471 memcpy(smac, qp->qplib_qp.smac, ETH_ALEN); in get_source_port()
2474 memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN); in get_source_port()
2480 memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len); in get_source_port()
2483 memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len); in get_source_port()
2486 qpn = htonl(qp->qplib_qp.dest_qpn); in get_source_port()
2496 static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) in bnxt_re_update_qp_info() argument
2500 type = __from_hw_to_ib_qp_type(qp->qplib_qp.type); in bnxt_re_update_qp_info()
2503 if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) { in bnxt_re_update_qp_info()
2504 qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw); in bnxt_re_update_qp_info()
2505 qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data); in bnxt_re_update_qp_info()
2507 memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw, in bnxt_re_update_qp_info()
2508 sizeof(qp->qp_info_entry.s_ip.ipv6_addr)); in bnxt_re_update_qp_info()
2509 memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data, in bnxt_re_update_qp_info()
2510 sizeof(qp->qp_info_entry.d_ip.ipv6_addr)); in bnxt_re_update_qp_info()
2514 (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 || in bnxt_re_update_qp_info()
2515 qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) { in bnxt_re_update_qp_info()
2516 qp->qp_info_entry.s_port = get_source_port(rdev, qp); in bnxt_re_update_qp_info()
2518 qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT; in bnxt_re_update_qp_info()
2521 static void bnxt_qplib_manage_flush_qp(struct bnxt_re_qp *qp) in bnxt_qplib_manage_flush_qp() argument
2526 if (qp->sumem) in bnxt_qplib_manage_flush_qp()
2529 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { in bnxt_qplib_manage_flush_qp()
2530 rq = &qp->qplib_qp.rq; in bnxt_qplib_manage_flush_qp()
2531 sq = &qp->qplib_qp.sq; in bnxt_qplib_manage_flush_qp()
2533 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_qplib_manage_flush_qp()
2534 "Move QP = %p to flush list\n", qp); in bnxt_qplib_manage_flush_qp()
2535 flags = bnxt_re_lock_cqs(qp); in bnxt_qplib_manage_flush_qp()
2536 bnxt_qplib_add_flush_qp(&qp->qplib_qp); in bnxt_qplib_manage_flush_qp()
2537 bnxt_re_unlock_cqs(qp, flags); in bnxt_qplib_manage_flush_qp()
2540 bnxt_re_handle_cqn(&qp->scq->qplib_cq); in bnxt_qplib_manage_flush_qp()
2542 if (qp->rcq && (qp->rcq != qp->scq) && in bnxt_qplib_manage_flush_qp()
2544 bnxt_re_handle_cqn(&qp->rcq->qplib_cq); in bnxt_qplib_manage_flush_qp()
2547 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { in bnxt_qplib_manage_flush_qp()
2548 dev_dbg(rdev_to_dev(qp->rdev), in bnxt_qplib_manage_flush_qp()
2549 "Move QP = %p out of flush list\n", qp); in bnxt_qplib_manage_flush_qp()
2550 flags = bnxt_re_lock_cqs(qp); in bnxt_qplib_manage_flush_qp()
2551 bnxt_qplib_clean_qp(&qp->qplib_qp); in bnxt_qplib_manage_flush_qp()
2552 bnxt_re_unlock_cqs(qp, flags); in bnxt_qplib_manage_flush_qp()
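
The bnxt_qplib_manage_flush_qp() matches show the state-driven part of flush handling: user QPs (qp->sumem set) are skipped, a move to the ERROR state puts the queues on the flush list under the CQ locks and kicks the CQ handlers, and a move back to RESET cleans the QP off the list again. The sketch below captures just that state decision; the enum, flag, and helper names are invented for the demo.

#include <stdio.h>

enum demo_state { DEMO_RESET, DEMO_RTS, DEMO_ERR };

struct demo_qp {
    enum demo_state state;
    int is_user;          /* stands in for qp->sumem != NULL */
    int on_flush_list;
};

static void demo_manage_flush(struct demo_qp *qp)
{
    if (qp->is_user)               /* user-space QPs manage their own queues */
        return;

    if (qp->state == DEMO_ERR && !qp->on_flush_list) {
        /* the driver takes both CQ locks around this step, then
         * notifies the send and receive CQ handlers */
        qp->on_flush_list = 1;
        printf("QP moved to flush list\n");
    } else if (qp->state == DEMO_RESET && qp->on_flush_list) {
        qp->on_flush_list = 0;
        printf("QP cleaned and taken off the flush list\n");
    }
}

int main(void)
{
    struct demo_qp qp = { .state = DEMO_ERR };

    demo_manage_flush(&qp);    /* ERROR: queued for software flush completions */
    qp.state = DEMO_RESET;
    demo_manage_flush(&qp);    /* RESET: cleaned again */
    return 0;
}
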
2574 struct bnxt_re_qp *qp; in bnxt_re_modify_qp() local
2583 qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); in bnxt_re_modify_qp()
2584 rdev = qp->rdev; in bnxt_re_modify_qp()
2587 qp->qplib_qp.modify_flags = 0; in bnxt_re_modify_qp()
2588 ppp = &qp->qplib_qp.ppp; in bnxt_re_modify_qp()
2590 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); in bnxt_re_modify_qp()
2605 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; in bnxt_re_modify_qp()
2606 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state); in bnxt_re_modify_qp()
2620 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2622 qp->qplib_qp.en_sqd_async_notify = true; in bnxt_re_modify_qp()
2625 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS; in bnxt_re_modify_qp()
2626 qp->qplib_qp.access = in bnxt_re_modify_qp()
2629 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; in bnxt_re_modify_qp()
2630 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; in bnxt_re_modify_qp()
2631 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; in bnxt_re_modify_qp()
2634 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; in bnxt_re_modify_qp()
2635 qp->qplib_qp.pkey_index = qp_attr->pkey_index; in bnxt_re_modify_qp()
2638 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY; in bnxt_re_modify_qp()
2639 qp->qplib_qp.qkey = qp_attr->qkey; in bnxt_re_modify_qp()
2642 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | in bnxt_re_modify_qp()
2649 memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw, in bnxt_re_modify_qp()
2650 sizeof(qp->qplib_qp.ah.dgid.data)); in bnxt_re_modify_qp()
2651 qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label; in bnxt_re_modify_qp()
2652 qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev, in bnxt_re_modify_qp()
2654 qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index; in bnxt_re_modify_qp()
2655 qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit; in bnxt_re_modify_qp()
2656 qp->qplib_qp.ah.traffic_class = in bnxt_re_modify_qp()
2658 qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl; in bnxt_re_modify_qp()
2659 ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr)); in bnxt_re_modify_qp()
2669 memcpy(qp->qplib_qp.smac, rdev->dev_addr, in bnxt_re_modify_qp()
2676 qp->qplib_qp.nw_type = in bnxt_re_modify_qp()
2680 qp->qplib_qp.nw_type = in bnxt_re_modify_qp()
2684 qp->qplib_qp.nw_type = in bnxt_re_modify_qp()
2689 memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid)); in bnxt_re_modify_qp()
2694 bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr, in bnxt_re_modify_qp()
2699 resp.path_mtu = qp->qplib_qp.mtu; in bnxt_re_modify_qp()
2702 dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n", in bnxt_re_modify_qp()
2703 qp->qplib_qp.id); in bnxt_re_modify_qp()
2710 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT; in bnxt_re_modify_qp()
2711 qp->qplib_qp.timeout = qp_attr->timeout; in bnxt_re_modify_qp()
2714 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2716 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt; in bnxt_re_modify_qp()
2719 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2721 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry; in bnxt_re_modify_qp()
2724 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2726 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer; in bnxt_re_modify_qp()
2729 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN; in bnxt_re_modify_qp()
2730 qp->qplib_qp.rq.psn = qp_attr->rq_psn; in bnxt_re_modify_qp()
2733 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2741 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, in bnxt_re_modify_qp()
2745 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; in bnxt_re_modify_qp()
2746 qp->qplib_qp.sq.psn = qp_attr->sq_psn; in bnxt_re_modify_qp()
2757 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2759 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; in bnxt_re_modify_qp()
2762 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2775 "Create QP failed - max exceeded\n"); in bnxt_re_modify_qp()
2782 qp->qplib_qp.sq.max_wqe = entries; in bnxt_re_modify_qp()
2783 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - in bnxt_re_modify_qp()
2790 qp->qplib_qp.sq.q_full_delta -= 1; in bnxt_re_modify_qp()
2791 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; in bnxt_re_modify_qp()
2792 if (qp->qplib_qp.rq.max_wqe) { in bnxt_re_modify_qp()
2796 qp->qplib_qp.rq.max_wqe = entries; in bnxt_re_modify_qp()
2797 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - in bnxt_re_modify_qp()
2799 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; in bnxt_re_modify_qp()
2805 qp->qplib_qp.modify_flags |= in bnxt_re_modify_qp()
2807 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num; in bnxt_re_modify_qp()
2810 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); in bnxt_re_modify_qp()
2812 dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n"); in bnxt_re_modify_qp()
2816 bnxt_qplib_manage_flush_qp(qp); in bnxt_re_modify_qp()
2836 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); in bnxt_re_modify_qp()
2840 bnxt_re_update_qp_info(rdev, qp); in bnxt_re_modify_qp()
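
bnxt_re_modify_qp() repeats one pattern for almost every IB_QP_* attribute bit: test the bit in qp_attr_mask, OR the matching CMDQ_MODIFY_QP_MODIFY_MASK_* flag into qplib_qp.modify_flags, copy the value across, and only then issue a single bnxt_qplib_modify_qp() call. A compact userspace sketch of that translation, using invented flag names rather than the real CMDQ_* constants:

#include <stdio.h>

enum { ATTR_STATE = 1u << 0, ATTR_QKEY = 1u << 1, ATTR_TIMEOUT = 1u << 2 };
enum { MOD_STATE  = 1u << 0, MOD_QKEY  = 1u << 1, MOD_TIMEOUT  = 1u << 2 };

struct demo_attr { unsigned int state, qkey, timeout; };
struct demo_qp   { unsigned int modify_flags, state, qkey, timeout; };

static void demo_modify(struct demo_qp *qp, const struct demo_attr *attr,
                        unsigned int mask)
{
    qp->modify_flags = 0;                 /* reset, as the driver does up front */

    if (mask & ATTR_STATE)   { qp->modify_flags |= MOD_STATE;   qp->state   = attr->state; }
    if (mask & ATTR_QKEY)    { qp->modify_flags |= MOD_QKEY;    qp->qkey    = attr->qkey; }
    if (mask & ATTR_TIMEOUT) { qp->modify_flags |= MOD_TIMEOUT; qp->timeout = attr->timeout; }

    /* only now would one firmware MODIFY_QP command be issued */
}

int main(void)
{
    struct demo_attr attr = { .state = 3, .qkey = 0x80010000, .timeout = 14 };
    struct demo_qp qp = { 0 };

    demo_modify(&qp, &attr, ATTR_STATE | ATTR_TIMEOUT);   /* qkey left untouched */
    printf("modify_flags=0x%x state=%u timeout=%u qkey=0x%x\n",
           qp.modify_flags, qp.state, qp.timeout, qp.qkey);
    return 0;
}
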
2847 struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); in bnxt_re_query_qp() local
2848 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_query_qp()
2856 qplib_qp->id = qp->qplib_qp.id; in bnxt_re_query_qp()
2857 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; in bnxt_re_query_qp()
2861 dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n", in bnxt_re_query_qp()
2892 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; in bnxt_re_query_qp()
2893 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; in bnxt_re_query_qp()
2894 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe; in bnxt_re_query_qp()
2895 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge; in bnxt_re_query_qp()
2896 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; in bnxt_re_query_qp()
2924 static int bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr, in bnxt_re_build_qp1_send() argument
2937 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); in bnxt_re_build_qp1_send()
2940 rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid); in bnxt_re_build_qp1_send()
2945 qp->qp1_hdr.eth_present = 1; in bnxt_re_build_qp1_send()
2947 memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4); in bnxt_re_build_qp1_send()
2949 memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2); in bnxt_re_build_qp1_send()
2951 ptmac = qp->qplib_qp.smac; in bnxt_re_build_qp1_send()
2952 memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2); in bnxt_re_build_qp1_send()
2954 memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4); in bnxt_re_build_qp1_send()
2956 qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1); in bnxt_re_build_qp1_send()
2961 qp->qp1_hdr.vlan_present = 1; in bnxt_re_build_qp1_send()
2962 qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q); in bnxt_re_build_qp1_send()
2965 qp->qp1_hdr.grh_present = 1; in bnxt_re_build_qp1_send()
2966 qp->qp1_hdr.grh.ip_version = 6; in bnxt_re_build_qp1_send()
2967 qp->qp1_hdr.grh.payload_length = in bnxt_re_build_qp1_send()
2970 qp->qp1_hdr.grh.next_header = 0x1b; in bnxt_re_build_qp1_send()
2971 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid)); in bnxt_re_build_qp1_send()
2972 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data, in bnxt_re_build_qp1_send()
2977 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; in bnxt_re_build_qp1_send()
2978 qp->qp1_hdr.immediate_present = 1; in bnxt_re_build_qp1_send()
2980 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in bnxt_re_build_qp1_send()
2983 qp->qp1_hdr.bth.solicited_event = 1; in bnxt_re_build_qp1_send()
2984 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; in bnxt_re_build_qp1_send()
2986 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); in bnxt_re_build_qp1_send()
2987 qp->qp1_hdr.bth.destination_qpn = IB_QP1; in bnxt_re_build_qp1_send()
2988 qp->qp1_hdr.bth.ack_req = 0; in bnxt_re_build_qp1_send()
2989 qp->send_psn++; in bnxt_re_build_qp1_send()
2990 qp->send_psn &= BTH_PSN_MASK; in bnxt_re_build_qp1_send()
2991 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); in bnxt_re_build_qp1_send()
2994 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); in bnxt_re_build_qp1_send()
2995 qp->qp1_hdr.deth.source_qpn = IB_QP1; in bnxt_re_build_qp1_send()
2998 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge); in bnxt_re_build_qp1_send()
3000 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n"); in bnxt_re_build_qp1_send()
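
Among the QP1 header fields built above, the PSN handling is easy to miss: qp->send_psn is incremented per packet and masked with BTH_PSN_MASK before being byte-swapped into the BTH. A small sketch of that wrap behaviour, assuming the mask covers the 24-bit PSN field defined for the BTH:

#include <stdio.h>

#define DEMO_BTH_PSN_MASK 0xFFFFFFu   /* assumed 24-bit PSN field width */

static unsigned int demo_next_psn(unsigned int psn)
{
    psn++;
    return psn & DEMO_BTH_PSN_MASK;   /* wraps at 2^24 */
}

int main(void)
{
    unsigned int psn = 0xFFFFFEu;

    psn = demo_next_psn(psn);         /* 0xFFFFFF */
    printf("psn=0x%06x\n", psn);
    psn = demo_next_psn(psn);         /* wraps back to 0x000000 */
    printf("psn=0x%06x\n", psn);
    return 0;
}
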
3016 static int bnxt_re_build_gsi_send(struct bnxt_re_qp *qp, in bnxt_re_build_gsi_send() argument
3023 rdev = qp->rdev; in bnxt_re_build_gsi_send()
3035 rc = bnxt_re_build_qp1_send(qp, wr, wqe, len); in bnxt_re_build_gsi_send()
3048 static int bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp, in bnxt_re_build_qp1_recv() argument
3052 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_build_qp1_recv()
3059 if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) { in bnxt_re_build_qp1_recv()
3086 dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n"); in bnxt_re_build_qp1_recv()
3100 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3114 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3128 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3139 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_recv()
3147 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n"); in bnxt_re_build_qp1_recv()
3154 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, in bnxt_re_build_qp1_shadow_qp_recv() argument
3164 rdev = qp->rdev; in bnxt_re_build_qp1_shadow_qp_recv()
3166 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); in bnxt_re_build_qp1_shadow_qp_recv()
3168 if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) { in bnxt_re_build_qp1_shadow_qp_recv()
3178 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_build_qp1_shadow_qp_recv()
3197 static bool is_ud_qp(struct bnxt_re_qp *qp) in is_ud_qp() argument
3199 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || in is_ud_qp()
3200 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); in is_ud_qp()
3203 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, in bnxt_re_build_send_wqe() argument
3209 if(is_ud_qp(qp)) { in bnxt_re_build_send_wqe()
3228 dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n", in bnxt_re_build_send_wqe()
3384 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) in bnxt_ud_qp_hw_stall_workaround() argument
3386 if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI || in bnxt_ud_qp_hw_stall_workaround()
3387 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && in bnxt_ud_qp_hw_stall_workaround()
3388 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { in bnxt_ud_qp_hw_stall_workaround()
3394 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); in bnxt_ud_qp_hw_stall_workaround()
3395 qp->qplib_qp.wqe_cnt = 0; in bnxt_ud_qp_hw_stall_workaround()
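
The bnxt_ud_qp_hw_stall_workaround() matches show the shape of the workaround: for UD, GSI and raw-Ethertype QPs, once qplib_qp.wqe_cnt reaches BNXT_RE_UD_QP_HW_STALL the driver issues a modify-QP call (the attribute setup is not in the matched lines) and resets the counter. A sketch of that counter-and-nudge logic, with a placeholder threshold and helper:

#include <stdio.h>

#define DEMO_STALL_THRESHOLD 1024u   /* placeholder for BNXT_RE_UD_QP_HW_STALL */

struct demo_qp { int is_ud_like; unsigned int wqe_cnt; };

static void demo_nudge_hw(struct demo_qp *qp)
{
    (void)qp;
    /* the driver calls bnxt_re_modify_qp() at this point */
    printf("issued modify-QP nudge\n");
}

static void demo_stall_workaround(struct demo_qp *qp)
{
    if (qp->is_ud_like && qp->wqe_cnt == DEMO_STALL_THRESHOLD) {
        demo_nudge_hw(qp);
        qp->wqe_cnt = 0;             /* restart the count */
    }
}

int main(void)
{
    struct demo_qp qp = { .is_ud_like = 1, .wqe_cnt = DEMO_STALL_THRESHOLD };

    demo_stall_workaround(&qp);
    printf("wqe_cnt=%u\n", qp.wqe_cnt);
    return 0;
}
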
3400 struct bnxt_re_qp *qp, in bnxt_re_post_send_shadow_qp() argument
3407 spin_lock_irqsave(&qp->sq_lock, flags); in bnxt_re_post_send_shadow_qp()
3412 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { in bnxt_re_post_send_shadow_qp()
3422 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); in bnxt_re_post_send_shadow_qp()
3426 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); in bnxt_re_post_send_shadow_qp()
3435 bnxt_qplib_post_send_db(&qp->qplib_qp); in bnxt_re_post_send_shadow_qp()
3436 bnxt_ud_qp_hw_stall_workaround(qp); in bnxt_re_post_send_shadow_qp()
3437 spin_unlock_irqrestore(&qp->sq_lock, flags); in bnxt_re_post_send_shadow_qp()
3456 struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); in bnxt_re_post_send() local
3463 rdev = qp->rdev; in bnxt_re_post_send()
3464 spin_lock_irqsave(&qp->sq_lock, flags); in bnxt_re_post_send()
3469 if (wr->num_sge > qp->qplib_qp.sq.max_sge) { in bnxt_re_post_send()
3486 rc = bnxt_re_build_gsi_send(qp, wr, &wqe); in bnxt_re_post_send()
3502 rc = bnxt_re_build_send_wqe(qp, wr, &wqe); in bnxt_re_post_send()
3535 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); in bnxt_re_post_send()
3546 bnxt_qplib_post_send_db(&qp->qplib_qp); in bnxt_re_post_send()
3548 bnxt_ud_qp_hw_stall_workaround(qp); in bnxt_re_post_send()
3549 spin_unlock_irqrestore(&qp->sq_lock, flags); in bnxt_re_post_send()
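
bnxt_re_post_send() and bnxt_re_post_send_shadow_qp() share one loop shape: under the per-QP sq_lock, walk the chained work requests, reject a request whose num_sge exceeds the SQ limit, build and post a WQE per request, record the first failing request (through the usual bad_wr out-parameter of the verbs API), then ring the send doorbell once after the loop, followed by the UD stall workaround. The sketch below reproduces just that control flow with simplified stand-in types:

#include <stdio.h>

struct demo_wr { int num_sge; struct demo_wr *next; };
struct demo_sq { int max_sge; int posted; };

static int demo_post_one(struct demo_sq *sq, const struct demo_wr *wr)
{
    if (wr->num_sge > sq->max_sge)
        return -1;                     /* driver returns -EINVAL here */
    sq->posted++;                      /* stands in for building + posting a WQE */
    return 0;
}

static int demo_post_send(struct demo_sq *sq, struct demo_wr *wr,
                          const struct demo_wr **bad_wr)
{
    int rc = 0;

    /* sq_lock would be taken here */
    for (; wr; wr = wr->next) {
        rc = demo_post_one(sq, wr);
        if (rc) {
            *bad_wr = wr;              /* first request that could not be queued */
            break;
        }
    }
    /* doorbell rung once after the loop, then sq_lock released */
    return rc;
}

int main(void)
{
    struct demo_wr w2 = { .num_sge = 9, .next = NULL };   /* exceeds max_sge */
    struct demo_wr w1 = { .num_sge = 1, .next = &w2 };
    struct demo_sq sq = { .max_sge = 6 };
    const struct demo_wr *bad = NULL;

    int rc = demo_post_send(&sq, &w1, &bad);
    printf("rc=%d posted=%d bad_wr_sges=%d\n", rc, sq.posted, bad ? bad->num_sge : 0);
    return 0;
}
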
3555 struct bnxt_re_qp *qp, in bnxt_re_post_recv_shadow_qp() argument
3566 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { in bnxt_re_post_recv_shadow_qp()
3577 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); in bnxt_re_post_recv_shadow_qp()
3586 bnxt_qplib_post_recv_db(&qp->qplib_qp); in bnxt_re_post_recv_shadow_qp()
3590 static int bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp, in bnxt_re_build_gsi_recv() argument
3594 struct bnxt_re_dev *rdev = qp->rdev; in bnxt_re_build_gsi_recv()
3598 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe); in bnxt_re_build_gsi_recv()
3600 rc = bnxt_re_build_qp1_recv(qp, wr, wqe); in bnxt_re_build_gsi_recv()
3608 struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp); in bnxt_re_post_recv() local
3615 spin_lock_irqsave(&qp->rq_lock, flags); in bnxt_re_post_recv()
3618 if (wr->num_sge > qp->qplib_qp.rq.max_sge) { in bnxt_re_post_recv()
3619 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_post_recv()
3630 qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) { in bnxt_re_post_recv()
3633 rc = bnxt_re_build_gsi_recv(qp, wr, &wqe); in bnxt_re_post_recv()
3637 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe); in bnxt_re_post_recv()
3640 dev_err(rdev_to_dev(qp->rdev), in bnxt_re_post_recv()
3647 bnxt_qplib_post_recv_db(&qp->qplib_qp); in bnxt_re_post_recv()
3654 bnxt_qplib_post_recv_db(&qp->qplib_qp); in bnxt_re_post_recv()
3655 spin_unlock_irqrestore(&qp->rq_lock, flags); in bnxt_re_post_recv()
3772 * whether this CQ is for GSI QP. So assuming that the first in bnxt_re_create_cq()
3780 * CQE for Shadow QP SQEs + CQE for Shadow QP RQEs. in bnxt_re_create_cq()
3781 * Max entries of shadow QP SQ and RQ = QP1 RQEs = cqe in bnxt_re_create_cq()
4348 /* Shadow QP header buffer */ in bnxt_re_process_raw_qp_packet_receive()
4413 "Failed to post Rx buffers to shadow QP\n"); in bnxt_re_process_raw_qp_packet_receive()
4503 wc->qp = &gsi_qp->ib_qp; in bnxt_re_process_res_shadow_qp_wc()
4529 struct bnxt_re_qp *qp, struct ib_wc *wc, in bnxt_re_process_res_ud_wc() argument
4540 /* report only on GSI QP for Thor */ in bnxt_re_process_res_ud_wc()
4541 if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id && in bnxt_re_process_res_ud_wc()
4563 static int bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp *qp) in bnxt_re_legacy_send_phantom_wqe() argument
4565 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; in bnxt_re_legacy_send_phantom_wqe()
4569 spin_lock_irqsave(&qp->sq_lock, flags); in bnxt_re_legacy_send_phantom_wqe()
4575 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", in bnxt_re_legacy_send_phantom_wqe()
4581 spin_unlock_irqrestore(&qp->sq_lock, flags); in bnxt_re_legacy_send_phantom_wqe()
4589 struct bnxt_re_qp *qp; in bnxt_re_poll_cq() local
4633 qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); in bnxt_re_poll_cq()
4634 if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM) in bnxt_re_poll_cq()
4655 qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle, in bnxt_re_poll_cq()
4657 if (!qp) { in bnxt_re_poll_cq()
4659 "POLL CQ bad QP handle\n"); in bnxt_re_poll_cq()
4662 wc->qp = &qp->ib_qp; in bnxt_re_poll_cq()
4672 qp->qplib_qp.id == in bnxt_re_poll_cq()
4687 rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe); in bnxt_re_poll_cq()
4711 qp->qplib_qp.id == in bnxt_re_poll_cq()
4724 bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe); in bnxt_re_poll_cq()
4728 bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe); in bnxt_re_poll_cq()
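
The tail of bnxt_re_poll_cq() dispatches each polled CQE on its QP handle and completion type: a missing handle is skipped with a warning, GSI raw-Ethertype completions go through bnxt_re_process_raw_qp_packet_receive(), shadow-QP receive completions are reported against the GSI QP, and ordinary UD receive completions are reported directly. A simplified dispatcher with invented enum names, illustrating only that routing:

#include <stdio.h>

enum demo_cqe_kind { DEMO_CQE_RAWETH_QP1, DEMO_CQE_RES_SHADOW, DEMO_CQE_RES_UD };

struct demo_cqe { enum demo_cqe_kind kind; void *qp_handle; };

static void demo_dispatch(const struct demo_cqe *cqe)
{
    if (!cqe->qp_handle) {             /* stale handle: warn and skip */
        printf("POLL CQ bad QP handle\n");
        return;
    }
    switch (cqe->kind) {
    case DEMO_CQE_RAWETH_QP1:
        printf("raw QP1 packet: repost via the shadow QP\n");
        break;
    case DEMO_CQE_RES_SHADOW:
        printf("shadow QP completion: report against the GSI QP\n");
        break;
    case DEMO_CQE_RES_UD:
        printf("UD receive completion: report to the consumer\n");
        break;
    }
}

int main(void)
{
    int handle;
    struct demo_cqe cqe = { .kind = DEMO_CQE_RES_UD, .qp_handle = &handle };

    demo_dispatch(&cqe);
    return 0;
}
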