Lines Matching full:qp
44 #include "qp.h"
80 struct mlx5_core_qp *qp; member
146 static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, in mlx5_ib_read_kernel_wqe_sq() argument
155 wqe_index = wqe_index & qp->sq.fbc.sz_m1; in mlx5_ib_read_kernel_wqe_sq()
158 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); in mlx5_ib_read_kernel_wqe_sq()
174 wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1; in mlx5_ib_read_kernel_wqe_sq()
175 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index); in mlx5_ib_read_kernel_wqe_sq()
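    The two lines above show the ring-read pattern: sz_m1 is (queue size - 1) for a power-of-two queue, so "index & sz_m1" is a cheap modulo, and a WQE larger than one stride is copied fragment by fragment, wrapping at the queue end. A minimal userspace sketch of that pattern, assuming a hypothetical 8-entry, 16-byte-stride ring (none of these names are driver API):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	#define RING_ENTRIES 8		/* must be a power of two */
	#define STRIDE       16		/* bytes per ring entry */

	static uint8_t ring[RING_ENTRIES * STRIDE];

	/* Copy 'len' bytes starting at entry 'index', wrapping at the end. */
	static void ring_read(unsigned int index, void *buf, size_t len)
	{
		const unsigned int sz_m1 = RING_ENTRIES - 1;
		uint8_t *dst = buf;

		while (len) {
			size_t chunk = len < STRIDE ? len : STRIDE;

			index &= sz_m1;			/* cheap modulo */
			memcpy(dst, &ring[index * STRIDE], chunk);
			dst += chunk;
			len -= chunk;
			index++;			/* next entry, may wrap */
		}
	}

	int main(void)
	{
		uint8_t out[2 * STRIDE];
		unsigned int i;

		for (i = 0; i < sizeof(ring); i++)
			ring[i] = (uint8_t)i;

		/* Read two entries starting at the last slot: wraps to slot 0. */
		ring_read(RING_ENTRIES - 1, out, sizeof(out));
		printf("first byte: %d (slot 7), byte %d: %d (slot 0)\n",
		       out[0], STRIDE, out[STRIDE]);
		return 0;
	}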
181 static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, in mlx5_ib_read_user_wqe_sq() argument
184 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in mlx5_ib_read_user_wqe_sq()
186 struct mlx5_ib_wq *wq = &qp->sq; in mlx5_ib_read_user_wqe_sq()
232 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, in mlx5_ib_read_wqe_sq() argument
235 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in mlx5_ib_read_wqe_sq()
242 return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer, in mlx5_ib_read_wqe_sq()
245 return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc); in mlx5_ib_read_wqe_sq()
248 static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, in mlx5_ib_read_user_wqe_rq() argument
251 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in mlx5_ib_read_user_wqe_rq()
253 struct mlx5_ib_wq *wq = &qp->rq; in mlx5_ib_read_user_wqe_rq()
268 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer, in mlx5_ib_read_wqe_rq() argument
271 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in mlx5_ib_read_wqe_rq()
273 struct mlx5_ib_wq *wq = &qp->rq; in mlx5_ib_read_wqe_rq()
282 return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc); in mlx5_ib_read_wqe_rq()
321 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_qp_err_syndrome() local
334 err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen, in mlx5_ib_qp_err_syndrome()
344 pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n", in mlx5_ib_qp_err_syndrome()
359 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp; in mlx5_ib_handle_qp_event()
363 event.element.qp = ibqp; in mlx5_ib_handle_qp_event()
390 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", in mlx5_ib_handle_qp_event()
391 qpe_work->type, qpe_work->qp->qpn); in mlx5_ib_handle_qp_event()
402 mlx5_core_res_put(&qpe_work->qp->common); in mlx5_ib_handle_qp_event()
406 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) in mlx5_ib_qp_event() argument
408 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx5_ib_qp_event()
413 to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port; in mlx5_ib_qp_event()
423 qpe_work->qp = qp; in mlx5_ib_qp_event()
430 mlx5_core_res_put(&qp->common); in mlx5_ib_qp_event()
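    mlx5_ib_qp_event() above defers the event to a work item and keeps a reference on the QP until mlx5_ib_handle_qp_event() finishes; the mlx5_core_res_put() calls on both paths show the ownership rule: whichever side stops the work from running must drop the reference. A single-threaded toy of that rule (struct res, qp_event() and friends are stand-ins, and the direct call below replaces the real workqueue):

	#include <stdio.h>
	#include <stdlib.h>

	struct res { int refs; };

	static void res_get(struct res *r) { r->refs++; }
	static void res_put(struct res *r)
	{
		if (--r->refs == 0)
			printf("resource freed\n");
	}

	struct event_work {
		struct res *qp;
		int type;
	};

	static void handle_event(struct event_work *w)
	{
		printf("handling event %d\n", w->type);
		res_put(w->qp);		/* handler owns, and drops, the ref */
		free(w);
	}

	static void qp_event(struct res *qp, int type)
	{
		struct event_work *w;

		res_get(qp);		/* hold the QP across the deferred work */
		w = malloc(sizeof(*w));
		if (!w) {
			res_put(qp);	/* failure path: drop it ourselves */
			return;
		}
		w->qp = qp;
		w->type = type;
		handle_event(w);	/* stands in for queue_work() */
	}

	int main(void)
	{
		struct res qp = { .refs = 1 };

		qp_event(&qp, 3);
		res_put(&qp);
		return 0;
	}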
434 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
444 qp->rq.max_gs = 0; in set_rq_size()
445 qp->rq.wqe_cnt = 0; in set_rq_size()
446 qp->rq.wqe_shift = 0; in set_rq_size()
450 int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE); in set_rq_size()
453 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
456 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
457 if ((1 << qp->rq.wqe_shift) / in set_rq_size()
461 qp->rq.max_gs = in set_rq_size()
462 (1 << qp->rq.wqe_shift) / in set_rq_size()
465 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
474 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
482 qp->rq.wqe_shift = ilog2(wqe_size); in set_rq_size()
483 qp->rq.max_gs = in set_rq_size()
484 (1 << qp->rq.wqe_shift) / in set_rq_size()
487 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
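    The arithmetic in set_rq_size() follows directly from the listed lines: the RQ stride is a power of two (qp->rq.wqe_shift is its log2), and max_gs is the stride divided by the size of a scatter/gather data segment, minus one slot when an inline signature segment (wq_sig) is present. A self-contained sketch of that sizing, assuming a 16-byte data segment (MLX5_IB_DS_SIZE is a stand-in name):

	#include <stdio.h>

	#define MLX5_IB_DS_SIZE 16	/* assumed sizeof a WQE data segment */

	static unsigned int roundup_pow_of_two(unsigned int x)
	{
		unsigned int r = 1;
		while (r < x)
			r <<= 1;
		return r;
	}

	static unsigned int ilog2_u(unsigned int x)
	{
		unsigned int l = 0;
		while (x >>= 1)
			l++;
		return l;
	}

	int main(void)
	{
		unsigned int max_recv_sge = 3, wq_sig = 1;
		/* one data segment per SGE, plus the optional signature slot */
		unsigned int wqe_size =
			roundup_pow_of_two((max_recv_sge + wq_sig) * MLX5_IB_DS_SIZE);
		unsigned int wqe_shift = ilog2_u(wqe_size);
		unsigned int max_gs = (1u << wqe_shift) / MLX5_IB_DS_SIZE - wq_sig;

		printf("wqe_size=%u wqe_shift=%u max_gs=%u\n",
		       wqe_size, wqe_shift, max_gs);
		return 0;
	}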
592 struct mlx5_ib_qp *qp) in calc_sq_size() argument
611 qp->max_inline_data = wqe_size - sq_overhead(attr) - in calc_sq_size()
613 attr->cap.max_inline_data = qp->max_inline_data; in calc_sq_size()
616 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; in calc_sq_size()
617 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
620 qp->sq.wqe_cnt, in calc_sq_size()
624 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in calc_sq_size()
625 qp->sq.max_gs = get_send_sge(attr, wqe_size); in calc_sq_size()
626 if (qp->sq.max_gs < attr->cap.max_send_sge) in calc_sq_size()
629 attr->cap.max_send_sge = qp->sq.max_gs; in calc_sq_size()
630 qp->sq.max_post = wq_size / wqe_size; in calc_sq_size()
631 attr->cap.max_send_wr = qp->sq.max_post; in calc_sq_size()
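    calc_sq_size() measures the send queue in 64-byte basic blocks: wqe_cnt counts blocks (wq_size / MLX5_SEND_WQE_BB) while max_post counts whole WQEs (wq_size / wqe_size), and whatever is left inside a WQE after the segment overhead becomes max_inline_data. A hedged sketch with made-up inputs (the power-of-two roundup illustrates the wqe_cnt/max_post split, not the driver's exact capping logic):

	#include <stdio.h>

	#define MLX5_SEND_WQE_BB 64

	static unsigned int roundup_pow_of_two(unsigned int x)
	{
		unsigned int r = 1;
		while (r < x)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int wqe_size = 192;	/* bytes per WQE, a BB multiple */
		unsigned int max_send_wr = 100;
		unsigned int sq_overhead = 96;	/* ctrl + addressing segs, say */

		unsigned int wq_size = roundup_pow_of_two(max_send_wr * wqe_size);
		unsigned int wqe_cnt = wq_size / MLX5_SEND_WQE_BB; /* in BBs */
		unsigned int max_post = wq_size / wqe_size;	   /* in WQEs */
		unsigned int max_inline = wqe_size - sq_overhead;

		printf("wqe_cnt=%u BBs, max_post=%u WQEs, max_inline=%u bytes\n",
		       wqe_cnt, max_post, max_inline);
		return 0;
	}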
637 struct mlx5_ib_qp *qp, in set_user_buf_size() argument
642 int desc_sz = 1 << qp->sq.wqe_shift; in set_user_buf_size()
656 qp->sq.wqe_cnt = ucmd->sq_wqe_count; in set_user_buf_size()
658 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in set_user_buf_size()
660 qp->sq.wqe_cnt, in set_user_buf_size()
666 qp->flags & IB_QP_CREATE_SOURCE_QPN) { in set_user_buf_size()
667 base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_user_buf_size()
668 qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6; in set_user_buf_size()
670 base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_buf_size()
671 (qp->sq.wqe_cnt << 6); in set_user_buf_size()
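    The "<< 6" in set_user_buf_size() is the 64-byte send basic block again: in the contiguous case the RQ buffer comes first and the SQ follows it, while a raw-packet or source-QPN QP keeps the two buffers separate. A quick standalone check of that layout arithmetic with made-up counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int rq_wqe_cnt = 256, rq_wqe_shift = 6; /* 64 B stride */
		unsigned int sq_wqe_cnt = 512;

		/* contiguous case: RQ first, SQ starts where the RQ ends */
		unsigned long rq_bytes = (unsigned long)rq_wqe_cnt << rq_wqe_shift;
		unsigned long buf_size = rq_bytes +
					 ((unsigned long)sq_wqe_cnt << 6);

		printf("sq offset=%lu total buf_size=%lu\n", rq_bytes, buf_size);
		return 0;
	}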
942 struct mlx5_ib_qp *qp, struct ib_udata *udata, in _create_user_qp() argument
963 uar_flags = qp->flags_en & in _create_user_qp()
978 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) in _create_user_qp()
993 qp->rq.offset = 0; in _create_user_qp()
994 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in _create_user_qp()
995 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in _create_user_qp()
997 err = set_user_buf_size(dev, qp, ucmd, base, attr); in _create_user_qp()
1045 qp->bfregn = bfregn; in _create_user_qp()
1047 err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db); in _create_user_qp()
1067 static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in destroy_qp() argument
1074 /* User QP */ in destroy_qp()
1075 mlx5_ib_db_unmap_user(context, &qp->db); in destroy_qp()
1082 if (qp->bfregn != MLX5_IB_INVALID_BFREG) in destroy_qp()
1083 mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); in destroy_qp()
1087 /* Kernel QP */ in destroy_qp()
1088 kvfree(qp->sq.wqe_head); in destroy_qp()
1089 kvfree(qp->sq.w_list); in destroy_qp()
1090 kvfree(qp->sq.wrid); in destroy_qp()
1091 kvfree(qp->sq.wr_data); in destroy_qp()
1092 kvfree(qp->rq.wrid); in destroy_qp()
1093 if (qp->db.db) in destroy_qp()
1094 mlx5_db_free(dev->mdev, &qp->db); in destroy_qp()
1095 if (qp->buf.frags) in destroy_qp()
1096 mlx5_frag_buf_free(dev->mdev, &qp->buf); in destroy_qp()
1101 struct mlx5_ib_qp *qp, u32 **in, int *inlen, in _create_kernel_qp() argument
1109 qp->bf.bfreg = &dev->fp_bfreg; in _create_kernel_qp()
1111 qp->bf.bfreg = &dev->bfreg; in _create_kernel_qp()
1116 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; in _create_kernel_qp()
1117 uar_index = qp->bf.bfreg->index; in _create_kernel_qp()
1119 err = calc_sq_size(dev, init_attr, qp); in _create_kernel_qp()
1125 qp->rq.offset = 0; in _create_kernel_qp()
1126 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in _create_kernel_qp()
1127 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); in _create_kernel_qp()
1130 &qp->buf, dev->mdev->priv.numa_node); in _create_kernel_qp()
1136 if (qp->rq.wqe_cnt) in _create_kernel_qp()
1137 mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift, in _create_kernel_qp()
1138 ilog2(qp->rq.wqe_cnt), &qp->rq.fbc); in _create_kernel_qp()
1140 if (qp->sq.wqe_cnt) { in _create_kernel_qp()
1141 int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) / in _create_kernel_qp()
1143 mlx5_init_fbc_offset(qp->buf.frags + in _create_kernel_qp()
1144 (qp->sq.offset / PAGE_SIZE), in _create_kernel_qp()
1146 ilog2(qp->sq.wqe_cnt), in _create_kernel_qp()
1147 sq_strides_offset, &qp->sq.fbc); in _create_kernel_qp()
1149 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); in _create_kernel_qp()
1153 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; in _create_kernel_qp()
1163 MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in _create_kernel_qp()
1169 if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) in _create_kernel_qp()
1172 mlx5_fill_page_frag_array(&qp->buf, in _create_kernel_qp()
1176 err = mlx5_db_alloc(dev->mdev, &qp->db); in _create_kernel_qp()
1182 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, in _create_kernel_qp()
1183 sizeof(*qp->sq.wrid), GFP_KERNEL); in _create_kernel_qp()
1184 qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt, in _create_kernel_qp()
1185 sizeof(*qp->sq.wr_data), GFP_KERNEL); in _create_kernel_qp()
1186 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, in _create_kernel_qp()
1187 sizeof(*qp->rq.wrid), GFP_KERNEL); in _create_kernel_qp()
1188 qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt, in _create_kernel_qp()
1189 sizeof(*qp->sq.w_list), GFP_KERNEL); in _create_kernel_qp()
1190 qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, in _create_kernel_qp()
1191 sizeof(*qp->sq.wqe_head), GFP_KERNEL); in _create_kernel_qp()
1193 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || in _create_kernel_qp()
1194 !qp->sq.w_list || !qp->sq.wqe_head) { in _create_kernel_qp()
1202 kvfree(qp->sq.wqe_head); in _create_kernel_qp()
1203 kvfree(qp->sq.w_list); in _create_kernel_qp()
1204 kvfree(qp->sq.wrid); in _create_kernel_qp()
1205 kvfree(qp->sq.wr_data); in _create_kernel_qp()
1206 kvfree(qp->rq.wrid); in _create_kernel_qp()
1207 mlx5_db_free(dev->mdev, &qp->db); in _create_kernel_qp()
1213 mlx5_frag_buf_free(dev->mdev, &qp->buf); in _create_kernel_qp()
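    _create_kernel_qp() allows the SQ to start partway into a page of the fragmented buffer: the fragment index is sq.offset / PAGE_SIZE and sq_strides_offset is the remainder expressed in 64-byte blocks. A standalone check of that arithmetic, assuming a 4096-byte page:

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define MLX5_SEND_WQE_BB 64

	int main(void)
	{
		unsigned int sq_offset = 5120;	/* say the RQ took 5120 bytes */
		unsigned int frag = sq_offset / PAGE_SIZE;
		unsigned int strides_offset =
			(sq_offset & (PAGE_SIZE - 1)) / MLX5_SEND_WQE_BB;

		printf("SQ starts in fragment %u at stride %u\n",
		       frag, strides_offset);
		return 0;
	}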
1217 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) in get_rx_type() argument
1219 if (attr->srq || (qp->type == IB_QPT_XRC_TGT) || in get_rx_type()
1220 (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI)) in get_rx_type()
1222 else if (!qp->has_rq) in get_rx_type()
1229 struct mlx5_ib_qp *qp, in create_raw_packet_qp_tis() argument
1241 if (qp->flags & IB_QP_CREATE_SOURCE_QPN) in create_raw_packet_qp_tis()
1242 MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); in create_raw_packet_qp_tis()
1567 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in create_raw_packet_qp() argument
1573 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in create_raw_packet_qp()
1583 if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt) in create_raw_packet_qp()
1585 if (qp->sq.wqe_cnt) { in create_raw_packet_qp()
1586 err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); in create_raw_packet_qp()
1602 sq->base.container_mibqp = qp; in create_raw_packet_qp()
1606 if (qp->rq.wqe_cnt) { in create_raw_packet_qp()
1607 rq->base.container_mibqp = qp; in create_raw_packet_qp()
1609 if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING) in create_raw_packet_qp()
1611 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) in create_raw_packet_qp()
1618 err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd, in create_raw_packet_qp()
1646 qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn : in create_raw_packet_qp()
1653 if (!qp->sq.wqe_cnt) in create_raw_packet_qp()
1663 struct mlx5_ib_qp *qp) in destroy_raw_packet_qp() argument
1665 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in destroy_raw_packet_qp()
1669 if (qp->rq.wqe_cnt) { in destroy_raw_packet_qp()
1670 destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd); in destroy_raw_packet_qp()
1674 if (qp->sq.wqe_cnt) { in destroy_raw_packet_qp()
1676 destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd); in destroy_raw_packet_qp()
1680 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, in raw_packet_qp_copy_info() argument
1686 sq->sq = &qp->sq; in raw_packet_qp_copy_info()
1687 rq->rq = &qp->rq; in raw_packet_qp_copy_info()
1688 sq->doorbell = &qp->db; in raw_packet_qp_copy_info()
1689 rq->doorbell = &qp->db; in raw_packet_qp_copy_info()
1692 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) in destroy_rss_raw_qp_tir() argument
1694 if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | in destroy_rss_raw_qp_tir()
1697 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, in destroy_rss_raw_qp_tir()
1698 to_mpd(qp->ibqp.pd)->uid); in destroy_rss_raw_qp_tir()
1714 struct mlx5_ib_qp *qp, in create_rss_raw_qp_tir() argument
1746 qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC; in create_rss_raw_qp_tir()
1748 if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) in create_rss_raw_qp_tir()
1751 if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) in create_rss_raw_qp_tir()
1876 qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn); in create_rss_raw_qp_tir()
1881 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, in create_rss_raw_qp_tir()
1890 params->resp.tirn = qp->rss_qp.tirn; in create_rss_raw_qp_tir()
1909 /* qpn is reserved for that QP */ in create_rss_raw_qp_tir()
1910 qp->trans_qp.base.mqp.qpn = 0; in create_rss_raw_qp_tir()
1911 qp->is_rss = true; in create_rss_raw_qp_tir()
1920 struct mlx5_ib_qp *qp, in configure_requester_scat_cqe() argument
1927 allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; in configure_requester_scat_cqe()
1963 struct mlx5_ib_qp *qp) in get_atomic_mode() argument
1973 if (qp->type == MLX5_IB_QPT_DCT) in get_atomic_mode()
1988 if (atomic_mode > MLX5_ATOMIC_MODE_8B && qp->is_ooo_rq) in get_atomic_mode()
1994 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in create_xrc_tgt_qp() argument
2010 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_xrc_tgt_qp()
2022 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) in create_xrc_tgt_qp()
2024 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) in create_xrc_tgt_qp()
2026 if (qp->flags & IB_QP_CREATE_MANAGED_SEND) in create_xrc_tgt_qp()
2028 if (qp->flags & IB_QP_CREATE_MANAGED_RECV) in create_xrc_tgt_qp()
2038 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in create_xrc_tgt_qp()
2044 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { in create_xrc_tgt_qp()
2048 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; in create_xrc_tgt_qp()
2051 base = &qp->trans_qp.base; in create_xrc_tgt_qp()
2057 base->container_mibqp = qp; in create_xrc_tgt_qp()
2063 list_add_tail(&qp->qps_list, &dev->qp_list); in create_xrc_tgt_qp()
2066 qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn; in create_xrc_tgt_qp()
2071 struct mlx5_ib_qp *qp, in create_dci() argument
2092 spin_lock_init(&qp->sq.lock); in create_dci()
2093 spin_lock_init(&qp->rq.lock); in create_dci()
2095 mlx5_st = to_mlx5_st(qp->type); in create_dci()
2100 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_dci()
2102 base = &qp->trans_qp.base; in create_dci()
2104 qp->has_rq = qp_has_rq(init_attr); in create_dci()
2105 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); in create_dci()
2111 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || in create_dci()
2112 ucmd->rq_wqe_count != qp->rq.wqe_cnt) in create_dci()
2124 err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, in create_dci()
2137 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) in create_dci()
2140 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) in create_dci()
2142 if (qp->flags & IB_QP_CREATE_MANAGED_SEND) in create_dci()
2144 if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) in create_dci()
2145 configure_requester_scat_cqe(dev, qp, init_attr, qpc); in create_dci()
2147 if (qp->rq.wqe_cnt) { in create_dci()
2148 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); in create_dci()
2149 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); in create_dci()
2152 if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) { in create_dci()
2160 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); in create_dci()
2162 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); in create_dci()
2183 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in create_dci()
2189 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { in create_dci()
2193 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; in create_dci()
2202 base->container_mibqp = qp; in create_dci()
2207 get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, in create_dci()
2214 list_add_tail(&qp->qps_list, &dev->qp_list); in create_dci()
2218 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); in create_dci()
2220 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); in create_dci()
2227 destroy_qp(dev, qp, base, udata); in create_dci()
2232 struct mlx5_ib_qp *qp, in create_user_qp() argument
2253 spin_lock_init(&qp->sq.lock); in create_user_qp()
2254 spin_lock_init(&qp->rq.lock); in create_user_qp()
2256 mlx5_st = to_mlx5_st(qp->type); in create_user_qp()
2261 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_user_qp()
2263 if (qp->flags & IB_QP_CREATE_SOURCE_QPN) in create_user_qp()
2264 qp->underlay_qpn = init_attr->source_qpn; in create_user_qp()
2267 qp->flags & IB_QP_CREATE_SOURCE_QPN) ? in create_user_qp()
2268 &qp->raw_packet_qp.rq.base : in create_user_qp()
2269 &qp->trans_qp.base; in create_user_qp()
2271 qp->has_rq = qp_has_rq(init_attr); in create_user_qp()
2272 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd); in create_user_qp()
2278 if (ucmd->rq_wqe_shift != qp->rq.wqe_shift || in create_user_qp()
2279 ucmd->rq_wqe_count != qp->rq.wqe_cnt) in create_user_qp()
2292 err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, ¶ms->resp, in create_user_qp()
2298 qp->port = init_attr->port_num; in create_user_qp()
2308 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) in create_user_qp()
2311 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) in create_user_qp()
2314 if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL) in create_user_qp()
2316 if (qp->flags & IB_QP_CREATE_MANAGED_SEND) in create_user_qp()
2318 if (qp->flags & IB_QP_CREATE_MANAGED_RECV) in create_user_qp()
2320 if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) in create_user_qp()
2322 if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && in create_user_qp()
2331 if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && in create_user_qp()
2332 (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) in create_user_qp()
2333 configure_requester_scat_cqe(dev, qp, init_attr, qpc); in create_user_qp()
2335 if (qp->rq.wqe_cnt) { in create_user_qp()
2336 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); in create_user_qp()
2337 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); in create_user_qp()
2343 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); in create_user_qp()
2345 if (qp->sq.wqe_cnt) { in create_user_qp()
2346 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); in create_user_qp()
2378 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in create_user_qp()
2384 if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING && in create_user_qp()
2389 qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING; in create_user_qp()
2393 qp->flags & IB_QP_CREATE_SOURCE_QPN) { in create_user_qp()
2394 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr; in create_user_qp()
2395 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); in create_user_qp()
2396 err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata, in create_user_qp()
2405 base->container_mibqp = qp; in create_user_qp()
2410 get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq, in create_user_qp()
2417 list_add_tail(&qp->qps_list, &dev->qp_list); in create_user_qp()
2421 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); in create_user_qp()
2423 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); in create_user_qp()
2430 destroy_qp(dev, qp, base, udata); in create_user_qp()
2435 struct mlx5_ib_qp *qp, in create_kernel_qp() argument
2453 spin_lock_init(&qp->sq.lock); in create_kernel_qp()
2454 spin_lock_init(&qp->rq.lock); in create_kernel_qp()
2456 mlx5_st = to_mlx5_st(qp->type); in create_kernel_qp()
2461 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_kernel_qp()
2463 base = &qp->trans_qp.base; in create_kernel_qp()
2465 qp->has_rq = qp_has_rq(attr); in create_kernel_qp()
2466 err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL); in create_kernel_qp()
2472 err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base); in create_kernel_qp()
2477 qp->port = attr->port_num; in create_kernel_qp()
2490 if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) in create_kernel_qp()
2493 if (qp->rq.wqe_cnt) { in create_kernel_qp()
2494 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); in create_kernel_qp()
2495 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); in create_kernel_qp()
2498 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr)); in create_kernel_qp()
2500 if (qp->sq.wqe_cnt) in create_kernel_qp()
2501 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); in create_kernel_qp()
2521 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in create_kernel_qp()
2527 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */ in create_kernel_qp()
2528 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) in create_kernel_qp()
2531 if (qp->flags & IB_QP_CREATE_INTEGRITY_EN && in create_kernel_qp()
2540 base->container_mibqp = qp; in create_kernel_qp()
2543 get_cqs(qp->type, attr->send_cq, attr->recv_cq, in create_kernel_qp()
2550 list_add_tail(&qp->qps_list, &dev->qp_list); in create_kernel_qp()
2554 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); in create_kernel_qp()
2556 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); in create_kernel_qp()
2563 destroy_qp(dev, qp, base, NULL); in create_kernel_qp()
2656 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2660 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in destroy_qp_common() argument
2668 if (qp->is_rss) { in destroy_qp_common()
2669 destroy_rss_raw_qp_tir(dev, qp); in destroy_qp_common()
2673 base = (qp->type == IB_QPT_RAW_PACKET || in destroy_qp_common()
2674 qp->flags & IB_QP_CREATE_SOURCE_QPN) ? in destroy_qp_common()
2675 &qp->raw_packet_qp.rq.base : in destroy_qp_common()
2676 &qp->trans_qp.base; in destroy_qp_common()
2678 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
2679 if (qp->type != IB_QPT_RAW_PACKET && in destroy_qp_common()
2680 !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { in destroy_qp_common()
2688 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); in destroy_qp_common()
2691 mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", in destroy_qp_common()
2695 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, in destroy_qp_common()
2701 list_del(&qp->qps_list); in destroy_qp_common()
2703 list_del(&qp->cq_send_list); in destroy_qp_common()
2706 list_del(&qp->cq_recv_list); in destroy_qp_common()
2710 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in destroy_qp_common()
2718 if (qp->type == IB_QPT_RAW_PACKET || in destroy_qp_common()
2719 qp->flags & IB_QP_CREATE_SOURCE_QPN) { in destroy_qp_common()
2720 destroy_raw_packet_qp(dev, qp); in destroy_qp_common()
2724 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", in destroy_qp_common()
2728 destroy_qp(dev, qp, base, udata); in destroy_qp_common()
2732 struct mlx5_ib_qp *qp, in create_dct() argument
2743 qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL); in create_dct()
2744 if (!qp->dct.in) in create_dct()
2747 MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid); in create_dct()
2748 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); in create_dct()
2757 if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) { in create_dct()
2764 qp->state = IB_QPS_RESET; in create_dct()
2798 mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type); in check_qp_type()
2826 "Raw Packet QP is only supported for CQE version > 0\n"); in check_valid_flow()
2832 "Wrong QP type %d for the RWQ indirect table\n", in check_valid_flow()
2870 bool cond, struct mlx5_ib_qp *qp) in process_vendor_flag() argument
2876 qp->flags_en |= flag; in process_vendor_flag()
2894 mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); in process_vendor_flag()
2897 static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in process_vendor_flags() argument
2911 qp->type = MLX5_IB_QPT_DCI; in process_vendor_flags()
2914 qp->type = MLX5_IB_QPT_DCT; in process_vendor_flags()
2917 if (qp->type != IB_QPT_DRIVER) in process_vendor_flags()
2926 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp); in process_vendor_flags()
2927 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp); in process_vendor_flags()
2930 qp); in process_vendor_flags()
2932 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); in process_vendor_flags()
2934 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); in process_vendor_flags()
2936 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); in process_vendor_flags()
2938 if (qp->type == IB_QPT_RAW_PACKET) { in process_vendor_flags()
2943 cond, qp); in process_vendor_flags()
2946 qp); in process_vendor_flags()
2949 qp); in process_vendor_flags()
2952 if (qp->type == IB_QPT_RC) in process_vendor_flags()
2955 MLX5_CAP_GEN(mdev, qp_packet_based), qp); in process_vendor_flags()
2957 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp); in process_vendor_flags()
2958 process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp); in process_vendor_flags()
2960 cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS | in process_vendor_flags()
2964 mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n", in process_vendor_flags()
2976 bool cond, struct mlx5_ib_qp *qp) in process_create_flag() argument
2982 qp->flags |= flag; in process_create_flag()
2987 mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag); in process_create_flag()
2990 static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in process_create_flags() argument
2993 enum ib_qp_type qp_type = qp->type; in process_create_flags()
3007 qp); in process_create_flags()
3010 MLX5_CAP_GEN(mdev, sho), qp); in process_create_flags()
3013 MLX5_CAP_GEN(mdev, block_lb_mc), qp); in process_create_flags()
3015 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3017 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3019 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3025 qp); in process_create_flags()
3028 cond, qp); in process_create_flags()
3035 IB_QP_CREATE_SCATTER_FCS, cond, qp); in process_create_flags()
3040 IB_QP_CREATE_CVLAN_STRIPPING, cond, qp); in process_create_flags()
3045 MLX5_CAP_GEN(mdev, end_pad), qp); in process_create_flags()
3048 true, qp); in process_create_flags()
3051 mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n", in process_create_flags()
3081 /* RSS RAW QP */ in process_udata_size()
3102 struct mlx5_ib_qp *qp, in create_qp() argument
3108 err = create_rss_raw_qp_tir(dev, pd, qp, params); in create_qp()
3112 switch (qp->type) { in create_qp()
3114 err = create_dct(dev, pd, qp, params); in create_qp()
3117 err = create_dci(dev, pd, qp, params); in create_qp()
3120 err = create_xrc_tgt_qp(dev, qp, params); in create_qp()
3123 err = mlx5_ib_create_gsi(pd, qp, params->attr); in create_qp()
3126 rdma_restrack_no_track(&qp->ibqp.res); in create_qp()
3131 err = create_user_qp(dev, pd, qp, params); in create_qp()
3133 err = create_kernel_qp(dev, pd, qp, params); in create_qp()
3138 mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type); in create_qp()
3142 if (is_qp0(qp->type)) in create_qp()
3143 qp->ibqp.qp_num = 0; in create_qp()
3144 else if (is_qp1(qp->type)) in create_qp()
3145 qp->ibqp.qp_num = 1; in create_qp()
3147 qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; in create_qp()
3150 "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n", in create_qp()
3151 qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, in create_qp()
3161 static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in check_qp_attr() argument
3166 switch (qp->type) { in check_qp_attr()
3183 mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type); in check_qp_attr()
3188 static int get_qp_uidx(struct mlx5_ib_qp *qp, in get_qp_uidx() argument
3259 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_create_qp() local
3299 mutex_init(&qp->mutex); in mlx5_ib_create_qp()
3300 qp->type = type; in mlx5_ib_create_qp()
3302 err = process_vendor_flags(dev, qp, params.ucmd, attr); in mlx5_ib_create_qp()
3306 err = get_qp_uidx(qp, ¶ms); in mlx5_ib_create_qp()
3310 err = process_create_flags(dev, qp, attr); in mlx5_ib_create_qp()
3314 err = check_qp_attr(dev, qp, attr); in mlx5_ib_create_qp()
3318 err = create_qp(dev, pd, qp, ¶ms); in mlx5_ib_create_qp()
3327 * It is safe to copy response for all user create QP flows, in mlx5_ib_create_qp()
3338 switch (qp->type) { in mlx5_ib_create_qp()
3340 mlx5_ib_destroy_dct(qp); in mlx5_ib_create_qp()
3343 mlx5_ib_destroy_gsi(qp); in mlx5_ib_create_qp()
3346 destroy_qp_common(dev, qp, udata); in mlx5_ib_create_qp()
3354 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx5_ib_destroy_qp() argument
3356 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_destroy_qp()
3357 struct mlx5_ib_qp *mqp = to_mqp(qp); in mlx5_ib_destroy_qp()
3369 static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp, in set_qpc_atomic_flags() argument
3373 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); in set_qpc_atomic_flags()
3380 dest_rd_atomic = qp->trans_qp.resp_depth; in set_qpc_atomic_flags()
3385 access_flags = qp->trans_qp.atomic_rd_en; in set_qpc_atomic_flags()
3395 atomic_mode = get_atomic_mode(dev, qp); in set_qpc_atomic_flags()
3534 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in mlx5_set_path() argument
3567 if ((qp->type == IB_QPT_RC || in mlx5_set_path()
3568 qp->type == IB_QPT_UC || in mlx5_set_path()
3569 qp->type == IB_QPT_XRC_INI || in mlx5_set_path()
3570 qp->type == IB_QPT_XRC_TGT) && in mlx5_set_path()
3574 qp->ibqp.qp_num, in mlx5_set_path()
3609 if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) in mlx5_set_path()
3611 &qp->raw_packet_qp.sq, in mlx5_set_path()
3612 sl & 0xf, qp->ibqp.pd); in mlx5_set_path()
3826 "RAW PACKET QP counters are not supported on current FW\n"); in modify_raw_packet_qp_rq()
3916 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in modify_raw_packet_qp() argument
3920 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in modify_raw_packet_qp()
3923 int modify_rq = !!qp->rq.wqe_cnt; in modify_raw_packet_qp()
3924 int modify_sq = !!qp->sq.wqe_cnt; in modify_raw_packet_qp()
3963 qp->ibqp.pd); in modify_raw_packet_qp()
3974 qp->ibqp.pd); in modify_raw_packet_qp()
3985 raw_qp_param, qp->ibqp.pd); in modify_raw_packet_qp()
4020 static bool qp_supports_affinity(struct mlx5_ib_qp *qp) in qp_supports_affinity() argument
4022 if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) || in qp_supports_affinity()
4023 (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) || in qp_supports_affinity()
4024 (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) || in qp_supports_affinity()
4025 (qp->type == MLX5_IB_QPT_DCI)) in qp_supports_affinity()
4030 static unsigned int get_tx_affinity(struct ib_qp *qp, in get_tx_affinity() argument
4037 struct mlx5_ib_dev *dev = to_mdev(qp->device); in get_tx_affinity()
4038 struct mlx5_ib_qp *mqp = to_mqp(qp); in get_tx_affinity()
4066 static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id, in __mlx5_ib_qp_set_raw_qp_counter() argument
4069 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in __mlx5_ib_qp_set_raw_qp_counter()
4074 if (!qp->rq.wqe_cnt) in __mlx5_ib_qp_set_raw_qp_counter()
4078 MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid); in __mlx5_ib_qp_set_raw_qp_counter()
4090 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, in __mlx5_ib_qp_set_counter() argument
4093 struct mlx5_ib_dev *dev = to_mdev(qp->device); in __mlx5_ib_qp_set_counter()
4095 struct mlx5_ib_qp *mqp = to_mqp(qp); in __mlx5_ib_qp_set_counter()
4167 struct mlx5_ib_qp *qp = to_mqp(ibqp); in __mlx5_ib_modify_qp() local
4168 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in __mlx5_ib_modify_qp()
4180 mlx5_st = to_mlx5_st(qp->type); in __mlx5_ib_modify_qp()
4188 pd = to_mpd(qp->ibqp.pd); in __mlx5_ib_modify_qp()
4216 if (is_sqp(qp->type)) { in __mlx5_ib_modify_qp()
4219 } else if ((qp->type == IB_QPT_UD && in __mlx5_ib_modify_qp()
4220 !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) || in __mlx5_ib_modify_qp()
4221 qp->type == MLX5_IB_QPT_REG_UMR) { in __mlx5_ib_modify_qp()
4247 if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI && is_qp0(qp->type)) { in __mlx5_ib_modify_qp()
4249 smi_to_native_portnum(dev, qp->port)); in __mlx5_ib_modify_qp()
4251 MLX5_SET(ads, pri_path, plane_index, qp->port); in __mlx5_ib_modify_qp()
4252 } else if (is_sqp(qp->type)) in __mlx5_ib_modify_qp()
4253 MLX5_SET(ads, pri_path, vhca_port_num, qp->port); in __mlx5_ib_modify_qp()
4259 err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path, in __mlx5_ib_modify_qp()
4261 qp->port, in __mlx5_ib_modify_qp()
4271 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path, in __mlx5_ib_modify_qp()
4280 get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, in __mlx5_ib_modify_qp()
4308 err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc); in __mlx5_ib_modify_qp()
4322 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx5_ib_modify_qp()
4323 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in __mlx5_ib_modify_qp()
4327 qp->port) - 1; in __mlx5_ib_modify_qp()
4330 if (qp->flags & IB_QP_CREATE_SOURCE_QPN) in __mlx5_ib_modify_qp()
4343 if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1) in __mlx5_ib_modify_qp()
4346 if (qp->is_ooo_rq && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { in __mlx5_ib_modify_qp()
4364 if (qp->type == IB_QPT_RAW_PACKET || in __mlx5_ib_modify_qp()
4365 qp->flags & IB_QP_CREATE_SOURCE_QPN) { in __mlx5_ib_modify_qp()
4405 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); in __mlx5_ib_modify_qp()
4421 qp->state = new_state; in __mlx5_ib_modify_qp()
4424 qp->trans_qp.atomic_rd_en = attr->qp_access_flags; in __mlx5_ib_modify_qp()
4426 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; in __mlx5_ib_modify_qp()
4428 qp->port = attr->port_num; in __mlx5_ib_modify_qp()
4430 qp->trans_qp.alt_port = attr->alt_port_num; in __mlx5_ib_modify_qp()
4433 * If we moved a kernel QP to RESET, clean up all old CQ in __mlx5_ib_modify_qp()
4434 * entries and reinitialize the QP. in __mlx5_ib_modify_qp()
4437 !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) { in __mlx5_ib_modify_qp()
4443 qp->rq.head = 0; in __mlx5_ib_modify_qp()
4444 qp->rq.tail = 0; in __mlx5_ib_modify_qp()
4445 qp->sq.head = 0; in __mlx5_ib_modify_qp()
4446 qp->sq.tail = 0; in __mlx5_ib_modify_qp()
4447 qp->sq.cur_post = 0; in __mlx5_ib_modify_qp()
4448 if (qp->sq.wqe_cnt) in __mlx5_ib_modify_qp()
4449 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); in __mlx5_ib_modify_qp()
4450 qp->sq.last_poll = 0; in __mlx5_ib_modify_qp()
4451 qp->db.db[MLX5_RCV_DBR] = 0; in __mlx5_ib_modify_qp()
4452 qp->db.db[MLX5_SND_DBR] = 0; in __mlx5_ib_modify_qp()
4455 if ((new_state == IB_QPS_RTS) && qp->counter_pending) { in __mlx5_ib_modify_qp()
4458 qp->counter_pending = 0; in __mlx5_ib_modify_qp()
4477 /* Check valid state transitions for driver QP types;
4478 * for now, the only QP type this function supports is DCI
4512 /* mlx5_ib_modify_dct: modify a DCT QP
4523 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_modify_dct() local
4533 cur_state = qp->state; in mlx5_ib_modify_dct()
4536 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); in mlx5_ib_modify_dct()
4539 * DCT doesn't initialize the QP until the modify command is executed, in mlx5_ib_modify_dct()
4566 atomic_mode = get_atomic_mode(dev, qp); in mlx5_ib_modify_dct()
4583 qp->port = attr->port_num; in mlx5_ib_modify_dct()
4610 if (qp->is_ooo_rq) { in mlx5_ib_modify_dct()
4615 err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in, in mlx5_ib_modify_dct()
4618 err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out); in mlx5_ib_modify_dct()
4621 resp.dctn = qp->dct.mdct.mqp.qpn; in mlx5_ib_modify_dct()
4626 mlx5_core_destroy_dct(dev, &qp->dct.mdct); in mlx5_ib_modify_dct()
4634 qp->state = new_state; in mlx5_ib_modify_dct()
4639 struct mlx5_ib_qp *qp) in mlx5_ib_modify_qp_allowed() argument
4644 if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR) in mlx5_ib_modify_qp_allowed()
4689 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_modify_qp() local
4695 if (!mlx5_ib_modify_qp_allowed(dev, qp)) in mlx5_ib_modify_qp()
4723 if (!get_dp_ooo_cap(dev->mdev, qp->type)) in mlx5_ib_modify_qp()
4725 qp->is_ooo_rq = 1; in mlx5_ib_modify_qp()
4729 if (qp->type == IB_QPT_GSI) in mlx5_ib_modify_qp()
4732 qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type; in mlx5_ib_modify_qp()
4737 mutex_lock(&qp->mutex); in mlx5_ib_modify_qp()
4739 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx5_ib_modify_qp()
4742 if (qp->flags & IB_QP_CREATE_SOURCE_QPN) { in mlx5_ib_modify_qp()
4744 mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n", in mlx5_ib_modify_qp()
4752 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", in mlx5_ib_modify_qp()
4753 cur_state, new_state, qp->type, attr_mask); in mlx5_ib_modify_qp()
4757 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", in mlx5_ib_modify_qp()
4790 /* Return -EFAULT to the user and expect them to destroy the QP. */ in mlx5_ib_modify_qp()
4794 mutex_unlock(&qp->mutex); in mlx5_ib_modify_qp()
4897 struct mlx5_ib_qp *qp, u8 *qp_state) in sqrq_state_to_qp_state() argument
4929 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", in sqrq_state_to_qp_state()
4930 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, in sqrq_state_to_qp_state()
4931 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); in sqrq_state_to_qp_state()
4936 *qp_state = qp->state; in sqrq_state_to_qp_state()
4942 struct mlx5_ib_qp *qp, in query_raw_packet_qp_state() argument
4945 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in query_raw_packet_qp_state()
4952 if (qp->sq.wqe_cnt) { in query_raw_packet_qp_state()
4958 if (qp->rq.wqe_cnt) { in query_raw_packet_qp_state()
4964 return sqrq_state_to_qp_state(sq_state, rq_state, qp, in query_raw_packet_qp_state()
4968 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in query_qp_attr() argument
4980 err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen, in query_qp_attr()
4987 qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state)); in query_qp_attr()
5014 if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC || in query_qp_attr()
5015 qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) { in query_qp_attr()
5101 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_query_qp() local
5108 if (qp->type == IB_QPT_GSI) in mlx5_ib_query_qp()
5116 if (unlikely(qp->type == MLX5_IB_QPT_DCT)) in mlx5_ib_query_qp()
5117 return mlx5_ib_dct_query_qp(dev, qp, qp_attr, in mlx5_ib_query_qp()
5120 mutex_lock(&qp->mutex); in mlx5_ib_query_qp()
5122 if (qp->type == IB_QPT_RAW_PACKET || in mlx5_ib_query_qp()
5123 qp->flags & IB_QP_CREATE_SOURCE_QPN) { in mlx5_ib_query_qp()
5124 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state); in mlx5_ib_query_qp()
5127 qp->state = raw_packet_qp_state; in mlx5_ib_query_qp()
5130 err = query_qp_attr(dev, qp, qp_attr); in mlx5_ib_query_qp()
5135 qp_attr->qp_state = qp->state; in mlx5_ib_query_qp()
5137 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx5_ib_query_qp()
5138 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx5_ib_query_qp()
5141 qp_attr->cap.max_send_wr = qp->sq.max_post; in mlx5_ib_query_qp()
5142 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx5_ib_query_qp()
5149 qp_init_attr->qp_type = qp->type; in mlx5_ib_query_qp()
5153 qp_attr->cap.max_inline_data = qp->max_inline_data; in mlx5_ib_query_qp()
5157 qp_init_attr->create_flags = qp->flags; in mlx5_ib_query_qp()
5159 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? in mlx5_ib_query_qp()
5163 mutex_unlock(&qp->mutex); in mlx5_ib_query_qp()
5772 void mlx5_ib_drain_sq(struct ib_qp *qp) in mlx5_ib_drain_sq() argument
5774 struct ib_cq *cq = qp->send_cq; in mlx5_ib_drain_sq()
5786 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_drain_sq()
5789 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx5_ib_drain_sq()
5798 ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr); in mlx5_ib_drain_sq()
5807 void mlx5_ib_drain_rq(struct ib_qp *qp) in mlx5_ib_drain_rq() argument
5809 struct ib_cq *cq = qp->recv_cq; in mlx5_ib_drain_rq()
5815 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_drain_rq()
5818 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx5_ib_drain_rq()
5828 ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr); in mlx5_ib_drain_rq()
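    mlx5_ib_drain_sq()/mlx5_ib_drain_rq() above implement the usual drain recipe: move the QP to the error state, post one marker work request, and wait until the marker's completion comes back, at which point everything posted before it has flushed. A toy single-threaded model of that recipe (no verbs API involved; every name below is made up):

	#include <stdio.h>
	#include <stdbool.h>

	#define QDEPTH 8

	struct toy_qp {
		bool error;		/* QP moved to the error state */
		int head, tail;		/* pending work entries */
		int wrid[QDEPTH];
	};

	static void post(struct toy_qp *qp, int wrid)
	{
		qp->wrid[qp->tail++ % QDEPTH] = wrid;
	}

	/* "Poll": in the error state every pending entry flushes at once. */
	static int poll(struct toy_qp *qp)
	{
		if (!qp->error || qp->head == qp->tail)
			return -1;
		return qp->wrid[qp->head++ % QDEPTH];
	}

	int main(void)
	{
		struct toy_qp qp = { 0 };
		const int DRAIN_WRID = -42;
		int wc;

		post(&qp, 1);
		post(&qp, 2);

		qp.error = true;	/* step 1: modify QP to error state */
		post(&qp, DRAIN_WRID);	/* step 2: post the marker entry */

		/* step 3: seeing the marker means everything else flushed */
		while ((wc = poll(&qp)) != DRAIN_WRID)
			printf("flushed wrid %d\n", wc);
		printf("drain complete\n");
		return 0;
	}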
5838 * Bind a qp to a counter. If @counter is NULL then bind the qp to
5841 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) in mlx5_ib_qp_set_counter() argument
5843 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_qp_set_counter()
5844 struct mlx5_ib_qp *mqp = to_mqp(qp); in mlx5_ib_qp_set_counter()
5849 qp->counter = counter; in mlx5_ib_qp_set_counter()
5859 err = __mlx5_ib_qp_set_counter(qp, counter); in mlx5_ib_qp_set_counter()
5861 qp->counter = counter; in mlx5_ib_qp_set_counter()
5867 qp->counter = counter; in mlx5_ib_qp_set_counter()