Lines Matching full:qp
61 static void *get_recv_wqe(struct mlx5_qp *qp, int n) in get_recv_wqe() argument
63 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift); in get_recv_wqe()
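The matches above and below are evidently from the mlx5 provider's QP data path (providers/mlx5/qp.c in rdma-core). get_recv_wqe() locates a receive WQE by shifting the WQE index left by rq.wqe_shift (each RQ WQE occupies a power-of-two number of bytes) and adding the RQ's offset inside the QP buffer. A minimal stand-alone sketch of that address arithmetic, with made-up values for the fields involved:

    /* Sketch only: the get_recv_wqe() address math with illustrative values;
     * the real fields live in struct mlx5_qp. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t buf[4096];      /* stand-in for qp->buf.buf   */
            unsigned rq_offset = 0; /* stand-in for qp->rq.offset */
            unsigned wqe_shift = 6; /* 64-byte RQ WQEs: 1 << 6    */
            int n = 5;              /* WQE index                  */

            void *wqe = buf + rq_offset + ((size_t)n << wqe_shift);

            printf("recv WQE %d sits %td bytes into the RQ\n",
                   n, (uint8_t *)wqe - buf);   /* prints 320 */
            return 0;
    }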
93 int mlx5_copy_to_recv_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) in mlx5_copy_to_recv_wqe() argument
96 int max = 1 << (qp->rq.wqe_shift - 4); in mlx5_copy_to_recv_wqe()
98 scat = get_recv_wqe(qp, idx); in mlx5_copy_to_recv_wqe()
99 if (unlikely(qp->wq_sig)) in mlx5_copy_to_recv_wqe()
105 int mlx5_copy_to_send_wqe(struct mlx5_qp *qp, int idx, void *buf, int size) in mlx5_copy_to_send_wqe() argument
112 idx &= (qp->sq.wqe_cnt - 1); in mlx5_copy_to_send_wqe()
113 ctrl = mlx5_get_send_wqe(qp, idx); in mlx5_copy_to_send_wqe()
114 if (qp->ibv_qp->qp_type != IBV_QPT_RC) { in mlx5_copy_to_send_wqe()
139 if (unlikely((void *)(scat + max) > qp->sq.qend)) { in mlx5_copy_to_send_wqe()
140 int tmp = ((void *)qp->sq.qend - (void *)scat) >> 4; in mlx5_copy_to_send_wqe()
147 scat = mlx5_get_send_wqe(qp, 0); in mlx5_copy_to_send_wqe()
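mlx5_copy_to_send_wqe() has to cope with the send queue being a ring: when the scatter pointer plus the remaining entries would run past sq.qend, only the part that fits before the end is handled and the walk wraps back to mlx5_get_send_wqe(qp, 0). A hedged, self-contained sketch of that wrap-around split (local names, not the provider's structures):

    /* Sketch only: splitting a copy that would cross the end of a ring
     * buffer (qend) and continuing from the start of the queue. */
    #include <stdint.h>
    #include <string.h>

    static void copy_with_wrap(uint8_t *sq_start, uint8_t *qend,
                               uint8_t *dst, const uint8_t *src, size_t len)
    {
            size_t to_end = (size_t)(qend - dst);

            if (len > to_end) {
                    memcpy(dst, src, to_end);                     /* up to qend */
                    memcpy(sq_start, src + to_end, len - to_end); /* wrap to 0  */
            } else {
                    memcpy(dst, src, len);
            }
    }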
153 void *mlx5_get_send_wqe(struct mlx5_qp *qp, int n) in mlx5_get_send_wqe() argument
155 return qp->sq_start + (n << MLX5_SEND_WQE_SHIFT); in mlx5_get_send_wqe()
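mlx5_get_send_wqe() indexes the send queue in basic blocks; MLX5_SEND_WQE_SHIFT is 6 in the mlx5 headers, so each basic block is 64 bytes (which is also why sizes are later rounded with MLX5_SEND_WQE_BB). A small sketch of the same math, with the shift value stated as an assumption:

    /* Sketch only: send-queue addressing, assuming the usual 64-byte basic
     * block (MLX5_SEND_WQE_SHIFT == 6). */
    #include <stddef.h>

    #define SEND_WQE_SHIFT 6        /* stand-in for MLX5_SEND_WQE_SHIFT */

    static void *send_wqe(void *sq_start, int n)
    {
            return (char *)sq_start + ((size_t)n << SEND_WQE_SHIFT);
    }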
164 void mlx5_init_qp_indices(struct mlx5_qp *qp) in mlx5_init_qp_indices() argument
166 qp->sq.head = 0; in mlx5_init_qp_indices()
167 qp->sq.tail = 0; in mlx5_init_qp_indices()
168 qp->rq.head = 0; in mlx5_init_qp_indices()
169 qp->rq.tail = 0; in mlx5_init_qp_indices()
170 qp->sq.cur_post = 0; in mlx5_init_qp_indices()
239 unsigned bytecnt, struct mlx5_qp *qp) in mlx5_bf_copy() argument
251 if (unlikely(src == qp->sq.qend)) in mlx5_bf_copy()
252 src = qp->sq_start; in mlx5_bf_copy()
269 static int set_data_inl_seg(struct mlx5_qp *qp, struct ibv_send_wr *wr, in set_data_inl_seg() argument
278 void *qend = qp->sq.qend; in set_data_inl_seg()
290 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
298 wqe = mlx5_get_send_wqe(qp, 0); in set_data_inl_seg()
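set_data_inl_seg() copies the gather list directly into the WQE behind an inline header, rejecting the request once the running total exceeds qp->max_inline_data and wrapping at qend like the other builders. The WQE size it reports is the inline payload plus the 4-byte byte_count header, rounded up to 16-byte units (the unit the control segment's ds count uses). A tiny sketch of that rounding:

    /* Sketch only: the size bookkeeping for an inline data segment, i.e.
     * align(inl + sizeof(byte_count), 16) / 16, written without the
     * provider's align() helper. */
    static int inline_seg_size_16(int inl_bytes)
    {
            return (inl_bytes + 4 + 15) / 16;
    }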
319 static void dump_wqe(FILE *fp, int idx, int size_16, struct mlx5_qp *qp) in dump_wqe() argument
325 fprintf(fp, "dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); in dump_wqe()
328 void *buf = mlx5_get_send_wqe(qp, tidx); in dump_wqe()
329 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
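dump_wqe() prints the WQE as rows of four big-endian 32-bit words (16 bytes per row), re-fetching the basic-block pointer every 64 bytes and masking the block index with sq.wqe_cnt - 1 so the dump follows the ring. A simplified sketch of the row format, with the wrap handling omitted:

    /* Sketch only: printing a WQE region as 16-byte rows of big-endian
     * words, the same layout dump_wqe() emits; wrap-around is omitted. */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    static void dump_16byte_rows(FILE *fp, const uint32_t *wqe, int size_16)
    {
            for (int i = 0; i < size_16; i++)
                    fprintf(fp, "%08x %08x %08x %08x\n",
                            be32toh(wqe[4 * i]),     be32toh(wqe[4 * i + 1]),
                            be32toh(wqe[4 * i + 2]), be32toh(wqe[4 * i + 3]));
    }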
340 void *mlx5_get_atomic_laddr(struct mlx5_qp *qp, uint16_t idx, int *byte_count) in mlx5_get_atomic_laddr() argument
345 dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + in mlx5_get_atomic_laddr()
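The mlx5_get_atomic_laddr() match is cut off by the filter; in the source the data segment is reached by skipping the control, remote-address and atomic segments that precede it in an atomic send WQE. A sketch of that offset, assuming the segment structs exposed for direct verbs in infiniband/mlx5dv.h:

    /* Sketch only: offset of the local data segment inside an atomic WQE,
     * assuming the mlx5dv.h segment definitions. */
    #include <infiniband/mlx5dv.h>
    #include <stddef.h>

    static size_t atomic_data_seg_offset(void)
    {
            return sizeof(struct mlx5_wqe_ctrl_seg) +
                   sizeof(struct mlx5_wqe_raddr_seg) +
                   sizeof(struct mlx5_wqe_atomic_seg);
    }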
421 static void set_umr_data_seg(struct mlx5_qp *qp, enum ibv_mw_type type, in set_umr_data_seg() argument
441 static void set_umr_mkey_seg(struct mlx5_qp *qp, enum ibv_mw_type type, in set_umr_mkey_seg() argument
479 static inline void set_umr_control_seg(struct mlx5_qp *qp, enum ibv_mw_type type, in set_umr_control_seg() argument
516 static inline int set_bind_wr(struct mlx5_qp *qp, enum ibv_mw_type type, in set_bind_wr() argument
520 void *qend = qp->sq.qend; in set_bind_wr()
544 set_umr_control_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
546 *seg = mlx5_get_send_wqe(qp, 0); in set_bind_wr()
548 set_umr_mkey_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
553 *seg = mlx5_get_send_wqe(qp, 0); in set_bind_wr()
555 set_umr_data_seg(qp, type, rkey, bind_info, qpn, seg, size); in set_bind_wr()
563 void *qend, struct mlx5_qp *qp, int *size) in set_tso_eth_seg() argument
569 FILE *fp = to_mctx(qp->ibv_qp->context)->dbg_fp; in set_tso_eth_seg()
572 wr->tso.hdr_sz > qp->max_tso_header)) { in set_tso_eth_seg()
576 qp->max_tso_header); in set_tso_eth_seg()
601 *seg = mlx5_get_send_wqe(qp, 0); in set_tso_eth_seg()
616 struct mlx5_qp *qp = to_mqp(ibqp); in _mlx5_post_send() local
629 struct mlx5_bf *bf = qp->bf; in _mlx5_post_send()
630 void *qend = qp->sq.qend; in _mlx5_post_send()
638 mlx5_spin_lock(&qp->sq.lock); in _mlx5_post_send()
640 next_fence = qp->fm_cache; in _mlx5_post_send()
651 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, in _mlx5_post_send()
652 to_mcq(qp->ibv_qp->send_cq)))) { in _mlx5_post_send()
659 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx5_post_send()
661 wr->num_sge, qp->sq.max_gs); in _mlx5_post_send()
672 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in _mlx5_post_send()
673 ctrl = seg = mlx5_get_send_wqe(qp, idx); in _mlx5_post_send()
676 ctrl->fm_ce_se = qp->sq_signal_bits | fence | in _mlx5_post_send()
708 if (unlikely(!qp->atomics_enabled)) { in _mlx5_post_send()
730 err = set_bind_wr(qp, wr->bind_mw.mw->type, in _mlx5_post_send()
739 qp->sq.wr_data[idx] = IBV_WC_BIND_MW; in _mlx5_post_send()
746 err = set_bind_wr(qp, IBV_MW_TYPE_2, 0, in _mlx5_post_send()
754 qp->sq.wr_data[idx] = IBV_WC_LOCAL_INV; in _mlx5_post_send()
775 err = set_bind_wr(qp, wr->bind_mw.mw->type, in _mlx5_post_send()
784 qp->sq.wr_data[idx] = IBV_WC_BIND_MW; in _mlx5_post_send()
791 err = set_bind_wr(qp, IBV_MW_TYPE_2, 0, in _mlx5_post_send()
799 qp->sq.wr_data[idx] = IBV_WC_LOCAL_INV; in _mlx5_post_send()
813 seg = mlx5_get_send_wqe(qp, 0); in _mlx5_post_send()
821 if (!(qp->qp_cap_cache & MLX5_CSUM_SUPPORT_RAW_OVER_ETH)) { in _mlx5_post_send()
831 max_tso = qp->max_tso; in _mlx5_post_send()
832 err = set_tso_eth_seg(&seg, wr, qend, qp, &size); in _mlx5_post_send()
859 err = set_data_inl_seg(qp, wr, seg, &sz, &sg_copy_ptr); in _mlx5_post_send()
872 seg = mlx5_get_send_wqe(qp, 0); in _mlx5_post_send()
901 ctrl->opmod_idx_opcode = htobe32(((qp->sq.cur_post & 0xffff) << 8) | in _mlx5_post_send()
906 if (unlikely(qp->wq_sig)) in _mlx5_post_send()
909 qp->sq.wrid[idx] = wr->wr_id; in _mlx5_post_send()
910 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in _mlx5_post_send()
911 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in _mlx5_post_send()
915 dump_wqe(to_mctx(ibqp->context)->dbg_fp, idx, size, qp); in _mlx5_post_send()
921 qp->sq.head += nreq; in _mlx5_post_send()
922 qp->fm_cache = next_fence; in _mlx5_post_send()
929 qp->db[MLX5_SND_DBR] = htobe32(qp->sq.cur_post & 0xffff); in _mlx5_post_send()
943 align(size * 16, 64), qp); in _mlx5_post_send()
964 mlx5_spin_unlock(&qp->sq.lock); in _mlx5_post_send()
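Taken together, the _mlx5_post_send() matches show the send-side bookkeeping: the slot is cur_post masked by wqe_cnt - 1 (wqe_cnt is a power of two), the WQE is built starting at the control segment, cur_post advances by the WQE size in 64-byte basic blocks, and after the loop the low 16 bits of cur_post are written to the send doorbell record before the doorbell ring or BlueFlame copy of align(size * 16, 64) bytes. A stand-alone sketch of just the index/size arithmetic (field names mirror struct mlx5_qp, but this is not the provider code):

    /* Sketch only: send-queue slot selection and post-counter advance as in
     * _mlx5_post_send(); SEND_WQE_BB stands in for MLX5_SEND_WQE_BB (64). */
    struct sq_state {
            unsigned cur_post;      /* free-running post counter          */
            unsigned wqe_cnt;       /* number of basic blocks, power of 2 */
    };

    #define SEND_WQE_BB 64

    /* size_16 is the WQE size in 16-byte units, as carried in the ctrl seg. */
    static unsigned post_one(struct sq_state *sq, int size_16)
    {
            unsigned idx = sq->cur_post & (sq->wqe_cnt - 1);

            sq->cur_post += (size_16 * 16 + SEND_WQE_BB - 1) / SEND_WQE_BB;
            return idx;
    }

The same 16-bit mask appears twice in the listing: the doorbell record takes cur_post & 0xffff, and opmod_idx_opcode embeds the same truncated counter alongside the opcode.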
990 int mlx5_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, in mlx5_bind_mw() argument
1028 ret = _mlx5_post_send(qp, &wr, &bad_wr); in mlx5_bind_mw()
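mlx5_bind_mw() reuses the send path: it builds an IBV_WR_BIND_MW work request and hands it to _mlx5_post_send(). For orientation, a hedged sketch of what such a work request looks like at the plain libibverbs level ('mw', 'mr' and 'new_rkey' are assumed to be supplied by the caller):

    /* Sketch only: shaping a bind-MW work request with standard verbs
     * fields; not the provider's internal construction. */
    #include <infiniband/verbs.h>
    #include <stdint.h>
    #include <string.h>

    static void build_bind_wr(struct ibv_send_wr *wr, struct ibv_mw *mw,
                              struct ibv_mr *mr, uint32_t new_rkey)
    {
            memset(wr, 0, sizeof(*wr));
            wr->opcode                            = IBV_WR_BIND_MW;
            wr->send_flags                        = IBV_SEND_SIGNALED;
            wr->bind_mw.mw                        = mw;
            wr->bind_mw.rkey                      = new_rkey;
            wr->bind_mw.bind_info.mr              = mr;
            wr->bind_mw.bind_info.addr            = (uintptr_t)mr->addr;
            wr->bind_mw.bind_info.length          = mr->length;
            wr->bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_READ;
    }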
1037 static void set_sig_seg(struct mlx5_qp *qp, struct mlx5_rwqe_sig *sig, in set_sig_seg() argument
1041 uint32_t qpn = qp->ibv_qp->qp_num; in set_sig_seg()
1137 struct mlx5_qp *qp = to_mqp(ibqp); in mlx5_post_recv() local
1145 mlx5_spin_lock(&qp->rq.lock); in mlx5_post_recv()
1147 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_post_recv()
1150 if (unlikely(mlx5_wq_overflow(&qp->rq, nreq, in mlx5_post_recv()
1151 to_mcq(qp->ibv_qp->recv_cq)))) { in mlx5_post_recv()
1157 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_post_recv()
1163 scat = get_recv_wqe(qp, ind); in mlx5_post_recv()
1165 if (unlikely(qp->wq_sig)) { in mlx5_post_recv()
1166 memset(sig, 0, 1 << qp->rq.wqe_shift); in mlx5_post_recv()
1176 if (j < qp->rq.max_gs) { in mlx5_post_recv()
1182 if (unlikely(qp->wq_sig)) in mlx5_post_recv()
1183 set_sig_seg(qp, sig, (wr->num_sge + 1) << 4, in mlx5_post_recv()
1184 qp->rq.head & 0xffff); in mlx5_post_recv()
1186 qp->rq.wrid[ind] = wr->wr_id; in mlx5_post_recv()
1188 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_post_recv()
1193 qp->rq.head += nreq; in mlx5_post_recv()
1202 * For Raw Packet QP, avoid updating the doorbell record in mlx5_post_recv()
1203 * as long as the QP isn't in RTR state, to avoid receiving in mlx5_post_recv()
1210 qp->db[MLX5_RCV_DBR] = htobe32(qp->rq.head & 0xffff); in mlx5_post_recv()
1213 mlx5_spin_unlock(&qp->rq.lock); in mlx5_post_recv()
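The mlx5_post_recv() matches show the receive side: the slot is rq.head masked by rq.wqe_cnt - 1, the caller's SGEs are written into the WQE returned by get_recv_wqe(), a signature segment is filled when wq_sig is set, and after the loop rq.head is advanced and its low 16 bits published to the receive doorbell record (deferred for Raw Packet QPs until the QP leaves the pre-RTR states). A hedged sketch of filling the scatter entries in the big-endian layout the matched lines imply (mlx5_wqe_data_seg is the direct-verbs struct from infiniband/mlx5dv.h):

    /* Sketch only: writing receive scatter entries; the provider additionally
     * terminates a short list with a zero byte count and an invalid lkey. */
    #include <infiniband/mlx5dv.h>
    #include <infiniband/verbs.h>
    #include <endian.h>

    static void fill_recv_sges(struct mlx5_wqe_data_seg *scat,
                               const struct ibv_sge *sgl, int num_sge)
    {
            for (int i = 0; i < num_sge; i++) {
                    scat[i].byte_count = htobe32(sgl[i].length);
                    scat[i].lkey       = htobe32(sgl[i].lkey);
                    scat[i].addr       = htobe64(sgl[i].addr);
            }
    }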
1238 int mlx5_store_qp(struct mlx5_context *ctx, uint32_t qpn, struct mlx5_qp *qp) in mlx5_store_qp() argument
1250 ctx->qp_table[tind].table[qpn & MLX5_QP_TABLE_MASK] = qp; in mlx5_store_qp()
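mlx5_store_qp() records the QP in a two-level table keyed by QP number: the high bits select a first-level slot (tind), whose second-level array is allocated on first use, and the low bits (MLX5_QP_TABLE_MASK) index into it. A sketch of that shape, with illustrative shift/mask values standing in for the mlx5 macros:

    /* Sketch only: a two-level QP-number table; TABLE_SHIFT/TABLE_MASK are
     * stand-ins for the MLX5_QP_TABLE_* macros. */
    #include <stdint.h>
    #include <stdlib.h>

    #define TABLE_SHIFT 12
    #define TABLE_MASK  ((1u << TABLE_SHIFT) - 1)

    struct qp_dir {
            void **table;   /* second level, allocated on first use */
            int    refcnt;
    };

    static int store_qp(struct qp_dir *dir, uint32_t qpn, void *qp)
    {
            struct qp_dir *d = &dir[qpn >> TABLE_SHIFT];

            if (!d->refcnt) {
                    d->table = calloc(TABLE_MASK + 1, sizeof(void *));
                    if (!d->table)
                            return -1;
            }
            d->refcnt++;
            d->table[qpn & TABLE_MASK] = qp;
            return 0;
    }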