Lines Matching refs:cqe

66 static inline uint8_t get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)  in get_cqe_l3_hdr_type()  argument
68 return (cqe->l4_hdr_type_etc >> 2) & 0x3; in get_cqe_l3_hdr_type()
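
The helper at lines 66-68 isolates the L3 header type, which occupies bits 3:2 of the CQE's l4_hdr_type_etc field. A minimal standalone sketch, assuming the rdma-core layout where that field is a single byte:

    #include <stdint.h>

    /* Hypothetical standalone form of get_cqe_l3_hdr_type(): shift the
     * L3 header type (bits 3:2 of l4_hdr_type_etc) down and mask it. */
    static inline uint8_t l3_hdr_type_of(uint8_t l4_hdr_type_etc)
    {
        return (l4_hdr_type_etc >> 2) & 0x3;
    }
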
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe() local
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
89 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) { in get_sw_cqe()
90 return cqe; in get_sw_cqe()
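
The condition at line 89 is the standard CQ ownership test: hardware flips the owner bit in op_own each time it wraps the ring, and software accepts a CQE only when that bit matches the parity of its own wrap count. A sketch of just that test, assuming a power-of-two ring of cq->ibv_cq.cqe + 1 entries and an owner mask of 1:

    #include <stdbool.h>
    #include <stdint.h>

    #define OWNER_MASK 0x1  /* stands in for MLX5_CQE_OWNER_MASK */

    /* n is the consumer index; ncqe is the (power-of-two) ring size.
     * !!(n & ncqe) gives the parity of the wrap count, so the CQE is
     * software-owned when the owner bit equals that parity. */
    static bool cqe_is_sw_owned(uint8_t op_own, uint32_t n, uint32_t ncqe)
    {
        return !((op_own & OWNER_MASK) ^ !!(n & ncqe));
    }
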
106 static inline void handle_good_req(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_wq *wq, i… in handle_good_req() argument
108 switch (be32toh(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
124 wc->byte_len = be32toh(cqe->byte_cnt); in handle_good_req()
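
In handle_good_req() (lines 108-124), the send opcode travels in the top byte of the big-endian sop_drop_qpn word and byte_cnt carries the completed length. A sketch of the two decodes, with a stripped-down struct standing in for mlx5_cqe64:

    #include <endian.h>
    #include <stdint.h>

    struct mini_cqe {            /* stand-in for the real mlx5_cqe64 */
        uint32_t sop_drop_qpn;   /* big-endian; opcode in bits 31:24 */
        uint32_t byte_cnt;       /* big-endian byte count */
    };

    static void decode_req(const struct mini_cqe *cqe,
                           uint8_t *opcode, uint32_t *byte_len)
    {
        *opcode = be32toh(cqe->sop_drop_qpn) >> 24;
        *byte_len = be32toh(cqe->byte_cnt);
    }
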
143 static inline int handle_responder_lazy(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe, in handle_responder_lazy() argument
152 wqe_ctr = be16toh(cqe->wqe_counter); in handle_responder_lazy()
155 if (cqe->op_own & MLX5_INLINE_SCATTER_32) in handle_responder_lazy()
156 err = mlx5_copy_to_recv_srq(srq, wqe_ctr, cqe, in handle_responder_lazy()
157 be32toh(cqe->byte_cnt)); in handle_responder_lazy()
158 else if (cqe->op_own & MLX5_INLINE_SCATTER_64) in handle_responder_lazy()
159 err = mlx5_copy_to_recv_srq(srq, wqe_ctr, cqe - 1, in handle_responder_lazy()
160 be32toh(cqe->byte_cnt)); in handle_responder_lazy()
173 if (cqe->op_own & MLX5_INLINE_SCATTER_32) in handle_responder_lazy()
174 err = mlx5_copy_to_recv_wqe(qp, wqe_ctr, cqe, in handle_responder_lazy()
175 be32toh(cqe->byte_cnt)); in handle_responder_lazy()
176 else if (cqe->op_own & MLX5_INLINE_SCATTER_64) in handle_responder_lazy()
177 err = mlx5_copy_to_recv_wqe(qp, wqe_ctr, cqe - 1, in handle_responder_lazy()
178 be32toh(cqe->byte_cnt)); in handle_responder_lazy()
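
Lines 155-178 show the inline-scatter convention that recurs throughout the file (the send path in mlx5_parse_cqe(), lines 576-593, does the same): with MLX5_INLINE_SCATTER_32 the payload was written into the CQE itself, while with MLX5_INLINE_SCATTER_64 it begins one mlx5_cqe64 earlier, which is what the cqe - 1 pointer arithmetic selects. A sketch of the source selection alone; the flag values below are illustrative, not the mlx5 encoding:

    #include <stddef.h>
    #include <stdint.h>

    #define INLINE_SCATTER_32 0x1  /* illustrative flag values */
    #define INLINE_SCATTER_64 0x2

    /* Returns where inline-scattered payload starts, or NULL when the
     * data was DMA'd to the receive buffer normally.  cqe points at
     * the 64-byte CQE; 64-byte scatter begins one CQE before it. */
    static const void *inline_src(const uint8_t *cqe, uint8_t op_own)
    {
        if (op_own & INLINE_SCATTER_32)
            return cqe;
        if (op_own & INLINE_SCATTER_64)
            return cqe - 64;
        return NULL;
    }
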
184 static inline int handle_responder(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
193 wc->byte_len = be32toh(cqe->byte_cnt); in handle_responder()
195 wqe_ctr = be16toh(cqe->wqe_counter); in handle_responder()
198 if (cqe->op_own & MLX5_INLINE_SCATTER_32) in handle_responder()
199 err = mlx5_copy_to_recv_srq(srq, wqe_ctr, cqe, in handle_responder()
201 else if (cqe->op_own & MLX5_INLINE_SCATTER_64) in handle_responder()
202 err = mlx5_copy_to_recv_srq(srq, wqe_ctr, cqe - 1, in handle_responder()
208 wc->wc_flags |= (!!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) & in handle_responder()
209 !!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) & in handle_responder()
210 (get_cqe_l3_hdr_type(cqe) == in handle_responder()
220 if (cqe->op_own & MLX5_INLINE_SCATTER_32) in handle_responder()
221 err = mlx5_copy_to_recv_wqe(qp, wqe_ctr, cqe, in handle_responder()
223 else if (cqe->op_own & MLX5_INLINE_SCATTER_64) in handle_responder()
224 err = mlx5_copy_to_recv_wqe(qp, wqe_ctr, cqe - 1, in handle_responder()
230 switch (cqe->op_own >> 4) { in handle_responder()
234 wc->imm_data = cqe->imm_inval_pkey; in handle_responder()
242 wc->imm_data = cqe->imm_inval_pkey; in handle_responder()
247 wc->imm_data = be32toh(cqe->imm_inval_pkey); in handle_responder()
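
Note the asymmetry between lines 234/242 and line 247: immediate data is stored without a byte swap (ibv_wc.imm_data is defined as big-endian and handed to the application as received), whereas the send-with-invalidate case converts with be32toh() because the invalidated rkey is consumed as a host-order value. A sketch, assuming a libibverbs where imm_data and invalidated_rkey share the usual union in struct ibv_wc:

    #include <endian.h>
    #include <infiniband/verbs.h>

    static void set_recv_payload_info(struct ibv_wc *wc,
                                      uint32_t imm_inval_pkey, /* big-endian */
                                      int with_invalidate)
    {
        if (with_invalidate) {
            wc->wc_flags |= IBV_WC_WITH_INV;
            wc->invalidated_rkey = be32toh(imm_inval_pkey); /* host order */
        } else {
            wc->wc_flags |= IBV_WC_WITH_IMM;
            wc->imm_data = imm_inval_pkey; /* left big-endian */
        }
    }
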
250 wc->slid = be16toh(cqe->slid); in handle_responder()
251 wc->sl = (be32toh(cqe->flags_rqpn) >> 24) & 0xf; in handle_responder()
252 wc->src_qp = be32toh(cqe->flags_rqpn) & 0xffffff; in handle_responder()
253 wc->dlid_path_bits = cqe->ml_path & 0x7f; in handle_responder()
254 g = (be32toh(cqe->flags_rqpn) >> 28) & 3; in handle_responder()
256 wc->pkey_index = be32toh(cqe->imm_inval_pkey) & 0xffff; in handle_responder()
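
Lines 250-256 unpack the remaining receive-side metadata from two big-endian words: flags_rqpn carries the GRH indicator in bits 29:28, the SL in bits 27:24, and the source QPN in its low 24 bits, while the low 16 bits of imm_inval_pkey are reused as the pkey index. A standalone sketch of the same unpacking:

    #include <endian.h>
    #include <stdint.h>

    struct rx_meta {
        uint16_t slid;
        uint8_t  sl, dlid_path_bits, grh;
        uint32_t src_qp;
        uint16_t pkey_index;
    };

    static void unpack_rx_meta(uint32_t flags_rqpn,     /* big-endian */
                               uint16_t slid,           /* big-endian */
                               uint8_t ml_path,
                               uint32_t imm_inval_pkey, /* big-endian */
                               struct rx_meta *m)
    {
        uint32_t fr = be32toh(flags_rqpn);

        m->slid = be16toh(slid);
        m->sl = (fr >> 24) & 0xf;      /* bits 27:24 */
        m->src_qp = fr & 0xffffff;     /* bits 23:0 */
        m->dlid_path_bits = ml_path & 0x7f;
        m->grh = (fr >> 28) & 3;       /* 'g' at line 254 */
        m->pkey_index = be32toh(imm_inval_pkey) & 0xffff;
    }
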
271 static enum ibv_wc_status mlx5_handle_error_cqe(struct mlx5_err_cqe *cqe) in mlx5_handle_error_cqe() argument
273 switch (cqe->syndrome) { in mlx5_handle_error_cqe()
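
mlx5_handle_error_cqe() (lines 271-273) is a plain mapping from hardware error syndromes to ibv_wc_status codes. A sketch of its shape; the two syndrome values below are placeholders rather than the mlx5 hardware encoding:

    #include <stdint.h>
    #include <infiniband/verbs.h>

    enum {                        /* placeholder syndrome values */
        SYN_LOCAL_LENGTH  = 0x01,
        SYN_REMOTE_ACCESS = 0x13,
    };

    static enum ibv_wc_status syndrome_to_status(uint8_t syndrome)
    {
        switch (syndrome) {
        case SYN_LOCAL_LENGTH:
            return IBV_WC_LOC_LEN_ERR;
        case SYN_REMOTE_ACCESS:
            return IBV_WC_REM_ACCESS_ERR;
        default:
            return IBV_WC_GENERAL_ERR;
        }
    }
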
471 void *cqe; in mlx5_get_next_cqe() local
474 cqe = next_cqe_sw(cq); in mlx5_get_next_cqe()
475 if (!cqe) in mlx5_get_next_cqe()
478 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in mlx5_get_next_cqe()
503 *pcqe = cqe; in mlx5_get_next_cqe()
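
The ternary at line 478 (also at lines 86 and 1393) covers the two supported CQE sizes: with 128-byte entries the mlx5_cqe64 that software parses occupies the second half, so the pointer skips the first 64 bytes. The same selection as a small helper:

    #include <stdint.h>

    /* Locate the mlx5_cqe64 inside a raw CQ entry of cqe_sz bytes
     * (64 or 128); the parseable 64-byte structure sits at the tail. */
    static inline void *cqe64_at(void *cqe, int cqe_sz)
    {
        return cqe_sz == 64 ? cqe : (uint8_t *)cqe + 64;
    }
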
510 void *cqe,
518 void *cqe, in mlx5_parse_cqe() argument
576 mqp, wqe_ctr, cqe, wc_byte_len); in mlx5_parse_cqe()
579 mqp, wqe_ctr, cqe - 1, wc_byte_len); in mlx5_parse_cqe()
589 err = mlx5_copy_to_send_wqe(mqp, wqe_ctr, cqe, in mlx5_parse_cqe()
593 mqp, wqe_ctr, cqe - 1, wc->byte_len); in mlx5_parse_cqe()
699 void *cqe, int cqe_ver)
703 void *cqe, int cqe_ver) in mlx5_parse_lazy_cqe() argument
705 return mlx5_parse_cqe(cq, cqe64, cqe, &cq->cur_rsc, &cq->cur_srq, NULL, cqe_ver, 1); in mlx5_parse_lazy_cqe()
719 void *cqe; in mlx5_poll_one() local
722 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_poll_one()
726 return mlx5_parse_cqe(cq, cqe64, cqe, cur_rsc, cur_srq, wc, cqe_ver, 0); in mlx5_poll_one()
836 void *cqe; in mlx5_start_poll() local
858 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_start_poll()
879 err = mlx5_parse_lazy_cqe(cq, cqe64, cqe, cqe_version); in mlx5_start_poll()
905 void *cqe; in mlx5_next_poll() local
908 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_next_poll()
916 return mlx5_parse_lazy_cqe(cq, cqe64, cqe, cqe_version); in mlx5_next_poll()
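
mlx5_start_poll() and mlx5_next_poll() (lines 836-916) are the provider half of the extended "lazy" polling API: each call fetches one CQE via mlx5_get_next_cqe() and defers per-field parsing with mlx5_parse_lazy_cqe() until the application actually reads a field. A sketch of how an application drives this path through the standard libibverbs wrappers (ENOENT simply means the CQ is empty):

    #include <errno.h>
    #include <infiniband/verbs.h>

    /* Drain every completion currently available on an extended CQ. */
    static int drain_cq(struct ibv_cq_ex *cq)
    {
        struct ibv_poll_cq_attr attr = { .comp_mask = 0 };
        int err = ibv_start_poll(cq, &attr);

        if (err)
            return err == ENOENT ? 0 : err;

        do {
            /* cq->status, cq->wr_id and the ibv_wc_read_*()
             * accessors are valid for the current entry here */
        } while (!(err = ibv_next_poll(cq)));

        ibv_end_poll(cq);
        return err == ENOENT ? 0 : err;
    }
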
1368 void *cqe, *dest; in __mlx5_cq_clean() local
1383 if (prod_index == cq->cons_index + cq->ibv_cq.cqe) in __mlx5_cq_clean()
1392 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); in __mlx5_cq_clean()
1393 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_cq_clean()
1397 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe); in __mlx5_cq_clean()
1400 memcpy(dest, cqe, cq->cqe_sz); in __mlx5_cq_clean()
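
__mlx5_cq_clean() (lines 1368-1400) removes a destroyed resource's CQEs by compaction: it walks backwards from the producer index to the consumer index, counts entries to free, and memcpy()s each survivor nfreed slots toward the producer, with every index wrapped through the cq->ibv_cq.cqe mask. A sketch of the loop's shape, with a hypothetical matches() predicate standing in for the real resource-number check:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* ring holds (mask + 1) entries of cqe_sz bytes each.  Scans
     * (cons, prod] backwards; matching entries are dropped and the
     * survivors slide nfreed slots toward the producer. */
    static uint32_t cq_compact(uint8_t *ring, uint32_t mask, size_t cqe_sz,
                               uint32_t cons, uint32_t prod,
                               int (*matches)(const void *cqe))
    {
        uint32_t nfreed = 0;

        while ((int)(--prod) - (int)cons >= 0) {
            void *cqe = ring + (prod & mask) * cqe_sz;

            if (matches(cqe)) {
                ++nfreed;
            } else if (nfreed) {
                void *dest = ring + ((prod + nfreed) & mask) * cqe_sz;
                /* the real loop also preserves dest's owner bit
                 * across this copy */
                memcpy(dest, cqe, cqe_sz);
            }
        }
        return nfreed;
    }
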
1484 struct mlx5_cqe64 *cqe; in mlx5_alloc_cq_buf() local
1508 cqe = buf->buf + i * cqe_sz; in mlx5_alloc_cq_buf()
1509 cqe += cqe_sz == 128 ? 1 : 0; in mlx5_alloc_cq_buf()
1510 cqe->op_own = MLX5_CQE_INVALID << 4; in mlx5_alloc_cq_buf()
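
mlx5_alloc_cq_buf() (lines 1484-1510) stamps MLX5_CQE_INVALID into the opcode nibble (the high four bits of op_own) of every entry so that the ownership logic treats a fresh ring as empty; the cqe += 1 at line 1509 is mlx5_cqe64 pointer arithmetic that lands on the second half of a 128-byte entry, where op_own actually lives. A sketch, relying on op_own being the last byte of the 64-byte structure:

    #include <stdint.h>

    #define CQE_INVALID 0xf  /* illustrative; stands in for MLX5_CQE_INVALID */

    /* Mark every CQE in a fresh buffer as not yet written by hardware.
     * op_own is the final byte of the mlx5_cqe64, which sits at the
     * tail of each cqe_sz-byte (64 or 128) entry. */
    static void stamp_invalid(uint8_t *buf, int nent, int cqe_sz)
    {
        for (int i = 0; i < nent; i++)
            buf[i * cqe_sz + cqe_sz - 1] = (uint8_t)(CQE_INVALID << 4);
    }
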