Lines Matching refs:cq

76 static void *get_cqe(struct mlx5_cq *cq, int n)  in get_cqe()  argument
78 return cq->active_buf->buf + n * cq->cqe_sz; in get_cqe()
81 static void *get_sw_cqe(struct mlx5_cq *cq, int n) in get_sw_cqe() argument
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe()
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
89 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) { in get_sw_cqe()
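The test on line 89 is the software-ownership check: a CQE may be consumed only when its owner bit matches the wrap parity of the index being polled, because the hardware flips the bit each time it wraps the ring. A minimal sketch of that check, assuming ncqe_mask stands in for cq->ibv_cq.cqe (ring size minus one):

#include <infiniband/mlx5dv.h>
#include <stdbool.h>
#include <stdint.h>

static bool cqe_is_sw_owned(uint8_t op_own, uint32_t n, uint32_t ncqe_mask)
{
        /* mirrors line 89: the owner bit must equal the wrap parity of n */
        uint8_t sw_own = !!(n & (ncqe_mask + 1));

        return !((op_own & MLX5_CQE_OWNER_MASK) ^ sw_own);
}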
96 static void *next_cqe_sw(struct mlx5_cq *cq) in next_cqe_sw() argument
98 return get_sw_cqe(cq, cq->cons_index); in next_cqe_sw()
101 static void update_cons_index(struct mlx5_cq *cq) in update_cons_index() argument
103 cq->dbrec[MLX5_CQ_SET_CI] = htobe32(cq->cons_index & 0xffffff); in update_cons_index()
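update_cons_index() at line 103 publishes how far software has consumed: the low 24 bits of cons_index are written big-endian into the CQ doorbell record. A sketch under assumed names (CQ_SET_CI is a stand-in for MLX5_CQ_SET_CI):

#include <endian.h>
#include <stdint.h>

enum { CQ_SET_CI = 0 };                 /* stand-in for MLX5_CQ_SET_CI */

static void publish_ci(volatile uint32_t *dbrec, uint32_t cons_index)
{
        /* only the low 24 bits are meaningful, matching line 103 */
        dbrec[CQ_SET_CI] = htobe32(cons_index & 0xffffff);
}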
143 static inline int handle_responder_lazy(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe, in handle_responder_lazy() argument
153 cq->ibv_cq.wr_id = srq->wrid[wqe_ctr]; in handle_responder_lazy()
165 cq->flags |= MLX5_CQ_FLAGS_RX_CSUM_VALID; in handle_responder_lazy()
171 cq->ibv_cq.wr_id = wq->wrid[wqe_ctr]; in handle_responder_lazy()
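handle_responder_lazy() recovers the receive wr_id in two ways: an SRQ completion carries the WQE counter in the CQE and indexes the SRQ's wrid array directly (line 153), while an ordinary RQ consumes entries in order from its tail (line 171). A sketch with hypothetical toy types:

#include <stdint.h>

struct toy_srq { uint64_t *wrid; };                          /* hypothetical */
struct toy_rq  { uint64_t *wrid; unsigned tail, wqe_cnt; };  /* hypothetical */

static uint64_t srq_wr_id(struct toy_srq *srq, uint16_t wqe_ctr)
{
        return srq->wrid[wqe_ctr];                      /* cf. line 153 */
}

static uint64_t rq_wr_id(struct toy_rq *wq)
{
        return wq->wrid[wq->tail & (wq->wqe_cnt - 1)];  /* in-order RQ consumption */
}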
463 static inline int mlx5_get_next_cqe(struct mlx5_cq *cq,
467 static inline int mlx5_get_next_cqe(struct mlx5_cq *cq, in mlx5_get_next_cqe() argument
474 cqe = next_cqe_sw(cq); in mlx5_get_next_cqe()
478 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in mlx5_get_next_cqe()
480 ++cq->cons_index; in mlx5_get_next_cqe()
492 struct mlx5_context *mctx = to_mctx(cq->ibv_cq.context); in mlx5_get_next_cqe()
497 mlx5_dbg(fp, MLX5_DBG_CQ_CQE, "dump cqe for cqn 0x%x:\n", cq->cqn); in mlx5_get_next_cqe()
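The `(cq->cqe_sz == 64) ? cqe : cqe + 64` pattern repeated on lines 86, 478 and 1393 handles the two CQE strides: with 128-byte CQEs the reported struct mlx5_cqe64 sits in the second half of each entry. A small sketch of that assumption:

#include <infiniband/mlx5dv.h>

static inline struct mlx5_cqe64 *cqe64_of(void *cqe, int cqe_sz)
{
        /* 128-byte CQEs keep the 64-byte completion in their upper half */
        return cqe_sz == 64 ? cqe : (void *)((char *)cqe + 64);
}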
508 static inline int mlx5_parse_cqe(struct mlx5_cq *cq,
516 static inline int mlx5_parse_cqe(struct mlx5_cq *cq, in mlx5_parse_cqe() argument
536 mctx = to_mctx(ibv_cq_ex_to_cq(&cq->ibv_cq)->context); in mlx5_parse_cqe()
539 cq->cqe64 = cqe64; in mlx5_parse_cqe()
540 cq->flags &= (~MLX5_CQ_FLAGS_RX_CSUM_VALID); in mlx5_parse_cqe()
563 cq->umr_opcode = wq->wr_data[idx]; in mlx5_parse_cqe()
583 cq->ibv_cq.wr_id = wq->wrid[idx]; in mlx5_parse_cqe()
584 cq->ibv_cq.status = err; in mlx5_parse_cqe()
613 cq->ibv_cq.status = handle_responder_lazy(cq, cqe64, in mlx5_parse_cqe()
627 enum ibv_wc_status *pstatus = lazy ? &cq->ibv_cq.status : &wc->status; in mlx5_parse_cqe()
657 cq->ibv_cq.wr_id = wq->wrid[idx]; in mlx5_parse_cqe()
670 cq->ibv_cq.wr_id = (*cur_srq)->wrid[wqe_ctr]; in mlx5_parse_cqe()
685 cq->ibv_cq.wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in mlx5_parse_cqe()
697 static inline int mlx5_parse_lazy_cqe(struct mlx5_cq *cq,
701 static inline int mlx5_parse_lazy_cqe(struct mlx5_cq *cq, in mlx5_parse_lazy_cqe() argument
705 return mlx5_parse_cqe(cq, cqe64, cqe, &cq->cur_rsc, &cq->cur_srq, NULL, cqe_ver, 1); in mlx5_parse_lazy_cqe()
708 static inline int mlx5_poll_one(struct mlx5_cq *cq,
713 static inline int mlx5_poll_one(struct mlx5_cq *cq, in mlx5_poll_one() argument
722 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_poll_one()
726 return mlx5_parse_cqe(cq, cqe64, cqe, cur_rsc, cur_srq, wc, cqe_ver, 0); in mlx5_poll_one()
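mlx5_poll_one() is simply mlx5_get_next_cqe() followed by mlx5_parse_cqe(), and the parser dispatches on the opcode carried in op_own. A hypothetical outline of that dispatch; only mlx5dv_get_cqe_opcode() and the MLX5_CQE_* values come from <infiniband/mlx5dv.h>, the rest is a stand-in:

#include <infiniband/mlx5dv.h>

static int parse_cqe_outline(struct mlx5_cqe64 *cqe64)
{
        switch (mlx5dv_get_cqe_opcode(cqe64)) {
        case MLX5_CQE_REQ:              /* send side: wr_id from the SQ wrid ring */
                return 0;
        case MLX5_CQE_RESP_SEND:
        case MLX5_CQE_RESP_WR_IMM:
        case MLX5_CQE_RESP_SEND_IMM:    /* receive side: RQ or SRQ wrid lookup */
                return 0;
        case MLX5_CQE_REQ_ERR:
        case MLX5_CQE_RESP_ERR:         /* error CQE: syndrome in struct mlx5_err_cqe */
        default:
                return -1;
        }
}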
735 struct mlx5_cq *cq = to_mcq(ibcq); in poll_cq() local
741 if (cq->stall_enable) { in poll_cq()
742 if (cq->stall_adaptive_enable) { in poll_cq()
743 if (cq->stall_last_count) in poll_cq()
744 mlx5_stall_cycles_poll_cq(cq->stall_last_count + cq->stall_cycles); in poll_cq()
745 } else if (cq->stall_next_poll) { in poll_cq()
746 cq->stall_next_poll = 0; in poll_cq()
751 mlx5_spin_lock(&cq->lock); in poll_cq()
754 err = mlx5_poll_one(cq, &rsc, &srq, wc + npolled, cqe_ver); in poll_cq()
759 update_cons_index(cq); in poll_cq()
761 mlx5_spin_unlock(&cq->lock); in poll_cq()
763 if (cq->stall_enable) { in poll_cq()
764 if (cq->stall_adaptive_enable) { in poll_cq()
766 cq->stall_cycles = max(cq->stall_cycles-mlx5_stall_cq_dec_step, in poll_cq()
768 mlx5_get_cycles(&cq->stall_last_count); in poll_cq()
770 cq->stall_cycles = min(cq->stall_cycles+mlx5_stall_cq_inc_step, in poll_cq()
772 mlx5_get_cycles(&cq->stall_last_count); in poll_cq()
774 cq->stall_cycles = max(cq->stall_cycles-mlx5_stall_cq_dec_step, in poll_cq()
776 cq->stall_last_count = 0; in poll_cq()
779 cq->stall_next_poll = 1; in poll_cq()
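poll_cq() above backs the classic verbs polling path (with optional stalling between empty polls); applications reach it through ibv_poll_cq(). A usage sketch, assuming `ibcq` was created earlier with ibv_create_cq():

#include <infiniband/verbs.h>
#include <stdio.h>

static int drain_cq(struct ibv_cq *ibcq)
{
        struct ibv_wc wc[16];
        int n, i;

        while ((n = ibv_poll_cq(ibcq, 16, wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IBV_WC_SUCCESS)
                                fprintf(stderr, "wr_id %llu: %s\n",
                                        (unsigned long long)wc[i].wr_id,
                                        ibv_wc_status_str(wc[i].status));
                }
        }

        return n;       /* 0 when the CQ is empty, negative on error */
}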
798 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in _mlx5_end_poll() local
800 update_cons_index(cq); in _mlx5_end_poll()
803 mlx5_spin_unlock(&cq->lock); in _mlx5_end_poll()
807 if (!(cq->flags & MLX5_CQ_FLAGS_FOUND_CQES)) { in _mlx5_end_poll()
808 cq->stall_cycles = max(cq->stall_cycles - mlx5_stall_cq_dec_step, in _mlx5_end_poll()
810 mlx5_get_cycles(&cq->stall_last_count); in _mlx5_end_poll()
811 } else if (cq->flags & MLX5_CQ_FLAGS_EMPTY_DURING_POLL) { in _mlx5_end_poll()
812 cq->stall_cycles = min(cq->stall_cycles + mlx5_stall_cq_inc_step, in _mlx5_end_poll()
814 mlx5_get_cycles(&cq->stall_last_count); in _mlx5_end_poll()
816 cq->stall_cycles = max(cq->stall_cycles - mlx5_stall_cq_dec_step, in _mlx5_end_poll()
818 cq->stall_last_count = 0; in _mlx5_end_poll()
820 } else if (!(cq->flags & MLX5_CQ_FLAGS_FOUND_CQES)) { in _mlx5_end_poll()
821 cq->stall_next_poll = 1; in _mlx5_end_poll()
824 cq->flags &= ~(MLX5_CQ_FLAGS_FOUND_CQES | MLX5_CQ_FLAGS_EMPTY_DURING_POLL); in _mlx5_end_poll()
834 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_start_poll() local
844 if (cq->stall_last_count) in mlx5_start_poll()
845 mlx5_stall_cycles_poll_cq(cq->stall_last_count + cq->stall_cycles); in mlx5_start_poll()
846 } else if (cq->stall_next_poll) { in mlx5_start_poll()
847 cq->stall_next_poll = 0; in mlx5_start_poll()
853 mlx5_spin_lock(&cq->lock); in mlx5_start_poll()
855 cq->cur_rsc = NULL; in mlx5_start_poll()
856 cq->cur_srq = NULL; in mlx5_start_poll()
858 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_start_poll()
861 mlx5_spin_unlock(&cq->lock); in mlx5_start_poll()
865 cq->stall_cycles = max(cq->stall_cycles - mlx5_stall_cq_dec_step, in mlx5_start_poll()
867 mlx5_get_cycles(&cq->stall_last_count); in mlx5_start_poll()
869 cq->stall_next_poll = 1; in mlx5_start_poll()
877 cq->flags |= MLX5_CQ_FLAGS_FOUND_CQES; in mlx5_start_poll()
879 err = mlx5_parse_lazy_cqe(cq, cqe64, cqe, cqe_version); in mlx5_start_poll()
881 mlx5_spin_unlock(&cq->lock); in mlx5_start_poll()
885 cq->stall_cycles = max(cq->stall_cycles - mlx5_stall_cq_dec_step, in mlx5_start_poll()
887 cq->stall_last_count = 0; in mlx5_start_poll()
890 cq->flags &= ~(MLX5_CQ_FLAGS_FOUND_CQES); in mlx5_start_poll()
903 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_next_poll() local
908 err = mlx5_get_next_cqe(cq, &cqe64, &cqe); in mlx5_next_poll()
911 cq->flags |= MLX5_CQ_FLAGS_EMPTY_DURING_POLL; in mlx5_next_poll()
916 return mlx5_parse_lazy_cqe(cq, cqe64, cqe, cqe_version); in mlx5_next_poll()
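mlx5_start_poll(), mlx5_next_poll() and _mlx5_end_poll() implement the lazy (extended CQ) polling contract: the consumer index is only pushed to the doorbell record in end_poll, and start_poll returning ENOENT means the CQ was empty and end_poll must be skipped. A usage sketch driving them through the generic wrappers, assuming `cq_ex` came from ibv_create_cq_ex():

#include <errno.h>
#include <infiniband/verbs.h>

static int drain_cq_ex(struct ibv_cq_ex *cq_ex)
{
        struct ibv_poll_cq_attr attr = {};
        int ret = ibv_start_poll(cq_ex, &attr);

        if (ret)
                return ret == ENOENT ? 0 : ret;  /* empty CQ: do not call end_poll */

        do {
                /* wr_id and status are valid right away; other fields are
                 * fetched lazily through the read_* callbacks installed by
                 * mlx5_cq_fill_pfns() */
                if (cq_ex->status == IBV_WC_SUCCESS)
                        (void)ibv_wc_read_opcode(cq_ex);
                ret = ibv_next_poll(cq_ex);
        } while (!ret);

        ibv_end_poll(cq_ex);

        return ret == ENOENT ? 0 : ret;
}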
1053 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_opcode() local
1055 switch (mlx5dv_get_cqe_opcode(cq->cqe64)) { in mlx5_cq_read_wc_opcode()
1063 switch (be32toh(cq->cqe64->sop_drop_qpn) >> 24) { in mlx5_cq_read_wc_opcode()
1078 return cq->umr_opcode; in mlx5_cq_read_wc_opcode()
1096 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_qp_num() local
1098 return be32toh(cq->cqe64->sop_drop_qpn) & 0xffffff; in mlx5_cq_read_wc_qp_num()
1103 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_flags() local
1106 if (cq->flags & MLX5_CQ_FLAGS_RX_CSUM_VALID) in mlx5_cq_read_wc_flags()
1107 wc_flags = (!!(cq->cqe64->hds_ip_ext & MLX5_CQE_L4_OK) & in mlx5_cq_read_wc_flags()
1108 !!(cq->cqe64->hds_ip_ext & MLX5_CQE_L3_OK) & in mlx5_cq_read_wc_flags()
1109 (get_cqe_l3_hdr_type(cq->cqe64) == in mlx5_cq_read_wc_flags()
1113 switch (mlx5dv_get_cqe_opcode(cq->cqe64)) { in mlx5_cq_read_wc_flags()
1123 wc_flags |= ((be32toh(cq->cqe64->flags_rqpn) >> 28) & 3) ? IBV_WC_GRH : 0; in mlx5_cq_read_wc_flags()
1129 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_byte_len() local
1131 return be32toh(cq->cqe64->byte_cnt); in mlx5_cq_read_wc_byte_len()
1136 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_vendor_err() local
1137 struct mlx5_err_cqe *ecqe = (struct mlx5_err_cqe *)cq->cqe64; in mlx5_cq_read_wc_vendor_err()
1144 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_imm_data() local
1146 switch (mlx5dv_get_cqe_opcode(cq->cqe64)) { in mlx5_cq_read_wc_imm_data()
1148 return be32toh(cq->cqe64->imm_inval_pkey); in mlx5_cq_read_wc_imm_data()
1150 return cq->cqe64->imm_inval_pkey; in mlx5_cq_read_wc_imm_data()
1156 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_slid() local
1158 return (uint32_t)be16toh(cq->cqe64->slid); in mlx5_cq_read_wc_slid()
1163 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_sl() local
1165 return (be32toh(cq->cqe64->flags_rqpn) >> 24) & 0xf; in mlx5_cq_read_wc_sl()
1170 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_src_qp() local
1172 return be32toh(cq->cqe64->flags_rqpn) & 0xffffff; in mlx5_cq_read_wc_src_qp()
1177 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_dlid_path_bits() local
1179 return cq->cqe64->ml_path & 0x7f; in mlx5_cq_read_wc_dlid_path_bits()
1184 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_completion_ts() local
1186 return be64toh(cq->cqe64->timestamp); in mlx5_cq_read_wc_completion_ts()
1191 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_wc_cvlan() local
1193 return be16toh(cq->cqe64->vlan_info); in mlx5_cq_read_wc_cvlan()
1198 struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq)); in mlx5_cq_read_flow_tag() local
1200 return be32toh(cq->cqe64->sop_drop_qpn) & MLX5_FLOW_TAG_MASK; in mlx5_cq_read_flow_tag()
1243 void mlx5_cq_fill_pfns(struct mlx5_cq *cq, const struct ibv_cq_init_attr_ex *cq_attr) in mlx5_cq_fill_pfns() argument
1245 struct mlx5_context *mctx = to_mctx(ibv_cq_ex_to_cq(&cq->ibv_cq)->context); in mlx5_cq_fill_pfns()
1246 const struct op *poll_ops = &ops[((cq->stall_enable && cq->stall_adaptive_enable) ? ADAPTIVE : 0) | in mlx5_cq_fill_pfns()
1248 (cq->flags & MLX5_CQ_FLAGS_SINGLE_THREADED ? in mlx5_cq_fill_pfns()
1250 (cq->stall_enable ? STALL : 0)]; in mlx5_cq_fill_pfns()
1252 cq->ibv_cq.start_poll = poll_ops->start_poll; in mlx5_cq_fill_pfns()
1253 cq->ibv_cq.next_poll = poll_ops->next_poll; in mlx5_cq_fill_pfns()
1254 cq->ibv_cq.end_poll = poll_ops->end_poll; in mlx5_cq_fill_pfns()
1256 cq->ibv_cq.read_opcode = mlx5_cq_read_wc_opcode; in mlx5_cq_fill_pfns()
1257 cq->ibv_cq.read_vendor_err = mlx5_cq_read_wc_vendor_err; in mlx5_cq_fill_pfns()
1258 cq->ibv_cq.read_wc_flags = mlx5_cq_read_wc_flags; in mlx5_cq_fill_pfns()
1260 cq->ibv_cq.read_byte_len = mlx5_cq_read_wc_byte_len; in mlx5_cq_fill_pfns()
1262 cq->ibv_cq.read_imm_data = mlx5_cq_read_wc_imm_data; in mlx5_cq_fill_pfns()
1264 cq->ibv_cq.read_qp_num = mlx5_cq_read_wc_qp_num; in mlx5_cq_fill_pfns()
1266 cq->ibv_cq.read_src_qp = mlx5_cq_read_wc_src_qp; in mlx5_cq_fill_pfns()
1268 cq->ibv_cq.read_slid = mlx5_cq_read_wc_slid; in mlx5_cq_fill_pfns()
1270 cq->ibv_cq.read_sl = mlx5_cq_read_wc_sl; in mlx5_cq_fill_pfns()
1272 cq->ibv_cq.read_dlid_path_bits = mlx5_cq_read_wc_dlid_path_bits; in mlx5_cq_fill_pfns()
1274 cq->ibv_cq.read_completion_ts = mlx5_cq_read_wc_completion_ts; in mlx5_cq_fill_pfns()
1276 cq->ibv_cq.read_cvlan = mlx5_cq_read_wc_cvlan; in mlx5_cq_fill_pfns()
1278 cq->ibv_cq.read_flow_tag = mlx5_cq_read_flow_tag; in mlx5_cq_fill_pfns()
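mlx5_cq_fill_pfns() selects start/next/end_poll according to the stall and single-threaded flags and installs the read_* callbacks for the fields requested in wc_flags at creation time. A usage sketch of asking for those fields, assuming `ctx` is an open struct ibv_context:

#include <infiniband/verbs.h>

static struct ibv_cq_ex *create_lazy_cq(struct ibv_context *ctx)
{
        struct ibv_cq_init_attr_ex cq_attr = {
                .cqe = 256,
                .wc_flags = IBV_WC_EX_WITH_BYTE_LEN |
                            IBV_WC_EX_WITH_QP_NUM |
                            IBV_WC_EX_WITH_COMPLETION_TIMESTAMP,
                .comp_mask = IBV_CQ_INIT_ATTR_MASK_FLAGS,
                /* picks the lock-free pfns chosen via MLX5_CQ_FLAGS_SINGLE_THREADED */
                .flags = IBV_CREATE_CQ_ATTR_SINGLE_THREADED,
        };

        return ibv_create_cq_ex(ctx, &cq_attr);
}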
1283 struct mlx5_cq *cq = to_mcq(ibvcq); in mlx5_arm_cq() local
1290 sn = cq->arm_sn & 3; in mlx5_arm_cq()
1291 ci = cq->cons_index & 0xffffff; in mlx5_arm_cq()
1294 cq->dbrec[MLX5_CQ_ARM_DB] = htobe32(sn << 28 | cmd | ci); in mlx5_arm_cq()
1303 doorbell[1] = htobe32(cq->cqn); in mlx5_arm_cq()
1312 void mlx5_cq_event(struct ibv_cq *cq) in mlx5_cq_event() argument
1314 to_mcq(cq)->arm_sn++; in mlx5_cq_event()
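mlx5_arm_cq() composes the arm doorbell from the sequence number (arm_sn), the command and the 24-bit consumer index, and mlx5_cq_event() bumps arm_sn when a completion event is delivered. From an application the pair is exercised through the notification API; a usage sketch, assuming `channel` is the completion channel the CQ was created on:

#include <infiniband/verbs.h>

static int wait_for_cq_event(struct ibv_comp_channel *channel, struct ibv_cq *ibcq)
{
        struct ibv_cq *ev_cq;
        void *ev_ctx;
        int ret;

        ret = ibv_req_notify_cq(ibcq, 0);       /* reaches mlx5_arm_cq() */
        if (ret)
                return ret;

        /* event delivery bumps arm_sn via the provider's mlx5_cq_event() */
        ret = ibv_get_cq_event(channel, &ev_cq, &ev_ctx);
        if (ret)
                return ret;

        ibv_ack_cq_events(ev_cq, 1);
        return ibv_req_notify_cq(ev_cq, 0);     /* re-arm before polling again */
}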
1363 void __mlx5_cq_clean(struct mlx5_cq *cq, uint32_t rsn, struct mlx5_srq *srq) in __mlx5_cq_clean() argument
1372 if (!cq || cq->flags & MLX5_CQ_FLAGS_DV_OWNED) in __mlx5_cq_clean()
1382 for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index) in __mlx5_cq_clean()
1383 if (prod_index == cq->cons_index + cq->ibv_cq.cqe) in __mlx5_cq_clean()
1390 cqe_version = (to_mctx(cq->ibv_cq.context))->cqe_version; in __mlx5_cq_clean()
1391 while ((int) --prod_index - (int) cq->cons_index >= 0) { in __mlx5_cq_clean()
1392 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); in __mlx5_cq_clean()
1393 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_cq_clean()
1397 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe); in __mlx5_cq_clean()
1398 dest64 = (cq->cqe_sz == 64) ? dest : dest + 64; in __mlx5_cq_clean()
1400 memcpy(dest, cqe, cq->cqe_sz); in __mlx5_cq_clean()
1407 cq->cons_index += nfreed; in __mlx5_cq_clean()
1413 update_cons_index(cq); in __mlx5_cq_clean()
1417 void mlx5_cq_clean(struct mlx5_cq *cq, uint32_t qpn, struct mlx5_srq *srq) in mlx5_cq_clean() argument
1419 mlx5_spin_lock(&cq->lock); in mlx5_cq_clean()
1420 __mlx5_cq_clean(cq, qpn, srq); in mlx5_cq_clean()
1421 mlx5_spin_unlock(&cq->lock); in mlx5_cq_clean()
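__mlx5_cq_clean() is called when a QP or SRQ is destroyed: it walks back from the newest software-owned CQE toward cons_index, drops entries that belong to the victim rsn, slides survivors toward the producer end by the number freed, and finally advances cons_index by nfreed. A simplified sketch of that compaction over a plain array (ownership bits and the SRQ case omitted):

#include <stdint.h>

static uint32_t clean_ring(uint32_t *ring, uint32_t mask,
                           uint32_t cons, uint32_t prod, uint32_t rsn)
{
        uint32_t nfreed = 0;
        uint32_t i = prod;

        while ((int)(i - cons) > 0) {
                --i;
                if ((ring[i & mask] & 0xffffff) == rsn)
                        ++nfreed;                                    /* drop this entry */
                else if (nfreed)
                        ring[(i + nfreed) & mask] = ring[i & mask];  /* slide survivor */
        }

        return nfreed;  /* caller does cons_index += nfreed, then publishes it */
}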
1434 void mlx5_cq_resize_copy_cqes(struct mlx5_cq *cq) in mlx5_cq_resize_copy_cqes() argument
1446 ssize = cq->cqe_sz; in mlx5_cq_resize_copy_cqes()
1447 dsize = cq->resize_cqe_sz; in mlx5_cq_resize_copy_cqes()
1449 i = cq->cons_index; in mlx5_cq_resize_copy_cqes()
1450 scqe = get_buf_cqe(cq->active_buf, i & cq->active_cqes, ssize); in mlx5_cq_resize_copy_cqes()
1453 if (is_hw(scqe64->op_own, i, cq->active_cqes)) { in mlx5_cq_resize_copy_cqes()
1459 dcqe = get_buf_cqe(cq->resize_buf, (i + 1) & (cq->resize_cqes - 1), dsize); in mlx5_cq_resize_copy_cqes()
1461 sw_own = sw_ownership_bit(i + 1, cq->resize_cqes); in mlx5_cq_resize_copy_cqes()
1466 scqe = get_buf_cqe(cq->active_buf, i & cq->active_cqes, ssize); in mlx5_cq_resize_copy_cqes()
1468 if (is_hw(scqe64->op_own, i, cq->active_cqes)) { in mlx5_cq_resize_copy_cqes()
1478 ++cq->cons_index; in mlx5_cq_resize_copy_cqes()
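mlx5_cq_resize_copy_cqes() moves the still-unconsumed software CQEs from the old buffer into the resize buffer, rewriting each entry's owner bit for its new position so the ownership handshake stays valid after the switch. A sketch of the ownership helper assumed at line 1461:

static inline unsigned char sw_ownership_bit_sketch(int n, int nent)
{
        /* nent is the new ring size (a power of two); the bit is the wrap parity */
        return (n & nent) ? 1 : 0;
}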
1481 int mlx5_alloc_cq_buf(struct mlx5_context *mctx, struct mlx5_cq *cq, in mlx5_alloc_cq_buf() argument