Searched refs:cqe (Results 1 – 25 of 133) sorted by relevance

/freebsd/contrib/ofed/libmlx4/
cq.c
104 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe() local
105 struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe; in get_sw_cqe()
108 !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
116 static enum ibv_wc_status mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe) in mlx4_handle_error_cqe() argument
118 if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) in mlx4_handle_error_cqe()
122 htobe32(cqe->vlan_my_qpn), htobe32(cqe->wqe_index), in mlx4_handle_error_cqe()
123 cqe->vendor_err, in mlx4_handle_error_cqe()
124 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); in mlx4_handle_error_cqe()
126 switch (cqe->syndrome) { in mlx4_handle_error_cqe()
158 static inline void handle_good_req(struct ibv_wc *wc, struct mlx4_cqe *cqe) in handle_good_req() argument
[all …]
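
The get_sw_cqe() hits above implement mlx4's ownership convention: hardware toggles the owner bit each time it wraps the ring, so an entry is software-owned exactly when the bit agrees with the wrap parity of the consumer index. A minimal standalone sketch of that test (struct and mask value are assumptions redefined here for illustration):

#include <stdint.h>
#include <stddef.h>

#define MLX4_CQE_OWNER_MASK 0x80	/* assumed: top bit of owner_sr_opcode */

struct mlx4_cqe_sketch {
	uint8_t owner_sr_opcode;	/* other fields elided */
};

/*
 * n is the running consumer index, mask is ring_size - 1 (the value the
 * driver keeps in ibv_cq.cqe).  The entry is readable by software only
 * when the owner bit matches how many times n has wrapped the ring.
 */
static struct mlx4_cqe_sketch *
get_sw_cqe_sketch(struct mlx4_cqe_sketch *ring, uint32_t n, uint32_t mask)
{
	struct mlx4_cqe_sketch *cqe = &ring[n & mask];
	int hw_bit = !!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK);
	int parity = !!(n & (mask + 1));

	return (hw_bit ^ parity) ? NULL : cqe;
}
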
verbs.c
415 ret = ibv_cmd_create_cq(context, cq_attr->cqe, cq_attr->channel, in mlx4_cmd_create_cq()
460 if (cq_attr->cqe > 0x3fffff) { in create_cq()
497 cq_attr->cqe = align_queue_size(cq_attr->cqe + 1); in create_cq()
499 if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cq_attr->cqe, mctx->cqe_size)) in create_cq()
517 --cq_attr->cqe; in create_cq()
547 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe, in mlx4_create_cq() argument
552 struct ibv_cq_init_attr_ex cq_attr = {.cqe = cqe, .channel = channel, in mlx4_create_cq()
567 struct ibv_cq_init_attr_ex cq_attr_c = {.cqe = cq_attr->cqe, in mlx4_create_cq_ex()
577 int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe) in mlx4_resize_cq() argument
586 if (cqe > 0x3fffff) in mlx4_resize_cq()
[all …]
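
create_cq() above rejects depths over 0x3fffff, rounds the requested depth plus one spare slot up to a power of two, and reports one less than it allocated back to the caller. A stand-in for the rounding helper, with its behavior assumed from the call site:

#include <stdint.h>

/* Round up to the next power of two so the consumer index can wrap
 * with a simple mask instead of a modulo. */
static uint32_t
align_queue_size_sketch(uint32_t req)
{
	uint32_t size;

	for (size = 1; size < req; size <<= 1)
		;
	return (size);
}

/* e.g. a request for 1000 CQEs becomes 1001 -> 1024 ring entries, and
 * the caller is told the CQ holds 1023 usable entries. */
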
/freebsd/sys/dev/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
[all …]
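
Unlike mlx4's parity scheme, the mthca hits above use a plain hand-back bit: software owns an entry while MTHCA_CQ_ENTRY_OWNER_HW is clear, and returns it to hardware by setting the bit. A standalone sketch (bit position assumed):

#include <stdint.h>
#include <stddef.h>

#define MTHCA_CQ_ENTRY_OWNER_HW 0x80	/* assumed bit position */

struct mthca_cqe_sketch {
	uint8_t owner;			/* other fields elided */
};

/* NULL means hardware still owns the entry. */
static struct mthca_cqe_sketch *
cqe_sw_sketch(struct mthca_cqe_sketch *cqe)
{
	return ((cqe->owner & MTHCA_CQ_ENTRY_OWNER_HW) ? NULL : cqe);
}

/* Give the entry back to hardware after polling it. */
static void
set_cqe_hw_sketch(struct mthca_cqe_sketch *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
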
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_cq.c
82 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
83 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
86 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
134 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
136 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
141 struct ib_umem **umem, u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
148 *umem = ib_umem_get(&context->ibucontext, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
178 int entries = attr->cqe; in mlx4_ib_create_cq()
194 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
270 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
[all …]
/freebsd/contrib/ofed/libcxgb4/
cq.c
44 struct t4_cqe cqe; in insert_recv_cqe() local
48 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
49 cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH) | in insert_recv_cqe()
54 cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen)); in insert_recv_cqe()
55 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
77 struct t4_cqe cqe; in insert_sq_cqe() local
81 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
82 cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH) | in insert_sq_cqe()
87 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
88 cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen)); in insert_sq_cqe()
[all …]
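
insert_recv_cqe()/insert_sq_cqe() above fabricate completions in a software shadow queue so that work requests still pending at QP teardown complete with T4_ERR_SWFLUSH instead of hanging. A hedged sketch of the pattern; the field-packing macros, shift positions, and status value below are placeholders, not the real t4 layout:

#include <sys/endian.h>		/* htobe32/htobe64 on FreeBSD */
#include <stdint.h>
#include <string.h>

#define T4_ERR_SWFLUSH		5			/* placeholder value */
#define V_CQE_STATUS(x)		((uint32_t)(x) << 5)	/* placeholder shift */
#define V_CQE_GENBIT(x)		((uint64_t)(x) << 63)	/* placeholder shift */

struct t4_cqe_sketch {
	uint32_t header;	/* status/opcode/QP id, big-endian */
	uint64_t bits_type_ts;	/* generation bit in the top bits */
};

/* Append one synthetic "software flushed" completion at sw_pidx; the
 * real code also encodes the opcode, QP id, and WR id. */
static void
insert_flush_cqe_sketch(struct t4_cqe_sketch *sw_queue, uint16_t sw_pidx,
    uint64_t gen)
{
	struct t4_cqe_sketch cqe;

	memset(&cqe, 0, sizeof(cqe));
	cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH));
	cqe.bits_type_ts = htobe64(V_CQE_GENBIT(gen));
	sw_queue[sw_pidx] = cqe;
}
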
/freebsd/contrib/ofed/libmlx5/
cq.c
66 static inline uint8_t get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l3_hdr_type() argument
68 return (cqe->l4_hdr_type_etc >> 2) & 0x3; in get_cqe_l3_hdr_type()
83 void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe); in get_sw_cqe() local
86 cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
89 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) { in get_sw_cqe()
90 return cqe; in get_sw_cqe()
106 static inline void handle_good_req(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_wq *wq, i… in handle_good_req() argument
108 switch (be32toh(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
124 wc->byte_len = be32toh(cqe->byte_cnt); in handle_good_req()
143 static inline int handle_responder_lazy(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe, in handle_responder_lazy() argument
[all …]
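
The libmlx5 poll path above also shows the stride trick: a CQ may be built with 128-byte entries, but the parsed mlx5_cqe64 always sits in the last 64 bytes of the entry. A sketch of that addressing, with the sizes hardcoded for illustration:

/* Return the 64-byte view of a CQE regardless of the CQ's entry size. */
static void *
cqe64_of(void *cqe, int cqe_sz)
{
	return (cqe_sz == 64 ? cqe : (char *)cqe + 64);
}
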
mlx5dv.h
312 uint8_t mlx5dv_get_cqe_owner(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_owner() argument
314 return cqe->op_own & 0x1; in mlx5dv_get_cqe_owner()
318 void mlx5dv_set_cqe_owner(struct mlx5_cqe64 *cqe, uint8_t val) in mlx5dv_set_cqe_owner() argument
320 cqe->op_own = (val & 0x1) | (cqe->op_own & ~0x1); in mlx5dv_set_cqe_owner()
325 uint8_t mlx5dv_get_cqe_se(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_se() argument
327 return (cqe->op_own >> 1) & 0x1; in mlx5dv_get_cqe_se()
331 uint8_t mlx5dv_get_cqe_format(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_format() argument
333 return (cqe->op_own >> 2) & 0x3; in mlx5dv_get_cqe_format()
337 uint8_t mlx5dv_get_cqe_opcode(struct mlx5_cqe64 *cqe) in mlx5dv_get_cqe_opcode() argument
339 return cqe->op_own >> 4; in mlx5dv_get_cqe_opcode()
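
All of the mlx5dv accessors above carve fields out of the single op_own byte: bit 0 owner, bit 1 solicited event, bits 2-3 format, bits 4-7 opcode. A quick standalone decode with an example value:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t op_own = 0xa5;	/* example: 1010 0101 */

	printf("owner=%d se=%d format=%d opcode=%#x\n",
	    op_own & 0x1,			/* 1 */
	    (op_own >> 1) & 0x1,		/* 0 */
	    (op_own >> 2) & 0x3,		/* 1 */
	    (unsigned)(op_own >> 4));		/* 0xa */
	return (0);
}
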
/freebsd/sys/dev/cxgbe/iw_cxgbe/
cq.c
206 struct t4_cqe cqe; in insert_recv_cqe() local
210 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
211 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | in insert_recv_cqe()
216 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); in insert_recv_cqe()
217 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
239 struct t4_cqe cqe; in insert_sq_cqe() local
243 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
244 cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | in insert_sq_cqe()
249 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
250 cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); in insert_sq_cqe()
[all …]
/freebsd/sys/dev/mlx5/mlx5_en/
mlx5_en_rx.c
132 mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe) in mlx5e_lro_update_hdr() argument
148 l4_hdr_type = get_cqe_l4_hdr_type(cqe); in mlx5e_lro_update_hdr()
153 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN; in mlx5e_lro_update_hdr()
170 if (get_cqe_lro_tcppsh(cqe)) in mlx5e_lro_update_hdr()
175 th->th_ack = cqe->lro_ack_seq_num; in mlx5e_lro_update_hdr()
176 th->th_win = cqe->lro_tcp_win; in mlx5e_lro_update_hdr()
189 if (get_cqe_lro_timestamp_valid(cqe) && in mlx5e_lro_update_hdr()
198 ts_ptr[1] = *(uint32_t *)&cqe->timestamp; in mlx5e_lro_update_hdr()
199 ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1); in mlx5e_lro_update_hdr()
203 ip4->ip_ttl = cqe->lro_min_ttl; in mlx5e_lro_update_hdr()
[all …]
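
mlx5e_lro_update_hdr() above rewrites the headers of a hardware-coalesced LRO aggregate so the stack sees one large frame: the IP total length is recomputed from the CQE byte count, while TTL, ACK sequence number, and TCP window are copied from CQE fields. A minimal sketch of the IPv4 length fixup alone (assumes a plain Ethernet header, no VLAN tag or IP options):

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <net/ethernet.h>

/* byte_cnt is the CQE's byte count for the whole aggregate, counted
 * from the start of the Ethernet header. */
static void
lro_fix_ip_len(struct ip *ip4, uint32_t byte_cnt)
{
	ip4->ip_len = htons((uint16_t)(byte_cnt - ETHER_HDR_LEN));
}
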
mlx5_en_txrx.c
36 struct mlx5_cqe64 *cqe; in mlx5e_get_cqe() local
38 cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq)); in mlx5e_get_cqe()
40 if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK) in mlx5e_get_cqe()
46 return (cqe); in mlx5e_get_cqe()
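
mlx5e_get_cqe() above is the same ownership idea expressed with a wrap counter: the owner bit of op_own must match the low bit of how many times the queue has wrapped. Sketch with simplified names:

#include <stdbool.h>
#include <stdint.h>

#define MLX5_CQE_OWNER_MASK 0x1

static bool
cqe_is_valid_sketch(uint8_t op_own, uint32_t wrap_cnt)
{
	/* A mismatch means hardware has not written this entry yet. */
	return (((op_own ^ (uint8_t)wrap_cnt) & MLX5_CQE_OWNER_MASK) == 0);
}
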
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_cq.c
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
88 return cqe; in get_sw_cqe()
117 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
121 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
135 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
164 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
268 dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) in dump_cqe() argument
281 mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc) in mlx5_handle_error_cqe() argument
405 get_sig_err_item(struct mlx5_sig_err_cqe *cqe, struct ib_sig_err *item) in get_sig_err_item() argument
530 void *cqe; in mlx5_poll_one() local
843 void *cqe; in init_cq_buf() local
1037 void *cqe, *dest; in __mlx5_ib_cq_clean() local
[all …]
/freebsd/sys/dev/bnxt/bnxt_re/
qplib_fp.c
1608 struct cq_req *cqe = (struct cq_req *)hw_cqe; in __clean_cq() local
1610 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1611 cqe->qp_handle = 0; in __clean_cq()
1618 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe; in __clean_cq() local
1620 if (qp == le64_to_cpu(cqe->qp_handle)) in __clean_cq()
1621 cqe->qp_handle = 0; in __clean_cq()
2621 struct bnxt_qplib_cqe *cqe; in __flush_sq() local
2627 cqe = *pcqe; in __flush_sq()
2639 memset(cqe, 0, sizeof(*cqe)); in __flush_sq()
2640 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR; in __flush_sq()
[all …]
ib_verbs.c
3729 int cqe = attr->cqe; in bnxt_re_create_cq() local
3754 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { in bnxt_re_create_cq()
3783 cqe *= 3; in bnxt_re_create_cq()
3786 entries = bnxt_re_init_depth(cqe + 1, uctx); in bnxt_re_create_cq()
3880 cq->ibcq.cqe = entries; in bnxt_re_create_cq()
3985 int bnxt_re_resize_cq(struct ib_cq *ib_cq, int cqe, struct ib_udata *udata) in bnxt_re_resize_cq() argument
4017 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { in bnxt_re_resize_cq()
4023 entries = bnxt_re_init_depth(cqe + 1, uctx); in bnxt_re_resize_cq()
4029 if (entries == cq->ibcq.cqe) { in bnxt_re_resize_cq()
4030 dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe); in bnxt_re_resize_cq()
[all …]
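
bnxt_re_resize_cq() above guards the resize with two cheap checks before touching hardware: the requested depth must be within device limits, and a request for the current size is a no-op. A condensed sketch of the same checks (the limit is a placeholder):

#include <errno.h>

#define MAX_CQ_WQES 65535	/* placeholder device limit */

/* <0: reject, 0: already that size, >0: go ahead and reallocate. */
static int
resize_cq_checks_sketch(int requested, int current)
{
	if (requested < 1 || requested > MAX_CQ_WQES)
		return (-EINVAL);
	if (requested == current)
		return (0);
	return (1);
}
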
/freebsd/sys/dev/oce/
oce_if.c
163 static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
164 static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
165 static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
169 static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe…
1393 struct oce_nic_tx_cqe *cqe; in oce_wq_handler() local
1399 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); in oce_wq_handler()
1400 while (cqe->u0.dw[3]) { in oce_wq_handler()
1401 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); in oce_wq_handler()
1403 wq->ring->cidx = cqe->u0.s.wqe_index + 1; in oce_wq_handler()
1409 cqe->u0.dw[3] = 0; in oce_wq_handler()
[all …]
oce_queue.c
1024 struct oce_nic_tx_cqe *cqe; in oce_drain_wq_cq() local
1031 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); in oce_drain_wq_cq()
1032 if (cqe->u0.dw[3] == 0) in oce_drain_wq_cq()
1034 cqe->u0.dw[3] = 0; in oce_drain_wq_cq()
1067 struct oce_nic_rx_cqe *cqe; in oce_drain_rq_cq() local
1074 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); in oce_drain_rq_cq()
1076 while (RQ_CQE_VALID(cqe)) { in oce_drain_rq_cq()
1077 RQ_CQE_INVALIDATE(cqe); in oce_drain_rq_cq()
1079 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, in oce_drain_rq_cq()
1113 struct nic_hwlro_singleton_cqe *cqe; in oce_rx_cq_clean_hwlro() local
[all …]
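
The oce drain loops above consume entries until the DW3 valid marker reads zero, clearing each marker as they go so the slot can be reused on the next lap. A generic sketch of that drain style (all names hypothetical):

#include <stdint.h>

struct ring_cqe_sketch {
	uint32_t dw[4];		/* dw[3] nonzero == entry valid */
};

static int
drain_ring_sketch(struct ring_cqe_sketch *ring, uint32_t *cidx, uint32_t mask)
{
	int drained = 0;

	while (ring[*cidx & mask].dw[3] != 0) {
		ring[*cidx & mask].dw[3] = 0;	/* invalidate for reuse */
		(*cidx)++;
		drained++;
	}
	return (drained);
}
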
/freebsd/sys/dev/nvmf/host/
nvmf_aer.c
35 static void nvmf_complete_aer(void *arg, const struct nvme_completion *cqe);
124 nvmf_complete_aer_page(void *arg, const struct nvme_completion *cqe) in nvmf_complete_aer_page() argument
130 aer->status = cqe->status; in nvmf_complete_aer_page()
154 nvmf_complete_aer(void *arg, const struct nvme_completion *cqe) in nvmf_complete_aer() argument
168 if (cqe->status != 0) { in nvmf_complete_aer()
169 if (!nvmf_cqe_aborted(cqe)) in nvmf_complete_aer()
171 le16toh(cqe->status)); in nvmf_complete_aer()
175 cdw0 = le32toh(cqe->cdw0); in nvmf_complete_aer()
230 if (status.cqe.status != 0) { in nvmf_set_async_event_config()
233 le16toh(status.cqe.status)); in nvmf_set_async_event_config()
nvmf_qpair.c
95 struct nvme_completion cqe; in nvmf_abort_request() local
97 memset(&cqe, 0, sizeof(cqe)); in nvmf_abort_request()
98 cqe.cid = cid; in nvmf_abort_request()
99 cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) | in nvmf_abort_request()
101 req->cb(req->cb_arg, &cqe); in nvmf_abort_request()
176 const struct nvme_completion *cqe; in nvmf_receive_capsule() local
179 cqe = nvmf_capsule_cqe(nc); in nvmf_receive_capsule()
188 cid = cqe->cid; in nvmf_receive_capsule()
202 qp->sqhd = le16toh(cqe->sqhd); in nvmf_receive_capsule()
248 req->cb(req->cb_arg, cqe); in nvmf_receive_capsule()
nvmf.c
40 nvmf_complete(void *arg, const struct nvme_completion *cqe) in nvmf_complete() argument
45 status->cqe = *cqe; in nvmf_complete()
92 if (status.cqe.status != 0) { in nvmf_read_property()
94 le16toh(status.cqe.status)); in nvmf_read_property()
98 rsp = (const struct nvmf_fabric_prop_get_rsp *)&status.cqe; in nvmf_read_property()
118 if (status.cqe.status != 0) { in nvmf_write_property()
120 le16toh(status.cqe.status)); in nvmf_write_property()
164 nvmf_keep_alive_complete(void *arg, const struct nvme_completion *cqe) in nvmf_keep_alive_complete() argument
169 if (cqe->status != 0) { in nvmf_keep_alive_complete()
172 le16toh(cqe->status)); in nvmf_keep_alive_complete()
[all …]
nvmf_var.h
107 struct nvme_completion cqe; member
121 nvmf_cqe_aborted(const struct nvme_completion *cqe) in nvmf_cqe_aborted() argument
125 status = le16toh(cqe->status); in nvmf_cqe_aborted()
156 void nvmf_complete(void *arg, const struct nvme_completion *cqe);
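
nvmf_cqe_aborted() above decides whether a completion was a transport-level abort by decoding the little-endian status word. A hedged sketch of that decode: the bit positions follow the NVMe completion layout (SC in bits 8:1, SCT in bits 11:9), and the path-related "Command Aborted By Host" code is my reading of the spec, not copied from the driver:

#include <sys/endian.h>		/* le16toh on FreeBSD */
#include <stdbool.h>
#include <stdint.h>

#define SCT_PATH_RELATED		0x3
#define SC_COMMAND_ABORTED_BY_HOST	0x71	/* assumed value */

struct nvme_completion_sketch {
	uint32_t cdw0;
	uint16_t sqhd, sqid, cid;
	uint16_t status;	/* little-endian; bit 0 is the phase tag */
};

static bool
cqe_aborted_sketch(const struct nvme_completion_sketch *cqe)
{
	uint16_t status = le16toh(cqe->status);

	return (((status >> 9) & 0x7) == SCT_PATH_RELATED &&
	    ((status >> 1) & 0xff) == SC_COMMAND_ABORTED_BY_HOST);
}
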
/freebsd/sys/dev/mlx5/mlx5_accel/
ipsec.h
261 void mlx5e_accel_ipsec_handle_rx_cqe(struct mbuf *mb, struct mlx5_cqe64 *cqe,
264 static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe) in mlx5e_accel_ipsec_flow() argument
266 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5e_accel_ipsec_flow()
270 mlx5e_accel_ipsec_handle_rx(struct mbuf *mb, struct mlx5_cqe64 *cqe, in mlx5e_accel_ipsec_handle_rx() argument
273 u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata); in mlx5e_accel_ipsec_handle_rx()
276 mlx5e_accel_ipsec_handle_rx_cqe(mb, cqe, mr); in mlx5e_accel_ipsec_handle_rx()
/freebsd/sys/dev/mlx5/
device.h
733 static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) in get_cqe_opcode() argument
735 return (cqe->op_own >> 4); in get_cqe_opcode()
738 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe) in get_cqe_lro_timestamp_valid() argument
740 return (cqe->lro_tcppsh_abort_dupack >> 7) & 1; in get_cqe_lro_timestamp_valid()
743 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) in get_cqe_lro_tcppsh() argument
745 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh()
748 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l4_hdr_type() argument
750 return (cqe->l4_hdr_type_etc >> 4) & 0x7; in get_cqe_l4_hdr_type()
753 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe) in get_cqe_vlan() argument
755 return be16_to_cpu(cqe->vlan_info) & 0xfff; in get_cqe_vlan()
[all …]
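
get_cqe_vlan() above masks the byte-swapped vlan_info with 0xfff because the field carries a full 802.1Q TCI; the mask keeps the 12-bit VLAN ID and drops priority and DEI. A standalone demo of the split:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t tci = 0xa07b;	/* example TCI already in host order */

	printf("vid=%d pcp=%d dei=%d\n",
	    tci & 0xfff,	/* 123 */
	    tci >> 13,		/* 5 */
	    (tci >> 12) & 1);	/* 0 */
	return (0);
}
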
/freebsd/sys/dev/mlx4/mlx4_en/
mlx4_en_rx.c
604 struct mlx4_cqe *cqe) in invalid_cqe() argument
607 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in invalid_cqe()
610 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, in invalid_cqe()
611 ((struct mlx4_err_cqe *)cqe)->syndrome); in invalid_cqe()
614 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { in invalid_cqe()
741 struct mlx4_cqe *cqe; in mlx4_en_process_rx_cq() local
764 cqe = &buf[CQE_FACTOR_INDEX(index, factor)]; in mlx4_en_process_rx_cq()
767 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_rx_cq()
777 if (invalid_cqe(priv, cqe)) { in mlx4_en_process_rx_cq()
783 length = be32_to_cpu(cqe->byte_cnt); in mlx4_en_process_rx_cq()
[all …]
/freebsd/sys/dev/irdma/
irdma_uk.c
1104 __le64 *cqe, *wqe; in irdma_detect_unsignaled_cmpls() local
1109 cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq); in irdma_detect_unsignaled_cmpls()
1110 irdma_pr_err("%p %d %d\n", cqe, cq->cq_ring.head, wqe_idx); in irdma_detect_unsignaled_cmpls()
1112 IRDMA_GET_CQ_ELEM_AT_OFFSET(cq, i + cq->cq_ring.size, cqe); in irdma_detect_unsignaled_cmpls()
1113 get_64bit_val(cqe, IRDMA_BYTE_0, &qword0); in irdma_detect_unsignaled_cmpls()
1114 get_64bit_val(cqe, IRDMA_BYTE_8, &qword1); in irdma_detect_unsignaled_cmpls()
1115 get_64bit_val(cqe, IRDMA_BYTE_16, &qword2); in irdma_detect_unsignaled_cmpls()
1116 get_64bit_val(cqe, IRDMA_BYTE_24, &qword3); in irdma_detect_unsignaled_cmpls()
1119 i, widx, cqe, qword0, qword1, qword2, qword3); in irdma_detect_unsignaled_cmpls()
1152 __le64 *cqe; in irdma_uk_cq_poll_cmpl() local
[all …]
/freebsd/sys/dev/nvmf/controller/
nvmft_qpair.c
189 _nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) in _nvmft_send_response() argument
196 memcpy(&cpl, cqe, sizeof(cpl)); in _nvmft_send_response()
235 nvmft_send_response(struct nvmft_qpair *qp, const void *cqe) in nvmft_send_response() argument
237 const struct nvme_completion *cpl = cqe; in nvmft_send_response()
244 return (_nvmft_send_response(qp, cqe)); in nvmft_send_response()
248 nvmft_init_cqe(void *cqe, struct nvmf_capsule *nc, uint16_t status) in nvmft_init_cqe() argument
250 struct nvme_completion *cpl = cqe; in nvmft_init_cqe()
/freebsd/sys/dev/mlx5/mlx5_fpga/
H A Dmlx5fpga_conn.c248 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_rq_cqe() argument
253 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); in mlx5_fpga_conn_rq_cqe()
257 buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); in mlx5_fpga_conn_rq_cqe()
289 struct mlx5_cqe64 *cqe, u8 status) in mlx5_fpga_conn_sq_cqe() argument
297 ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1); in mlx5_fpga_conn_sq_cqe()
331 struct mlx5_cqe64 *cqe) in mlx5_fpga_conn_handle_cqe() argument
335 opcode = cqe->op_own >> 4; in mlx5_fpga_conn_handle_cqe()
339 status = ((struct mlx5_err_cqe *)cqe)->syndrome; in mlx5_fpga_conn_handle_cqe()
342 mlx5_fpga_conn_sq_cqe(conn, cqe, status); in mlx5_fpga_conn_handle_cqe()
346 status = ((struct mlx5_err_cqe *)cqe)->syndrome; in mlx5_fpga_conn_handle_cqe()
[all …]
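
mlx5_fpga_conn_handle_cqe() above dispatches on the opcode nibble of op_own and, for the error opcodes, pulls the syndrome out of the error-CQE view of the same 64 bytes. A sketch with assumed opcode values:

#include <stdint.h>
#include <stdio.h>

#define CQE_REQ		0x0	/* assumed: send completion */
#define CQE_RESP_SEND	0x2	/* assumed: recv completion */
#define CQE_REQ_ERR	0xd	/* assumed: send error */
#define CQE_RESP_ERR	0xe	/* assumed: recv error */

static void
handle_cqe_sketch(uint8_t op_own, uint8_t err_syndrome)
{
	switch (op_own >> 4) {
	case CQE_REQ:
		printf("sq completion ok\n");
		break;
	case CQE_RESP_SEND:
		printf("rq completion ok\n");
		break;
	case CQE_REQ_ERR:
		printf("sq error, syndrome %#x\n", err_syndrome);
		break;
	case CQE_RESP_ERR:
		printf("rq error, syndrome %#x\n", err_syndrome);
		break;
	default:
		printf("unexpected opcode %#x\n", op_own >> 4);
	}
}
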
