
Searched refs:cqe (Results 1 – 25 of 215) sorted by relevance

/linux/drivers/infiniband/hw/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
[all …]
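
The mthca hits above show the classic hardware CQ handshake: each CQE carries an owner bit, entries still owned by hardware are skipped, and a consumed entry is handed back by setting the owner bit again. A minimal user-space sketch of that polling idiom, assuming a power-of-two ring; MY_OWNER_HW and the struct layout are illustrative, not the real mthca format:

#include <stdint.h>
#include <stddef.h>

#define MY_OWNER_HW 0x80        /* illustrative flag, not the real mthca bit */

struct my_cqe {
    uint8_t  owner;             /* set by software when the slot is free, cleared by HW */
    uint8_t  opcode;
    uint16_t wqe_index;
    uint32_t byte_cnt;
};

/* Return the entry at the consumer index if software owns it, else NULL.
 * Assumes 'size' is a power of two, as the driver guarantees. */
static struct my_cqe *next_sw_cqe(struct my_cqe *ring, unsigned int size,
                                  unsigned int cons_index)
{
    struct my_cqe *cqe = &ring[cons_index & (size - 1)];

    return (cqe->owner & MY_OWNER_HW) ? NULL : cqe;
}

/* After consuming an entry, give it back to hardware, as set_cqe_hw() does. */
static void release_cqe(struct my_cqe *cqe)
{
    cqe->owner = MY_OWNER_HW;
}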
/linux/drivers/infiniband/hw/mlx4/
cq.c
81 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
82 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
85 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
133 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
135 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
140 struct ib_umem **umem, u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
147 *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
179 int entries = attr->cqe; in mlx4_ib_create_cq()
196 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
282 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
[all …]
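
mlx4 rounds the CQ up to a power of two and stores ibcq.cqe as entries - 1, so n & (cq->ibcq.cqe + 1) is the wrap parity of consumer index n; get_sw_cqe() treats an entry as software-owned only when its owner bit matches that parity. A hedged restatement of the test with illustrative names:

#include <stdint.h>
#include <stdbool.h>

/*
 * 'owner_bit' is the bit hardware flips each time it writes the slot,
 * 'index' is the consumer index, and 'ring_size' is the power-of-two
 * number of entries (ibcq.cqe + 1 in the driver).  The entry belongs to
 * software when the owner bit matches the wrap parity of the current pass.
 */
static bool cqe_is_sw_owned(bool owner_bit, uint32_t index, uint32_t ring_size)
{
    bool pass_parity = !!(index & ring_size);

    return owner_bit == pass_parity;
}

Tracking wrap parity instead of a single valid flag lets hardware overwrite stale entries on each pass without software having to clear them first.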
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
83 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
106 int entries = attr->cqe; in pvrdma_create_cq()
133 cq->ibcq.cqe = entries; in pvrdma_create_cq()
187 cmd->cqe = entries; in pvrdma_create_cq()
196 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
289 cq->ibcq.cqe, &head); in _pvrdma_flush_cqe()
294 cq->ibcq.cqe); in _pvrdma_flush_cqe()
295 struct pvrdma_cqe *cqe; in _pvrdma_flush_cqe() local
299 (cq->ibcq.cqe - head + tail); in _pvrdma_flush_cqe()
303 curr = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
[all …]
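
pvrdma's flush path walks the ring between the consumer head and producer tail, with a special case when the tail has wrapped past the end of the buffer. The occupancy computation it depends on, stated generically (not the pvrdma structures):

#include <stdint.h>

/*
 * Number of entries sitting between consumer 'head' and producer 'tail'
 * in a circular buffer of 'size' slots, covering the wrapped case.
 */
static uint32_t ring_occupancy(uint32_t head, uint32_t tail, uint32_t size)
{
    if (tail >= head)
        return tail - head;
    return size - head + tail;
}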
/linux/drivers/infiniband/sw/siw/
siw_cq.c
50 struct siw_cqe *cqe; in siw_reap_cqe() local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe]; in siw_reap_cqe()
56 if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
59 wc->byte_len = cqe->bytes; in siw_reap_cqe()
67 if (cqe->flags & SIW_WQE_REM_INVAL) { in siw_reap_cqe()
68 wc->ex.invalidate_rkey = cqe->inval_stag; in siw_reap_cqe()
71 wc->qp = cqe->base_qp; in siw_reap_cqe()
72 wc->opcode = map_wc_opcode[cqe->opcode]; in siw_reap_cqe()
73 wc->status = map_cqe_status[cqe->status].ib; in siw_reap_cqe()
[all …]
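
siw shares its CQ with the user-space provider, so the valid flag is loaded with READ_ONCE() before any other field is trusted, and the per-CQE opcode/status values are translated into ib_wc values through lookup tables. A simplified user-space sketch of the reap step; the types, the WQE_VALID flag, and the volatile load standing in for READ_ONCE() are illustrative:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define WQE_VALID 0x1u                      /* illustrative flag */

struct toy_cqe {
    uint64_t id;
    uint32_t bytes;
    uint8_t  flags;
    uint8_t  opcode;
};

struct toy_wc {
    uint64_t wr_id;
    uint32_t byte_len;
    int      opcode;
};

/* Consume one entry if it is marked valid; returns true on success. */
static bool reap_cqe(struct toy_cqe *queue, uint32_t num_cqe,
                     uint32_t *cq_get, struct toy_wc *wc)
{
    struct toy_cqe *cqe = &queue[*cq_get % num_cqe];

    /* The kernel loads this with READ_ONCE(); a volatile read stands in. */
    if (!(*(volatile uint8_t *)&cqe->flags & WQE_VALID))
        return false;

    memset(wc, 0, sizeof(*wc));
    wc->wr_id    = cqe->id;
    wc->byte_len = cqe->bytes;
    wc->opcode   = cqe->opcode;             /* siw maps this via a table */

    cqe->flags = 0;                         /* hand the slot back */
    (*cq_get)++;
    return true;
}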
/linux/drivers/infiniband/sw/rxe/
rxe_cq.c
12 int cqe, int comp_vector) in rxe_cq_chk_attr() argument
16 if (cqe <= 0) { in rxe_cq_chk_attr()
17 rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe); in rxe_cq_chk_attr()
21 if (cqe > rxe->attr.max_cqe) { in rxe_cq_chk_attr()
23 cqe, rxe->attr.max_cqe); in rxe_cq_chk_attr()
29 if (cqe < count) { in rxe_cq_chk_attr()
31 cqe, count); in rxe_cq_chk_attr()
42 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, in rxe_cq_from_init() argument
50 cq->queue = rxe_queue_init(rxe, &cqe, in rxe_cq_from_init()
68 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
[all …]
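
rxe_cq_chk_attr() rejects a requested CQE count that is non-positive, above the device maximum, or (when resizing) smaller than the number of completions already queued. The same checks in a stand-alone sketch, with the limits passed in as parameters for illustration:

#include <stdio.h>
#include <errno.h>

static int check_cq_size(int cqe, int max_cqe, int already_queued)
{
    if (cqe <= 0) {
        fprintf(stderr, "cqe(%d) <= 0\n", cqe);
        return -EINVAL;
    }
    if (cqe > max_cqe) {
        fprintf(stderr, "cqe(%d) > max_cqe(%d)\n", cqe, max_cqe);
        return -EINVAL;
    }
    if (cqe < already_queued) {             /* resize below current usage */
        fprintf(stderr, "cqe(%d) < queued completions (%d)\n",
                cqe, already_queued);
        return -EINVAL;
    }
    return 0;
}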
/linux/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h
111 static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
116 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
118 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
120 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
124 char *cqe, u32 val) \
129 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
132 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
135 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
153 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
164 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
[all …]
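
mlxsw has to cope with several CQE formats, so pci_hw.h generates one wrapper per field that dispatches on the CQE version at run time, while the per-version getters do the raw field extraction. A sketch of that macro shape with a made-up field layout and only two versions:

#include <stdint.h>

enum toy_cqe_v { TOY_CQE_V0, TOY_CQE_V1 };

/* Per-version raw getters; the offsets and widths here are made up. */
static inline uint32_t toy_cqe0_byte_count_get(const char *cqe)
{
    return ((const uint32_t *)(const void *)cqe)[1] & 0x3fff;
}

static inline uint32_t toy_cqe1_byte_count_get(const char *cqe)
{
    return ((const uint32_t *)(const void *)cqe)[2] & 0x3fff;
}

/* One macro emits a version-dispatching wrapper per field. */
#define TOY_CQE_ITEM(name)                                              \
static inline uint32_t toy_cqe_##name##_get(enum toy_cqe_v v,           \
                                            const char *cqe)            \
{                                                                       \
    switch (v) {                                                        \
    case TOY_CQE_V0:                                                    \
        return toy_cqe0_##name##_get(cqe);                              \
    default:                                                            \
        return toy_cqe1_##name##_get(cqe);                              \
    }                                                                   \
}

TOY_CQE_ITEM(byte_count)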
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
65 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
69 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
90 struct mlx5_cqe64 *cqe) in mlx5e_read_enhanced_title_slot() argument
95 memcpy(title, cqe, sizeof(struct mlx5_cqe64)); in mlx5e_read_enhanced_title_slot()
138 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
140 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
146 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
[all …]
wq.h
202 struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix); in mlx5_cqwq_get_wqe() local
205 cqe += wq->fbc.log_stride == 7; in mlx5_cqwq_get_wqe()
207 return cqe; in mlx5_cqwq_get_wqe()
233 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5_cqwq_get_cqe() local
234 u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; in mlx5_cqwq_get_cqe()
243 return cqe; in mlx5_cqwq_get_cqe()
251 struct mlx5_cqe64 *cqe; in mlx5_cqwq_get_cqe_enahnced_comp() local
253 cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5_cqwq_get_cqe_enahnced_comp()
254 if (cqe->validity_iteration_count != sw_validity_iteration_count) in mlx5_cqwq_get_cqe_enahnced_comp()
260 return cqe; in mlx5_cqwq_get_cqe_enahnced_comp()
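
mlx5_cqwq_get_cqe_enahnced_comp() (the misspelling is in the kernel source) uses a different validity scheme from the owner bit: each enhanced CQE is stamped with the iteration in which hardware wrote it, and it only counts as new when that stamp matches the iteration software is currently consuming. A generic sketch of the check with a toy structure, not the mlx5_cqe64 layout:

#include <stdint.h>
#include <stddef.h>

struct toy_cqe {
    uint8_t validity_iteration_count;
    /* ... payload fields ... */
};

/*
 * The slot is a fresh completion only when the iteration count stamped by
 * hardware equals the iteration software is currently consuming; otherwise
 * it still holds an entry from a previous pass.  'ring_size' is assumed to
 * be a power of two.
 */
static struct toy_cqe *get_enhanced_cqe(struct toy_cqe *ring, uint32_t ci,
                                        uint32_t ring_size,
                                        uint8_t sw_iteration)
{
    struct toy_cqe *cqe = &ring[ci & (ring_size - 1)];

    if (cqe->validity_iteration_count != sw_iteration)
        return NULL;
    return cqe;
}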
/linux/tools/testing/selftests/net/
io_uring_zerocopy_tx.c
97 struct io_uring_cqe *cqe; in do_tx() local
157 ret = io_uring_wait_cqe(&ring, &cqe); in do_tx()
161 if (cqe->user_data != NONZC_TAG && in do_tx()
162 cqe->user_data != ZC_TAG) in do_tx()
165 if (cqe->flags & IORING_CQE_F_NOTIF) { in do_tx()
166 if (cqe->flags & IORING_CQE_F_MORE) in do_tx()
175 if (cqe->flags & IORING_CQE_F_MORE) { in do_tx()
176 if (cqe->user_data != ZC_TAG) in do_tx()
177 error(1, cqe->res, "unexpected F_MORE"); in do_tx()
180 if (cqe->res >= 0) { in do_tx()
[all …]
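
The selftest reaps completions and tells the immediate send result apart from the later zero-copy notification via IORING_CQE_F_MORE and IORING_CQE_F_NOTIF. A minimal sketch of the same reaping loop with liburing, assuming a liburing/kernel combination that supports zero-copy send; ZC_TAG and the error handling are illustrative:

#include <liburing.h>
#include <stdio.h>

#define ZC_TAG 1            /* illustrative tag, set as the sqe user_data */

/*
 * Reap the completions for one zero-copy send: first the send result, then,
 * if IORING_CQE_F_MORE was set on it, a second CQE flagged IORING_CQE_F_NOTIF
 * once the kernel no longer needs the buffer.
 */
static int reap_zc_send(struct io_uring *ring)
{
    struct io_uring_cqe *cqe;
    int pending_notif = 0;

    do {
        if (io_uring_wait_cqe(ring, &cqe))
            return -1;

        if (cqe->user_data != ZC_TAG) {
            fprintf(stderr, "unexpected user_data %llu\n",
                    (unsigned long long)cqe->user_data);
            return -1;
        }

        if (cqe->flags & IORING_CQE_F_NOTIF) {
            pending_notif--;                 /* buffer may be reused now */
        } else {
            if (cqe->res < 0)
                return cqe->res;             /* the send itself failed */
            if (cqe->flags & IORING_CQE_F_MORE)
                pending_notif++;             /* a notification will follow */
        }
        io_uring_cqe_seen(ring, cqe);
    } while (pending_notif > 0);

    return 0;
}

The send itself would be queued with io_uring_prep_send_zc() on liburing 2.3+; without zero-copy support the plain send path completes in a single CQE, which is the NONZC_TAG branch in the selftest.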
/linux/drivers/infiniband/hw/mlx5/
cq.c
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
91 return cqe; in get_sw_cqe()
120 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
124 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
140 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
169 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
185 msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn)); in handle_responder()
192 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
[all …]
/linux/drivers/infiniband/hw/erdma/
erdma_cq.c
11 __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci, in get_next_valid_cqe() local
14 be32_to_cpu(READ_ONCE(*cqe))); in get_next_valid_cqe()
16 return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL; in get_next_valid_cqe()
115 struct erdma_cqe *cqe; in erdma_poll_one_cqe() local
122 cqe = get_next_valid_cqe(cq); in erdma_poll_one_cqe()
123 if (!cqe) in erdma_poll_one_cqe()
131 qpn = be32_to_cpu(cqe->qpn); in erdma_poll_one_cqe()
132 wqe_idx = be32_to_cpu(cqe->qe_idx); in erdma_poll_one_cqe()
133 cqe_hdr = be32_to_cpu(cqe->hdr); in erdma_poll_one_cqe()
158 wc->byte_len = be32_to_cpu(cqe->size); in erdma_poll_one_cqe()
[all …]
/linux/drivers/infiniband/sw/rdmavt/
cq.c
55 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
56 head = cq->ibcq.cqe; in rvt_cq_enter()
169 unsigned int entries = attr->cqe; in rvt_create_cq()
251 cq->ibcq.cqe = entries; in rvt_create_cq()
340 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in rvt_resize_cq() argument
352 if (cqe < 1 || cqe > rdi->dparms.props.max_cqe) in rvt_resize_cq()
359 sz = sizeof(struct ib_uverbs_wc) * (cqe + 1); in rvt_resize_cq()
365 sz = sizeof(struct ib_wc) * (cqe + 1); in rvt_resize_cq()
395 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
396 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
31 struct nix_cqe_rx_s *cqe,
117 struct nix_cqe_tx_s *cqe) in otx2_xdp_snd_pkt_handler() argument
119 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_xdp_snd_pkt_handler()
136 struct nix_cqe_tx_s *cqe, in otx2_snd_pkt_handler() argument
139 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_snd_pkt_handler()
233 struct nix_cqe_rx_s *cqe, struct sk_buff *skb) in otx2_set_rxhash() argument
249 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
254 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
257 struct nix_rx_sg_s *sg = &cqe->sg; in otx2_free_rcv_seg()
263 end = start + ((cqe->parse.desc_sizem1 + 1) * 16); in otx2_free_rcv_seg()
[all …]
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c
650 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
652 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
660 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
661 cqe->header_len; in qede_set_gro_params()
833 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
835 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
840 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
843 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
864 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
867 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
[all …]
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
325 rq->cqe = vzalloc(cqe_size); in alloc_rq_cqe()
326 if (!rq->cqe) in alloc_rq_cqe()
335 rq->cqe[i] = dma_alloc_coherent(&pdev->dev, in alloc_rq_cqe()
336 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
338 if (!rq->cqe[i]) in alloc_rq_cqe()
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
352 vfree(rq->cqe); in alloc_rq_cqe()
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
372 vfree(rq->cqe); in free_rq_cqe()
[all …]
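
hinic backs each receive CQE with its own coherent DMA buffer: a vzalloc'd array of pointers is filled by per-entry dma_alloc_coherent() calls, and a failure part-way through unwinds everything already allocated. A kernel-style sketch of that pattern, assuming a kernel build environment; the toy_rq/toy_cqe types and field names are illustrative:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

struct toy_cqe { u32 status; };

struct toy_rq {
    struct toy_cqe **cqe;
    dma_addr_t *cqe_dma;
    int q_depth;
};

/* Allocate one coherent buffer per CQE slot, unwinding fully on failure. */
static int toy_alloc_rq_cqe(struct device *dev, struct toy_rq *rq)
{
    int i, j;

    rq->cqe = vzalloc(rq->q_depth * sizeof(*rq->cqe));
    if (!rq->cqe)
        return -ENOMEM;

    rq->cqe_dma = vzalloc(rq->q_depth * sizeof(*rq->cqe_dma));
    if (!rq->cqe_dma)
        goto err_free_ptrs;

    for (i = 0; i < rq->q_depth; i++) {
        rq->cqe[i] = dma_alloc_coherent(dev, sizeof(*rq->cqe[i]),
                                        &rq->cqe_dma[i], GFP_KERNEL);
        if (!rq->cqe[i])
            goto err_unwind;
    }
    return 0;

err_unwind:
    for (j = 0; j < i; j++)
        dma_free_coherent(dev, sizeof(*rq->cqe[j]), rq->cqe[j],
                          rq->cqe_dma[j]);
    vfree(rq->cqe_dma);
err_free_ptrs:
    vfree(rq->cqe);
    return -ENOMEM;
}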
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
macsec.h
32 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) in mlx5e_macsec_is_rx_flow() argument
34 return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5e_macsec_is_rx_flow()
38 struct mlx5_cqe64 *cqe);
46 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } in mlx5e_macsec_is_rx_flow() argument
49 struct mlx5_cqe64 *cqe) in mlx5e_macsec_offload_handle_rx_skb() argument
/linux/drivers/scsi/qedi/
qedi_fw.c
31 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
42 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; in qedi_process_logout_resp()
50 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); in qedi_process_logout_resp()
82 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
97 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; in qedi_process_text_resp()
109 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_text_resp()
178 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
190 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; in qedi_process_tmf_resp()
214 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_tmf_resp()
258 union iscsi_cqe *cqe, in qedi_process_login_resp() argument
[all …]
/linux/io_uring/
fdinfo.c
78 * we may get imprecise sqe and cqe info if uring is actively running in io_uring_show_fdinfo()
128 struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift]; in io_uring_show_fdinfo() local
131 entry & cq_mask, cqe->user_data, cqe->res, in io_uring_show_fdinfo()
132 cqe->flags); in io_uring_show_fdinfo()
135 cqe->big_cqe[0], cqe->big_cqe[1]); in io_uring_show_fdinfo()
217 struct io_uring_cqe *cqe = &ocqe->cqe; in io_uring_show_fdinfo()
[all …]
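
The fdinfo dump indexes the user-visible CQ ring as cqes[(entry & cq_mask) << cq_shift]; the shift is 0 for 16-byte CQEs and 1 when the ring was set up with IORING_SETUP_CQE32, so each logical entry then spans two slots. A small illustration of that indexing with toy types (user-space view of the ring):

#include <stdint.h>

struct toy_cqe {
    uint64_t user_data;
    int32_t  res;
    uint32_t flags;
    /* with CQE32 the two big_cqe words live in the following slot */
};

/* cq_shift doubles the stride so 32-byte CQEs occupy two slots each. */
static struct toy_cqe *cq_entry(struct toy_cqe *cqes, uint32_t entry,
                                uint32_t cq_mask, uint32_t cq_shift)
{
    return &cqes[(entry & cq_mask) << cq_shift];
}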
io_uring.h
182 struct io_uring_cqe *cqe; in io_fill_cqe_req() local
189 if (unlikely(!io_get_cqe(ctx, &cqe))) in io_fill_cqe_req()
193 trace_io_uring_complete(req->ctx, req, req->cqe.user_data, in io_fill_cqe_req()
194 req->cqe.res, req->cqe.flags, in io_fill_cqe_req()
197 memcpy(cqe, &req->cqe, sizeof(*cqe)); in io_fill_cqe_req()
199 memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe)); in io_fill_cqe_req()
216 req->cqe.res = res; in io_req_set_res()
217 req->cqe.flags = cflags; in io_req_set_res()
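
io_fill_cqe_req() claims the next free slot in the CQ ring and copies the request's cached completion (user_data, res, flags) into it, falling back to an overflow list when the ring is full and doing a second 16-byte copy for big CQEs. A reduced sketch of the fill step with toy ring types; overflow handling, tracing, and CQE32 are left out:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

struct toy_cqe {
    uint64_t user_data;
    int32_t  res;
    uint32_t flags;
};

struct toy_cq_ring {
    struct toy_cqe *cqes;
    uint32_t mask;          /* entries - 1, entries is a power of two */
    uint32_t head;          /* advanced by the consumer (user space)  */
    uint32_t tail;          /* advanced by the producer (the kernel)  */
};

/* Copy one completion into the next free slot; when the ring is full the
 * kernel would stash the completion on an overflow list instead. */
static bool toy_fill_cqe(struct toy_cq_ring *ring, const struct toy_cqe *src)
{
    if (ring->tail - ring->head > ring->mask)
        return false;

    memcpy(&ring->cqes[ring->tail & ring->mask], src, sizeof(*src));
    ring->tail++;           /* the kernel publishes this with a release store */
    return true;
}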
/linux/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
970 int entries = attr->cqe; in ocrdma_create_cq()
1024 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1035 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local
1037 cqe = cq->va; in ocrdma_flush_cq()
1045 if (is_cqe_valid(cq, cqe)) in ocrdma_flush_cq()
1047 cqe++; in ocrdma_flush_cq()
1594 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1613 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1618 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1624 if (is_cqe_for_sq(cqe)) { in ocrdma_discard_cqes()
[all …]
ocrdma.h
496 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument
499 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; in is_cqe_valid()
503 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) in is_cqe_for_sq() argument
505 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_for_sq()
509 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) in is_cqe_invalidated() argument
511 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_invalidated()
515 static inline int is_cqe_imm(struct ocrdma_cqe *cqe) in is_cqe_imm() argument
517 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_imm()
521 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) in is_cqe_wr_imm() argument
523 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_wr_imm()
/linux/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
628 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, in check_csum() argument
646 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); in check_csum()
648 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && in check_csum()
655 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) in check_csum()
669 struct mlx4_cqe *cqe; member
683 mlx4_en_get_cqe_ts(_ctx->cqe)); in mlx4_en_xdp_rx_timestamp()
691 struct mlx4_cqe *cqe = _ctx->cqe; in mlx4_en_xdp_rx_hash() local
698 *hash = be32_to_cpu(cqe->immed_rss_invalid); in mlx4_en_xdp_rx_hash()
699 status = cqe->status; in mlx4_en_xdp_rx_hash()
708 if (cqe->ipv6_ext_mask) in mlx4_en_xdp_rx_hash()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.c
186 _ctx->rq->clock, get_cqe_ts(_ctx->cqe)); in mlx5e_xdp_rx_timestamp()
241 const struct mlx5_cqe64 *cqe = _ctx->cqe; in mlx5e_xdp_rx_hash() local
247 *hash = be32_to_cpu(cqe->rss_hash_result); in mlx5e_xdp_rx_hash()
249 hash_type = cqe->rss_hash_type; in mlx5e_xdp_rx_hash()
263 const struct mlx5_cqe64 *cqe = _ctx->cqe; in mlx5e_xdp_rx_vlan_tag() local
265 if (!cqe_has_vlan(cqe)) in mlx5e_xdp_rx_vlan_tag()
269 *vlan_tci = be16_to_cpu(cqe->vlan_info); in mlx5e_xdp_rx_vlan_tag()
280 struct mlx5_cqe64 *cqe; member
289 ts = get_cqe_ts(priv->cqe); in mlx5e_xsk_fill_timestamp()
664 struct mlx5_cqe64 *cqe) in mlx5e_free_xdpsq_desc() argument
[all …]
/linux/tools/testing/vsock/
vsock_uring_test.c
63 struct io_uring_cqe *cqe; in vsock_io_uring_client() local
99 if (io_uring_wait_cqe(&ring, &cqe)) in vsock_io_uring_client()
102 io_uring_cqe_seen(&ring, cqe); in vsock_io_uring_client()
144 struct io_uring_cqe *cqe; in vsock_io_uring_server() local
156 if (io_uring_wait_cqe(&ring, &cqe)) in vsock_io_uring_server()
159 recv_len += cqe->res; in vsock_io_uring_server()
160 io_uring_cqe_seen(&ring, cqe); in vsock_io_uring_server()
/linux/drivers/scsi/bnx2i/
bnx2i.h
383 * @num_cqe_rcvd: statistic counter, total cqe's received
506 struct cqe { struct
650 struct cqe *cq_virt;
654 struct cqe *cq_prod_qe;
655 struct cqe *cq_cons_qe;
656 struct cqe *cq_first_qe;
657 struct cqe *cq_last_qe;
774 struct cqe cqe; member
[all …]
