
Searched full:cqe (Results 1 – 25 of 243) sorted by relevance


/linux/drivers/infiniband/hw/mthca/
mthca_cq.c 174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
194 mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
[all …]
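
The mthca helpers above implement the classic owner-bit handshake: hardware marks a CQE it owns, software may only consume entries whose ownership flag is clear, and each slot is handed back by restoring the flag. A minimal standalone sketch of that convention (all my_* names are hypothetical, not the mthca API):

/* Consumer side of a HW/SW owner-bit protocol over a power-of-two ring. */
#define MY_CQ_OWNER_HW 0x80

struct my_cqe {
	unsigned char owner;	/* MY_CQ_OWNER_HW while hardware owns the slot */
	/* ... payload ... */
};

static struct my_cqe *my_poll_one(struct my_cqe *ring, unsigned int *cons,
				  unsigned int mask)
{
	struct my_cqe *cqe = &ring[*cons & mask];

	if (cqe->owner & MY_CQ_OWNER_HW)
		return NULL;		/* hardware has not filled it yet */
	(*cons)++;			/* advance the consumer index */
	return cqe;
}

static void my_release_one(struct my_cqe *cqe)
{
	cqe->owner = MY_CQ_OWNER_HW;	/* give the slot back to hardware */
}
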
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c 66 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
70 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
72 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
91 struct mlx5_cqe64 *cqe) in mlx5e_read_enhanced_title_slot() argument
96 memcpy(title, cqe, sizeof(struct mlx5_cqe64)); in mlx5e_read_enhanced_title_slot()
139 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
141 cqe->op_own = op_own; in mlx5e_cqes_update_owner()
147 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_cqes_update_owner() local
[all …]
/linux/drivers/infiniband/sw/siw/
siw_cq.c 44 * Reap one CQE from the CQ. Only used by kernel clients
46 * flush for user mapped CQE array as well.
50 struct siw_cqe *cqe; in siw_reap_cqe() local
55 cqe = &cq->queue[cq->cq_get % cq->num_cqe]; in siw_reap_cqe()
56 if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) { in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
59 wc->byte_len = cqe->bytes; in siw_reap_cqe()
62 * During CQ flush, also user land CQE's may get in siw_reap_cqe()
67 if (cqe->flags & SIW_WQE_REM_INVAL) { in siw_reap_cqe()
68 wc->ex.invalidate_rkey = cqe->inval_stag; in siw_reap_cqe()
[all …]
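
siw_reap_cqe() above polls a software CQ: an entry is consumed only if its SIW_WQE_VALID flag is set, and the flag is cleared to recycle the slot. A userspace C11 sketch of the same reap pattern, using acquire/release atomics where the kernel uses READ_ONCE/WRITE_ONCE (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct sw_cqe {
	uint64_t id;		/* work request id */
	uint32_t bytes;		/* bytes transferred */
	atomic_uint flags;	/* bit 0: entry holds valid data */
};
#define SW_CQE_VALID 1u

static bool reap_one(struct sw_cqe *q, unsigned int num_cqe,
		     unsigned int *cq_get, uint64_t *wr_id, uint32_t *byte_len)
{
	struct sw_cqe *cqe = &q[*cq_get % num_cqe];

	if (!(atomic_load_explicit(&cqe->flags, memory_order_acquire) &
	      SW_CQE_VALID))
		return false;			/* nothing new to reap */
	*wr_id = cqe->id;
	*byte_len = cqe->bytes;
	atomic_store_explicit(&cqe->flags, 0, memory_order_release);
	(*cq_get)++;				/* free the slot, advance */
	return true;
}
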
/linux/drivers/infiniband/hw/cxgb4/
cq.c 186 struct t4_cqe cqe; in insert_recv_cqe() local
190 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
191 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe()
196 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe()
198 cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx); in insert_recv_cqe()
199 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
220 struct t4_cqe cqe; in insert_sq_cqe() local
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
225 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe()
230 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
[all …]
/linux/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h 116 static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
121 return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
123 return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
125 return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
129 char *cqe, u32 val) \
134 mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
137 mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
140 mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
158 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
169 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
[all …]
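
The mlxsw macro above generates one public accessor that fans out to per-version field getters and setters, so callers never care which CQE layout (v0/v1/v2) the device produced. A plain-C sketch of that dispatch shape (the field offsets here are invented for illustration, not the mlxsw layout):

#include <stdint.h>

enum cqe_v { CQE_V0, CQE_V1, CQE_V2 };

/* Per-version getters; each version keeps the field at a different offset. */
static uint32_t cqe_v0_port_get(const char *cqe) { return (uint8_t)cqe[0]; }
static uint32_t cqe_v1_port_get(const char *cqe) { return (uint8_t)cqe[4]; }
static uint32_t cqe_v2_port_get(const char *cqe) { return (uint8_t)cqe[8]; }

static uint32_t cqe_port_get(enum cqe_v v, const char *cqe)
{
	switch (v) {
	case CQE_V0: return cqe_v0_port_get(cqe);
	case CQE_V1: return cqe_v1_port_get(cqe);
	default:     return cqe_v2_port_get(cqe);
	}
}
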
/linux/tools/testing/selftests/net/
io_uring_zerocopy_tx.c 97 struct io_uring_cqe *cqe; in do_tx() local
157 ret = io_uring_wait_cqe(&ring, &cqe); in do_tx()
159 error(1, -ret, "wait cqe"); in do_tx()
161 if (cqe->user_data != NONZC_TAG && in do_tx()
162 cqe->user_data != ZC_TAG) in do_tx()
163 error(1, EINVAL, "invalid cqe->user_data"); in do_tx()
165 if (cqe->flags & IORING_CQE_F_NOTIF) { in do_tx()
166 if (cqe->flags & IORING_CQE_F_MORE) in do_tx()
175 if (cqe->flags & IORING_CQE_F_MORE) { in do_tx()
176 if (cqe in do_tx()
[all …]
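
The selftest above drives the standard liburing completion cycle: submit an SQE, block in io_uring_wait_cqe(), inspect cqe->user_data/res/flags, then mark the entry consumed. A minimal self-contained example of that cycle, assuming liburing is installed (error handling trimmed for brevity):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	io_uring_queue_init(8, &ring, 0);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);			/* no-op request */
	sqe->user_data = 42;			/* echoed back in the CQE */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("user_data=%llu res=%d flags=0x%x\n",
		       (unsigned long long)cqe->user_data, cqe->res,
		       cqe->flags);
		io_uring_cqe_seen(&ring, cqe);	/* advance the CQ head */
	}
	io_uring_queue_exit(&ring);
	return 0;
}
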
/linux/drivers/net/ethernet/qlogic/qede/
qede_fp.c 651 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_set_gro_params() argument
653 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
661 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
662 cqe->header_len; in qede_set_gro_params()
834 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_tpa_start() argument
836 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
841 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
844 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
865 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
868 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
[all …]
/linux/drivers/scsi/qedi/
qedi_fw.c 31 union iscsi_cqe *cqe, in qedi_process_logout_resp() argument
42 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; in qedi_process_logout_resp()
50 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); in qedi_process_logout_resp()
82 union iscsi_cqe *cqe, in qedi_process_text_resp() argument
97 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; in qedi_process_text_resp()
109 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_text_resp()
178 union iscsi_cqe *cqe, in qedi_process_tmf_resp() argument
190 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; in qedi_process_tmf_resp()
214 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, in qedi_process_tmf_resp()
258 union iscsi_cqe *cqe, in qedi_process_login_resp() argument
[all …]
/linux/include/trace/events/
io_uring.h 124 __entry->user_data = req->cqe.user_data;
161 __entry->user_data = req->cqe.user_data;
202 __entry->data = req->cqe.user_data;
246 * io_uring_cqring_wait - called before start waiting for an available CQE
251 * Allows to track waiting for CQE, so that we can e.g. troubleshoot
302 __entry->user_data = req->cqe.user_data;
319 * @cqe: pointer to the filled in CQE being posted
323 TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
325 TP_ARGS(ctx, req, cqe),
340 __entry->user_data = cqe->user_data;
[all …]
/linux/io_uring/
fdinfo.c 79 * we may get imprecise sqe and cqe info if uring is actively running in __io_uring_show_fdinfo()
156 struct io_uring_cqe *cqe; in __io_uring_show_fdinfo() local
159 cqe = &r->cqes[(cq_head & cq_mask)]; in __io_uring_show_fdinfo()
160 if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) in __io_uring_show_fdinfo()
163 cq_head & cq_mask, cqe->user_data, cqe->res, in __io_uring_show_fdinfo()
164 cqe->flags); in __io_uring_show_fdinfo()
167 cqe->big_cqe[0], cqe->big_cqe[1]); in __io_uring_show_fdinfo()
244 struct io_uring_cqe *cqe = &ocqe->cqe; in __io_uring_show_fdinfo() local
247 cqe->user_data, cqe->res, cqe->flags); in __io_uring_show_fdinfo()
cmd_net.c 76 struct io_uring_cqe cqe[2]; in io_process_timestamp_skb() local
91 cqe->user_data = 0; in io_process_timestamp_skb()
92 cqe->res = tskey; in io_process_timestamp_skb()
93 cqe->flags = IORING_CQE_F_MORE | ctx_cqe32_flags(cmd_to_io_kiocb(cmd)->ctx); in io_process_timestamp_skb()
94 cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT; in io_process_timestamp_skb()
96 cqe->flags |= IORING_CQE_F_TSTAMP_HW; in io_process_timestamp_skb()
98 iots = (struct io_timespec *)&cqe[1]; in io_process_timestamp_skb()
101 return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe); in io_process_timestamp_skb()
io_uring.h 231 * ->submitter_task may be NULL and we can still post a CQE, in io_lockdep_assert_cq_locked()
298 bool is_cqe32 = req->cqe.flags & IORING_CQE_F_32; in io_fill_cqe_req()
299 struct io_uring_cqe *cqe; in io_fill_cqe_req() local
305 if (unlikely(!io_get_cqe(ctx, &cqe, is_cqe32))) in io_fill_cqe_req()
308 memcpy(cqe, &req->cqe, sizeof(*cqe)); in io_fill_cqe_req()
310 memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe)); in io_fill_cqe_req()
315 trace_io_uring_complete(req->ctx, req, cqe); in io_fill_cqe_req()
330 req->cqe.res = res; in io_req_set_res()
331 req->cqe.flags = cflags; in io_req_set_res()
344 req->cqe.res = res; in io_req_set_res32()
[all …]
io_uring.c 534 struct io_uring_cqe *cqe; in __io_cqring_overflow_flush() local
540 if (ocqe->cqe.flags & IORING_CQE_F_32 || in __io_cqring_overflow_flush()
549 if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32)) in __io_cqring_overflow_flush()
551 memcpy(cqe, &ocqe->cqe, cqe_size); in __io_cqring_overflow_flush()
661 struct io_cqe *cqe, in io_alloc_ocqe() argument
668 if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) { in io_alloc_ocqe()
674 trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe); in io_alloc_ocqe()
676 ocqe->cqe.user_data = cqe->user_data; in io_alloc_ocqe()
677 ocqe->cqe.res = cqe->res; in io_alloc_ocqe()
678 ocqe->cqe.flags = cqe->flags; in io_alloc_ocqe()
[all …]
poll.c 124 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert()
236 * require, which is either spurious wakeup or multishot CQE is served.
238 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
239 * poll and that the result is stored in req->cqe.
258 * cqe.res contains only events of the first wake up in io_poll_check_events()
263 req->cqe.res = 0; in io_poll_check_events()
266 req->cqe.res = 0; in io_poll_check_events()
279 if (!req->cqe.res) { in io_poll_check_events()
283 req->cqe.res = vfs_poll(req->file, &pt) & events; in io_poll_check_events()
290 if (unlikely(!req->cqe.res)) { in io_poll_check_events()
[all …]
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c 42 struct nix_cqe_rx_s *cqe,
107 struct nix_cqe_tx_s *cqe, in otx2_xdp_snd_pkt_handler() argument
110 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_xdp_snd_pkt_handler()
128 struct nix_cqe_tx_s *cqe, in otx2_snd_pkt_handler() argument
131 struct nix_send_comp_s *snd_comp = &cqe->comp; in otx2_snd_pkt_handler()
225 struct nix_cqe_rx_s *cqe, struct sk_buff *skb) in otx2_set_rxhash() argument
241 hash = cqe->hdr.flow_tag; in otx2_set_rxhash()
246 static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe, in otx2_free_rcv_seg() argument
249 struct nix_rx_sg_s *sg = &cqe->sg; in otx2_free_rcv_seg()
255 end = start + ((cqe->parse.desc_sizem1 + 1) * 16); in otx2_free_rcv_seg()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx4/
en_rx.c 594 * the (IPv4 | IPv6) bits are set in cqe->status.
596 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, in check_csum() argument
602 /* CQE csum doesn't cover padding octets in short ethernet in check_csum()
614 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); in check_csum()
616 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && in check_csum()
623 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) in check_csum()
637 struct mlx4_cqe *cqe; member
651 mlx4_en_get_cqe_ts(_ctx->cqe)); in mlx4_en_xdp_rx_timestamp()
659 struct mlx4_cqe *cqe = _ctx->cqe; in mlx4_en_xdp_rx_hash() local
666 *hash = be32_to_cpu(cqe->immed_rss_invalid); in mlx4_en_xdp_rx_hash()
[all …]
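
check_csum() above works on the 16-bit one's-complement sum the NIC reports in the CQE, adjusting it (e.g. for VLAN bytes) before validating the packet. For reference, a small sketch of folding a 32-bit one's-complement accumulator down to that 16-bit form, as in the classic Internet checksum:

#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries into low 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any carry that produced */
	return (uint16_t)~sum;			/* one's-complement result */
}
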
/linux/drivers/scsi/bnx2i/
bnx2i_hwi.c 1125 /* Invalidate all EQ CQE index, req only for 57710 */ in bnx2i_alloc_qp_resc()
1239 * initialization. Firmware completes this handshake with a CQE carrying
1332 * @cqe: pointer to newly DMA'ed CQE entry for processing
1334 * process SCSI CMD Response CQE & complete the request to SCSI-ML
1338 struct cqe *cqe) in bnx2i_process_scsi_cmd_resp() argument
1348 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp()
1383 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp()
1429 * @cqe: pointer to newly DMA'ed CQE entry for processing
1431 * process Login Response CQE & complete it to open-iscsi user daemon
1435 struct cqe *cqe) in bnx2i_process_login_resp() argument
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
send.c 349 struct mlx5_cqe64 *cqe) in hws_send_engine_dump_error_cqe() argument
351 u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0; in hws_send_engine_dump_error_cqe()
353 u32 opcode = cqe ? get_cqe_opcode(cqe) : 0; in hws_send_engine_dump_error_cqe()
357 * want to pollute dmesg. Print only the first bad cqe per engine, in hws_send_engine_dump_error_cqe()
396 if (!cqe) { in hws_send_engine_dump_error_cqe()
397 mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n", in hws_send_engine_dump_error_cqe()
402 mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n", in hws_send_engine_dump_error_cqe()
408 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; in hws_send_engine_dump_error_cqe()
429 " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n", in hws_send_engine_dump_error_cqe()
430 HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt)); in hws_send_engine_dump_error_cqe()
[all …]
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c 324 cqe_size = wq->q_depth * sizeof(*rq->cqe); in alloc_rq_cqe()
325 rq->cqe = vzalloc(cqe_size); in alloc_rq_cqe()
326 if (!rq->cqe) in alloc_rq_cqe()
335 rq->cqe[i] = dma_alloc_coherent(&pdev->dev, in alloc_rq_cqe()
336 sizeof(*rq->cqe[i]), in alloc_rq_cqe()
338 if (!rq->cqe[i]) in alloc_rq_cqe()
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], in alloc_rq_cqe()
352 vfree(rq->cqe); in alloc_rq_cqe()
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], in free_rq_cqe()
372 vfree(rq->cqe); in free_rq_cqe()
[all …]
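
alloc_rq_cqe() above allocates one small DMA-coherent buffer per RQ entry into a vzalloc'ed pointer table, unwinding the allocations already made if one fails. A userspace sketch of the same allocate-then-unwind pattern, with malloc standing in for dma_alloc_coherent:

#include <stdlib.h>

static void **alloc_entries(unsigned int depth, size_t size)
{
	void **tbl = calloc(depth, sizeof(*tbl));
	unsigned int i;

	if (!tbl)
		return NULL;
	for (i = 0; i < depth; i++) {
		tbl[i] = malloc(size);
		if (!tbl[i])
			goto err_unwind;
	}
	return tbl;

err_unwind:
	while (i--)		/* free what succeeded, in reverse order */
		free(tbl[i]);
	free(tbl);
	return NULL;
}
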
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
macsec.h 32 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) in mlx5e_macsec_is_rx_flow() argument
34 return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); in mlx5e_macsec_is_rx_flow()
38 struct mlx5_cqe64 *cqe);
46 static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } in mlx5e_macsec_is_rx_flow() argument
49 struct mlx5_cqe64 *cqe) in mlx5e_macsec_offload_handle_rx_skb() argument
/linux/drivers/infiniband/ulp/iser/
iscsi_iser.h 231 * @cqe: completion handler
244 struct ib_cqe cqe; member
262 * @cqe: completion handler
271 struct ib_cqe cqe; member
283 * @cqe: completion handler
291 struct ib_cqe cqe; member
552 iser_rx(struct ib_cqe *cqe) in iser_rx() argument
554 return container_of(cqe, struct iser_rx_desc, cqe); in iser_rx()
558 iser_tx(struct ib_cqe *cqe) in iser_tx() argument
560 return container_of(cqe, struct iser_tx_desc, cqe); in iser_tx()
[all …]
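
iser_rx()/iser_tx() above recover the enclosing descriptor from an embedded struct ib_cqe with container_of(), so a single completion-handler pointer per descriptor is enough. A standalone illustration of that pointer arithmetic (simplified types, not the iser structures):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe { int done; };

struct rx_desc {
	char buf[64];
	struct cqe cqe;		/* embedded completion handle */
};

static struct rx_desc *to_rx_desc(struct cqe *c)
{
	return container_of(c, struct rx_desc, cqe);
}
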
/linux/tools/testing/selftests/ublk/
fault_inject.c 103 const struct io_uring_cqe *cqe)
105 unsigned tag = user_data_to_tag(cqe->user_data);
108 if (cqe->res != -ETIME)
109 ublk_err("%s: unexpected cqe res %d\n", __func__, cqe->res);
114 ublk_err("%s: io not complete after 1 cqe\n", __func__);
61 ublk_fault_inject_tgt_io_done(struct ublk_thread *t, struct ublk_queue *q, const struct io_uring_cqe *cqe) ublk_fault_inject_tgt_io_done() argument
null.c 95 const struct io_uring_cqe *cqe) in ublk_null_io_done()
97 unsigned tag = user_data_to_tag(cqe->user_data); in ublk_null_io_done()
98 unsigned op = user_data_to_op(cqe->user_data); in ublk_null_io_done()
101 if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) { in ublk_null_io_done()
103 io->result = cqe->res; in ublk_null_io_done()
104 if (cqe->res < 0) in ublk_null_io_done()
106 __func__, op, cqe->user_data); in ublk_null_io_done()
93 ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q, const struct io_uring_cqe *cqe) ublk_null_io_done() argument
file_backed.c 169 const struct io_uring_cqe *cqe) in ublk_loop_memset_file()
171 unsigned tag = user_data_to_tag(cqe->user_data); in ublk_loop_memset_file()
172 unsigned op = user_data_to_op(cqe->user_data); in ublk_loop_memset_file()
175 if (cqe->res < 0) { in ublk_loop_memset_file()
176 io->result = cqe->res; in ublk_loop_memset_file()
178 __func__, op, cqe->user_data); in ublk_loop_tgt_init()
180 __s32 data_len = user_data_to_tgt_data(cqe->user_data) in ublk_loop_tgt_init()
181 ? ublk_integrity_data_len(q, cqe->res) in ublk_loop_tgt_init()
182 : cqe->res; in ublk_loop_tgt_init()
131 ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q, const struct io_uring_cqe *cqe) ublk_loop_io_done() argument
/linux/include/uapi/linux/
io_uring.h 138 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
168 /* don't post CQE if request succeeded */
232 * Allow both 16b and 32b CQEs. If a 32b CQE is posted, it will have
233 * IORING_CQE_F_32 set in cqe->flags.
420 * the zerocopy usage in cqe.res
421 * for the IORING_CQE_F_NOTIF cqe.
430 * the starting buffer ID in cqe->flags as per
445 * cqe.res for IORING_CQE_F_NOTIF if
449 * bits of cqe.res should be treated as reserved!
471 * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
[all …]
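
Per the uapi comments above, a zerocopy send can produce two CQEs: the first carries the send result, with IORING_CQE_F_MORE set if a notification is still to come, and a later IORING_CQE_F_NOTIF CQE signals that the kernel no longer needs the buffer. A small sketch of deciding when a send buffer may be reused, assuming the liburing headers:

#include <liburing.h>
#include <stdbool.h>

static bool buffer_reusable(const struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_NOTIF)
		return true;	/* notification CQE: buffer released */
	if (cqe->flags & IORING_CQE_F_MORE)
		return false;	/* result CQE; notification still pending */
	return true;		/* no notification expected */
}
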
