| /linux/drivers/infiniband/hw/mthca/ |
| H A D | mthca_cq.c |
    174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)   in cqe_sw() argument
    176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;   in cqe_sw()
    181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));   in next_cqe_sw()
    184  static inline void set_cqe_hw(struct mthca_cqe *cqe)   in set_cqe_hw() argument
    186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;   in set_cqe_hw()
    191  __be32 *cqe = cqe_ptr;   in dump_cqe() local
    193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */   in dump_cqe()
    194  mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",   in dump_cqe()
    195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),   in dump_cqe()
    196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),   in dump_cqe()
    [all …]
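The mthca hits above show the classic ownership-bit handshake on a completion queue: cqe_sw() hands back an entry only when the owner bit says software owns it, and set_cqe_hw() returns it to the hardware once it has been consumed. Below is a minimal, self-contained sketch of that polling pattern; the fake_cqe layout, the CQE_OWNER_HW value and the ring size are invented for illustration and are not mthca's real definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define CQ_ENTRIES   8            /* hypothetical ring size (power of two) */
#define CQE_OWNER_HW 0x80         /* hypothetical "owned by hardware" flag */

struct fake_cqe {                 /* stand-in for struct mthca_cqe */
	uint8_t  owner;
	uint32_t payload;
};

static struct fake_cqe cq[CQ_ENTRIES];
static uint32_t cons_index;

/* Return the next entry only if software owns it, mirroring cqe_sw(). */
static struct fake_cqe *next_cqe_sw(void)
{
	struct fake_cqe *cqe = &cq[cons_index & (CQ_ENTRIES - 1)];

	return (cqe->owner & CQE_OWNER_HW) ? NULL : cqe;
}

/* Hand the slot back to "hardware" once consumed, mirroring set_cqe_hw(). */
static void set_cqe_hw(struct fake_cqe *cqe)
{
	cqe->owner = CQE_OWNER_HW;
}

int main(void)
{
	struct fake_cqe *cqe;

	/* Start with every slot owned by hardware ... */
	for (int i = 0; i < CQ_ENTRIES; i++)
		cq[i].owner = CQE_OWNER_HW;

	/* ... then pretend hardware completed two entries. */
	cq[0].owner = 0; cq[0].payload = 100;
	cq[1].owner = 0; cq[1].payload = 200;

	while ((cqe = next_cqe_sw()) != NULL) {
		printf("polled CQE %u: payload %u\n",
		       (unsigned)cons_index, (unsigned)cqe->payload);
		set_cqe_hw(cqe);
		cons_index++;
	}
	return 0;
}
```

Masking the consumer index with the ring size plays the same role as the `cq->cons_index & cq->ibcq.cqe` expression in the excerpt, which only works because these rings are kept at power-of-two depths.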
|
| /linux/drivers/infiniband/sw/rxe/ |
| H A D | rxe_cq.c |
    12   int cqe, int comp_vector)   in rxe_cq_chk_attr() argument
    16   if (cqe <= 0) {   in rxe_cq_chk_attr()
    17   rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);   in rxe_cq_chk_attr()
    21   if (cqe > rxe->attr.max_cqe) {   in rxe_cq_chk_attr()
    22   rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",   in rxe_cq_chk_attr()
    23   cqe, rxe->attr.max_cqe);   in rxe_cq_chk_attr()
    29   if (cqe < count) {   in rxe_cq_chk_attr()
    30   rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",   in rxe_cq_chk_attr()
    31   cqe, count);   in rxe_cq_chk_attr()
    42   int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,   in rxe_cq_from_init() argument
    [all …]
|
| /linux/drivers/infiniband/sw/siw/ |
| H A D | siw_cq.c |
    44   * Reap one CQE from the CQ. Only used by kernel clients
    46   * flush for user mapped CQE array as well.
    50   struct siw_cqe *cqe;   in siw_reap_cqe() local
    55   cqe = &cq->queue[cq->cq_get % cq->num_cqe];   in siw_reap_cqe()
    56   if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {   in siw_reap_cqe()
    58   wc->wr_id = cqe->id;   in siw_reap_cqe()
    59   wc->byte_len = cqe->bytes;   in siw_reap_cqe()
    62   * During CQ flush, also user land CQE's may get   in siw_reap_cqe()
    67   if (cqe->flags & SIW_WQE_REM_INVAL) {   in siw_reap_cqe()
    68   wc->ex.invalidate_rkey = cqe->inval_stag;   in siw_reap_cqe()
    [all …]
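siw_reap_cqe() copies one valid CQE into a struct ib_wc for a kernel consumer. Userspace consumers get the same reap through libibverbs' ibv_poll_cq(). A hedged sketch of that loop follows; drain_cq() is a made-up helper name, the cq argument is assumed to come from an earlier ibv_create_cq(), and error handling is deliberately minimal (link with -libverbs).

```c
#include <stdio.h>
#include <infiniband/verbs.h>

/* Drain whatever completions are currently in 'cq'; returns number reaped.
 * Sketch only: 'cq' must have been created with ibv_create_cq() elsewhere. */
static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int total = 0, n;

	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (int i = 0; i < n; i++) {
			if (wc[i].status != IBV_WC_SUCCESS) {
				fprintf(stderr, "wr_id %llu failed: %s\n",
					(unsigned long long)wc[i].wr_id,
					ibv_wc_status_str(wc[i].status));
				continue;
			}
			/* wc.wr_id and wc.byte_len correspond to cqe->id and
			 * cqe->bytes copied out in the siw excerpt above. */
			printf("wr_id %llu completed, %u bytes\n",
			       (unsigned long long)wc[i].wr_id, wc[i].byte_len);
		}
		total += n;
	}
	return n < 0 ? n : total;
}
```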
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| H A D | en_rx.c |
    66   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
    70   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
    72   static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
    73   static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
    74   static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
    91   struct mlx5_cqe64 *cqe)   in mlx5e_read_enhanced_title_slot() argument
    96   memcpy(title, cqe, sizeof(struct mlx5_cqe64));   in mlx5e_read_enhanced_title_slot()
    139  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);   in mlx5e_cqes_update_owner() local
    141  cqe->op_own = op_own;   in mlx5e_cqes_update_owner()
    147  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);   in mlx5e_cqes_update_owner() local
    [all …]
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| H A D | cq.c |
    186  struct t4_cqe cqe;   in insert_recv_cqe() local
    190  memset(&cqe, 0, sizeof(cqe));   in insert_recv_cqe()
    191  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |   in insert_recv_cqe()
    196  cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));   in insert_recv_cqe()
    198  cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);   in insert_recv_cqe()
    199  cq->sw_queue[cq->sw_pidx] = cqe;   in insert_recv_cqe()
    220  struct t4_cqe cqe;   in insert_sq_cqe() local
    224  memset(&cqe, 0, sizeof(cqe));   in insert_sq_cqe()
    225  cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |   in insert_sq_cqe()
    230  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;   in insert_sq_cqe()
    [all …]
|
| /linux/drivers/infiniband/hw/vmw_pvrdma/ |
| H A D | pvrdma_cq.c |
    83   cq->ibcq.cqe, &head);   in pvrdma_req_notify_cq()
    106  int entries = attr->cqe;   in pvrdma_create_cq()
    133  cq->ibcq.cqe = entries;   in pvrdma_create_cq()
    187  cmd->cqe = entries;   in pvrdma_create_cq()
    196  cq->ibcq.cqe = resp->cqe;   in pvrdma_create_cq()
    289  cq->ibcq.cqe, &head);   in _pvrdma_flush_cqe()
    294  cq->ibcq.cqe);   in _pvrdma_flush_cqe()
    295  struct pvrdma_cqe *cqe;   in _pvrdma_flush_cqe() local
    299  (cq->ibcq.cqe - head + tail);   in _pvrdma_flush_cqe()
    303  curr = cq->ibcq.cqe - 1;   in _pvrdma_flush_cqe()
    [all …]
|
| /linux/drivers/infiniband/hw/mlx5/ |
| H A D | cq.c |
    84   void *cqe = get_cqe(cq, n & cq->ibcq.cqe);   in get_sw_cqe() local
    87   cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;   in get_sw_cqe()
    90   !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {   in get_sw_cqe()
    91   return cqe;   in get_sw_cqe()
    120  static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,   in handle_good_req() argument
    124  switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {   in handle_good_req()
    140  wc->byte_len = be32_to_cpu(cqe->byte_cnt);   in handle_good_req()
    169  static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,   in handle_responder() argument
    185  msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));   in handle_responder()
    192  wqe_ctr = be16_to_cpu(cqe->wqe_counter);   in handle_responder()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlxsw/ |
| H A D | pci_hw.h |
    116  static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
    121  return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
    123  return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
    125  return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
    129  char *cqe, u32 val) \
    134  mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
    137  mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
    140  mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
    158  MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
    169  MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
    [all …]
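pci_hw.h generates one getter/setter per CQE field that dispatches on the CQE format version (v0/v1/v2), so callers never need to know which hardware revision produced the completion. The sketch below reproduces that token-pasting dispatch in a form that compiles and runs on its own; the field offsets, version names and CQE_ITEM16 macro are invented for illustration and do not match the real mlxsw layout.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum cqe_version { CQE_V0, CQE_V1, CQE_V2 };

/* Pretend the field lives at a different offset in each CQE format. */
static uint16_t cqe_v0_wqe_counter_get(const char *cqe) { uint16_t v; memcpy(&v, cqe + 2, 2); return v; }
static uint16_t cqe_v1_wqe_counter_get(const char *cqe) { uint16_t v; memcpy(&v, cqe + 4, 2); return v; }
static uint16_t cqe_v2_wqe_counter_get(const char *cqe) { uint16_t v; memcpy(&v, cqe + 6, 2); return v; }

/* Token-pasting dispatcher in the spirit of the pci_hw.h macro above. */
#define CQE_ITEM16(name)						\
static inline uint16_t cqe_##name##_get(enum cqe_version v, const char *cqe) \
{									\
	switch (v) {							\
	case CQE_V0:							\
		return cqe_v0_##name##_get(cqe);			\
	case CQE_V1:							\
		return cqe_v1_##name##_get(cqe);			\
	default:							\
		return cqe_v2_##name##_get(cqe);			\
	}								\
}

CQE_ITEM16(wqe_counter)   /* expands to cqe_wqe_counter_get() */

int main(void)
{
	char cqe[16] = {0};
	uint16_t val = 42;

	memcpy(cqe + 4, &val, 2);                         /* v1 layout */
	printf("%u\n", cqe_wqe_counter_get(CQE_V1, cqe)); /* prints 42 */
	return 0;
}
```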
|
| /linux/tools/testing/selftests/net/ |
| H A D | io_uring_zerocopy_tx.c |
    97   struct io_uring_cqe *cqe;   in do_tx() local
    157  ret = io_uring_wait_cqe(&ring, &cqe);   in do_tx()
    159  error(1, ret, "wait cqe");   in do_tx()
    161  if (cqe->user_data != NONZC_TAG &&   in do_tx()
    162  cqe->user_data != ZC_TAG)   in do_tx()
    163  error(1, -EINVAL, "invalid cqe->user_data");   in do_tx()
    165  if (cqe->flags & IORING_CQE_F_NOTIF) {   in do_tx()
    166  if (cqe->flags & IORING_CQE_F_MORE)   in do_tx()
    175  if (cqe->flags & IORING_CQE_F_MORE) {   in do_tx()
    176  if (cqe->user_data != ZC_TAG)   in do_tx()
    [all …]
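The selftest above waits for CQEs and tells the ordinary send completion apart from the trailing zero-copy notification by checking IORING_CQE_F_MORE and IORING_CQE_F_NOTIF. A minimal liburing sketch of the wait-and-mark-seen cycle it builds on is shown below, using a NOP request so it runs without any socket setup (link with -luring); the 0xcafe tag is arbitrary.

```c
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 0xcafe;          /* echoed back in cqe->user_data */

	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret < 0)
		return 1;

	printf("user_data=%llx res=%d flags=%x\n",
	       (unsigned long long)cqe->user_data, cqe->res, cqe->flags);

	/* For zero-copy sends, IORING_CQE_F_MORE on the first CQE means a
	 * second, IORING_CQE_F_NOTIF CQE will follow once the buffers are
	 * safe to reuse -- the branch the selftest above is exercising. */

	io_uring_cqe_seen(&ring, cqe);    /* advance the CQ ring head */
	io_uring_queue_exit(&ring);
	return 0;
}
```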
|
| /linux/drivers/net/ethernet/qlogic/qede/ |
| H A D | qede_fp.c |
    651  struct eth_fast_path_rx_tpa_start_cqe *cqe)   in qede_set_gro_params() argument
    653  u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);   in qede_set_gro_params()
    661  skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -   in qede_set_gro_params()
    662  cqe->header_len;   in qede_set_gro_params()
    834  struct eth_fast_path_rx_tpa_start_cqe *cqe)   in qede_tpa_start() argument
    836  struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];   in qede_tpa_start()
    841  pad = cqe->placement_offset + rxq->rx_headroom;   in qede_tpa_start()
    844  le16_to_cpu(cqe->len_on_first_bd),   in qede_tpa_start()
    865  if ((le16_to_cpu(cqe->pars_flags.flags) >>   in qede_tpa_start()
    868  tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);   in qede_tpa_start()
    [all …]
|
| /linux/drivers/scsi/qedi/ |
| H A D | qedi_fw.c |
    31   union iscsi_cqe *cqe,   in qedi_process_logout_resp() argument
    42   cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;   in qedi_process_logout_resp()
    50   resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);   in qedi_process_logout_resp()
    82   union iscsi_cqe *cqe,   in qedi_process_text_resp() argument
    97   cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;   in qedi_process_text_resp()
    109  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,   in qedi_process_text_resp()
    178  union iscsi_cqe *cqe,   in qedi_process_tmf_resp() argument
    190  cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;   in qedi_process_tmf_resp()
    214  resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,   in qedi_process_tmf_resp()
    258  union iscsi_cqe *cqe,   in qedi_process_login_resp() argument
    [all …]
|
| /linux/drivers/infiniband/sw/rdmavt/ |
| H A D | cq.c |
    55   if (head >= (unsigned)cq->ibcq.cqe) {   in rvt_cq_enter()
    56   head = cq->ibcq.cqe;   in rvt_cq_enter()
    169  unsigned int entries = attr->cqe;   in rvt_create_cq()
    239  * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.   in rvt_create_cq()
    251  cq->ibcq.cqe = entries;   in rvt_create_cq()
    340  int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)   in rvt_resize_cq() argument
    352  if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)   in rvt_resize_cq()
    359  sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);   in rvt_resize_cq()
    365  sz = sizeof(struct ib_wc) * (cqe + 1);   in rvt_resize_cq()
    395  if (head > (u32)cq->ibcq.cqe)   in rvt_resize_cq()
    [all …]
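rvt_resize_cq() is the driver half of CQ resizing; verbs consumers reach it through ibv_resize_cq() in userspace (or ib_resize_cq() in the kernel). A hedged userspace sketch of creating and then growing a CQ follows; make_and_grow_cq() is a made-up helper, ctx is assumed to be a struct ibv_context * obtained from ibv_open_device() elsewhere, and the 64/128 entry counts are arbitrary.

```c
#include <stdio.h>
#include <infiniband/verbs.h>

/* Sketch: create a small CQ, then double it. 'ctx' must come from
 * ibv_open_device() in real code; error handling is kept minimal. */
static struct ibv_cq *make_and_grow_cq(struct ibv_context *ctx)
{
	struct ibv_cq *cq;

	cq = ibv_create_cq(ctx, 64 /* requested entries */, NULL, NULL, 0);
	if (!cq)
		return NULL;

	/* The provider may round up; cq->cqe reports what it actually gave
	 * us, much like cq->ibcq.cqe = entries in the rvt_create_cq() hit. */
	printf("created CQ with %d entries\n", cq->cqe);

	if (ibv_resize_cq(cq, 128)) {
		fprintf(stderr, "resize failed\n");
		ibv_destroy_cq(cq);
		return NULL;
	}
	printf("resized CQ to %d entries\n", cq->cqe);
	return cq;
}
```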
|
| /linux/io_uring/ |
| H A D | fdinfo.c |
    77   * we may get imprecise sqe and cqe info if uring is actively running   in __io_uring_show_fdinfo()
    125  struct io_uring_cqe *cqe;   in __io_uring_show_fdinfo() local
    128  cqe = &r->cqes[(cq_head & cq_mask)];   in __io_uring_show_fdinfo()
    129  if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)   in __io_uring_show_fdinfo()
    132  cq_head & cq_mask, cqe->user_data, cqe->res,   in __io_uring_show_fdinfo()
    133  cqe->flags);   in __io_uring_show_fdinfo()
    136  cqe->big_cqe[0], cqe->big_cqe[1]);   in __io_uring_show_fdinfo()
    210  struct io_uring_cqe *cqe = &ocqe->cqe;   in __io_uring_show_fdinfo() local
    213  cqe->user_data, cqe->res, cqe->flags);   in __io_uring_show_fdinfo()
|
| H A D | cmd_net.c |
    60   struct io_uring_cqe cqe[2];   in io_process_timestamp_skb() local
    75   cqe->user_data = 0;   in io_process_timestamp_skb()
    76   cqe->res = tskey;   in io_process_timestamp_skb()
    77   cqe->flags = IORING_CQE_F_MORE | ctx_cqe32_flags(cmd_to_io_kiocb(cmd)->ctx);   in io_process_timestamp_skb()
    78   cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT;   in io_process_timestamp_skb()
    80   cqe->flags |= IORING_CQE_F_TSTAMP_HW;   in io_process_timestamp_skb()
    82   iots = (struct io_timespec *)&cqe[1];   in io_process_timestamp_skb()
    85   return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe);   in io_process_timestamp_skb()
|
| /linux/drivers/net/ethernet/marvell/octeontx2/nic/ |
| H A D | otx2_txrx.c |
    42   struct nix_cqe_rx_s *cqe,
    107  struct nix_cqe_tx_s *cqe,   in otx2_xdp_snd_pkt_handler() argument
    110  struct nix_send_comp_s *snd_comp = &cqe->comp;   in otx2_xdp_snd_pkt_handler()
    128  struct nix_cqe_tx_s *cqe,   in otx2_snd_pkt_handler() argument
    131  struct nix_send_comp_s *snd_comp = &cqe->comp;   in otx2_snd_pkt_handler()
    225  struct nix_cqe_rx_s *cqe, struct sk_buff *skb)   in otx2_set_rxhash() argument
    241  hash = cqe->hdr.flow_tag;   in otx2_set_rxhash()
    246  static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,   in otx2_free_rcv_seg() argument
    249  struct nix_rx_sg_s *sg = &cqe->sg;   in otx2_free_rcv_seg()
    255  end = start + ((cqe->parse.desc_sizem1 + 1) * 16);   in otx2_free_rcv_seg()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| H A D | en_rx.c |
    594  * the (IPv4 | IPv6) bits are set in cqe->status.
    596  static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,   in check_csum() argument
    602  /* CQE csum doesn't cover padding octets in short ethernet   in check_csum()
    614  hw_checksum = csum_unfold((__force __sum16)cqe->checksum);   in check_csum()
    616  if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&   in check_csum()
    623  if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))   in check_csum()
    637  struct mlx4_cqe *cqe;   member
    651  mlx4_en_get_cqe_ts(_ctx->cqe));   in mlx4_en_xdp_rx_timestamp()
    659  struct mlx4_cqe *cqe = _ctx->cqe;   in mlx4_en_xdp_rx_hash() local
    666  *hash = be32_to_cpu(cqe->immed_rss_invalid);   in mlx4_en_xdp_rx_hash()
    [all …]
|
| /linux/drivers/scsi/bnx2i/ |
| H A D | bnx2i_hwi.c |
    1125  /* Invalidate all EQ CQE index, req only for 57710 */   in bnx2i_alloc_qp_resc()
    1239  * initialization. Firmware completes this handshake with a CQE carrying
    1332  * @cqe: pointer to newly DMA'ed CQE entry for processing
    1334  * process SCSI CMD Response CQE & complete the request to SCSI-ML
    1338  struct cqe *cqe)   in bnx2i_process_scsi_cmd_resp() argument
    1348  resp_cqe = (struct bnx2i_cmd_response *)cqe;   in bnx2i_process_scsi_cmd_resp()
    1383  resp_cqe = (struct bnx2i_cmd_response *)cqe;   in bnx2i_process_scsi_cmd_resp()
    1429  * @cqe: pointer to newly DMA'ed CQE entry for processing
    1431  * process Login Response CQE & complete it to open-iscsi user daemon
    1435  struct cqe *cqe)   in bnx2i_process_login_resp() argument
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
| H A D | send.c |
    349  struct mlx5_cqe64 *cqe)   in hws_send_engine_dump_error_cqe() argument
    351  u8 wqe_opcode = cqe ? be32_to_cpu(cqe->sop_drop_qpn) >> 24 : 0;   in hws_send_engine_dump_error_cqe()
    353  u32 opcode = cqe ? get_cqe_opcode(cqe) : 0;   in hws_send_engine_dump_error_cqe()
    357  * want to pollute dmesg. Print only the first bad cqe per engine,   in hws_send_engine_dump_error_cqe()
    396  if (!cqe) {   in hws_send_engine_dump_error_cqe()
    397  mlx5hws_err(ctx, " rule 0x%08llx: no CQE\n",   in hws_send_engine_dump_error_cqe()
    402  mlx5hws_err(ctx, " rule 0x%08llx: cqe->opcode = %d %s\n",   in hws_send_engine_dump_error_cqe()
    408  struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;   in hws_send_engine_dump_error_cqe()
    429  " rule 0x%08llx: cqe->byte_cnt = 0x%08x\n",   in hws_send_engine_dump_error_cqe()
    430  HWS_PTR_TO_ID(rule), be32_to_cpu(cqe->byte_cnt));   in hws_send_engine_dump_error_cqe()
    [all …]
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| H A D | hinic_hw_qp.c |
    324  cqe_size = wq->q_depth * sizeof(*rq->cqe);   in alloc_rq_cqe()
    325  rq->cqe = vzalloc(cqe_size);   in alloc_rq_cqe()
    326  if (!rq->cqe)   in alloc_rq_cqe()
    335  rq->cqe[i] = dma_alloc_coherent(&pdev->dev,   in alloc_rq_cqe()
    336  sizeof(*rq->cqe[i]),   in alloc_rq_cqe()
    338  if (!rq->cqe[i])   in alloc_rq_cqe()
    346  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],   in alloc_rq_cqe()
    352  vfree(rq->cqe);   in alloc_rq_cqe()
    368  dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],   in free_rq_cqe()
    372  vfree(rq->cqe);   in free_rq_cqe()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
| H A D | macsec.h |
    32   static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)   in mlx5e_macsec_is_rx_flow() argument
    34   return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));   in mlx5e_macsec_is_rx_flow()
    38   struct mlx5_cqe64 *cqe);
    46   static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }   in mlx5e_macsec_is_rx_flow() argument
    49   struct mlx5_cqe64 *cqe)   in mlx5e_macsec_offload_handle_rx_skb() argument
|
| /linux/drivers/infiniband/ulp/iser/ |
| H A D | iscsi_iser.h |
    231  * @cqe: completion handler
    244  struct ib_cqe cqe;   member
    262  * @cqe: completion handler
    271  struct ib_cqe cqe;   member
    283  * @cqe: completion handler
    291  struct ib_cqe cqe;   member
    552  iser_rx(struct ib_cqe *cqe)   in iser_rx() argument
    554  return container_of(cqe, struct iser_rx_desc, cqe);   in iser_rx()
    558  iser_tx(struct ib_cqe *cqe)   in iser_tx() argument
    560  return container_of(cqe, struct iser_tx_desc, cqe);   in iser_tx()
    [all …]
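iSER embeds a struct ib_cqe in each of its descriptors and recovers the containing descriptor inside the completion handler with container_of(), as the iser_rx()/iser_tx() hits above show. The self-contained sketch below demonstrates the same trick in isolation; the fake_cqe and rx_desc types are invented stand-ins, and only the container_of() mechanics are the point.

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for struct ib_cqe and a driver descriptor embedding it. */
struct fake_cqe {
	void (*done)(struct fake_cqe *cqe);     /* completion callback */
};

struct rx_desc {
	int             index;
	struct fake_cqe cqe;                    /* embedded, like iser_rx_desc */
};

static void rx_done(struct fake_cqe *cqe)
{
	/* Recover the descriptor from the embedded member, like iser_rx(). */
	struct rx_desc *desc = container_of(cqe, struct rx_desc, cqe);

	printf("completion for rx descriptor %d\n", desc->index);
}

int main(void)
{
	struct rx_desc desc = { .index = 7, .cqe = { .done = rx_done } };

	/* The "CQ poller" only ever sees the embedded cqe and its callback. */
	desc.cqe.done(&desc.cqe);
	return 0;
}
```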
|
| /linux/tools/testing/selftests/ublk/ |
| H A D | fault_inject.c |
    61   const struct io_uring_cqe *cqe)   in ublk_fault_inject_tgt_io_done() argument
    63   unsigned tag = user_data_to_tag(cqe->user_data);   in ublk_fault_inject_tgt_io_done()
    66   if (cqe->res != -ETIME)   in ublk_fault_inject_tgt_io_done()
    67   ublk_err("%s: unexpected cqe res %d\n", __func__, cqe->res);   in ublk_fault_inject_tgt_io_done()
    72   ublk_err("%s: io not complete after 1 cqe\n", __func__);   in ublk_fault_inject_tgt_io_done()
|
| H A D | null.c |
    93   const struct io_uring_cqe *cqe)   in ublk_null_io_done() argument
    95   unsigned tag = user_data_to_tag(cqe->user_data);   in ublk_null_io_done()
    96   unsigned op = user_data_to_op(cqe->user_data);   in ublk_null_io_done()
    99   if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {   in ublk_null_io_done()
    101  io->result = cqe->res;   in ublk_null_io_done()
    102  if (cqe->res < 0)   in ublk_null_io_done()
    104  __func__, op, cqe->user_data);   in ublk_null_io_done()
|
| /linux/include/uapi/linux/ |
| H A D | io_uring.h |
    136  * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
    166  /* don't post CQE if request succeeded */
    229  * Allow both 16b and 32b CQEs. If a 32b CQE is posted, it will have
    230  * IORING_CQE_F_32 set in cqe->flags.
    392  * the zerocopy usage in cqe.res
    393  * for the IORING_CQE_F_NOTIF cqe.
    402  * the starting buffer ID in cqe->flags as per
    417  * cqe.res for IORING_CQE_F_NOTIF if
    421  * bits of cqe.res should be treated as reserved!
    443  * IORING_MSG_RING_CQE_SKIP Don't post a CQE to the target ring. Not
    [all …]
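These uapi comments describe how completion information is packed into cqe->flags: provided-buffer completions carry the buffer ID in the upper bits, and multishot or zero-copy requests set IORING_CQE_F_MORE before a final IORING_CQE_F_NOTIF arrives. A short sketch of how a consumer typically decodes that is below; the flag and shift names are the real uapi ones (recent kernel headers assumed), while decode_cqe() and the printfs are purely illustrative.

```c
#include <stdio.h>
#include <linux/io_uring.h>

/* Decode one CQE's flags the way the uapi comments describe. */
static void decode_cqe(const struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_BUFFER) {
		/* The buffer ID lives in the upper 16 bits of flags. */
		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		printf("completed into provided buffer %u\n", bid);
	}

	if (cqe->flags & IORING_CQE_F_MORE)
		printf("more CQEs will follow for this request\n");

	if (cqe->flags & IORING_CQE_F_NOTIF)
		printf("zero-copy notification: buffers may be reused\n");
}
```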
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| H A D | xdp.c |
    186  _ctx->rq->clock, get_cqe_ts(_ctx->cqe));   in mlx5e_xdp_rx_timestamp()
    241  const struct mlx5_cqe64 *cqe = _ctx->cqe;   in mlx5e_xdp_rx_hash() local
    247  *hash = be32_to_cpu(cqe->rss_hash_result);   in mlx5e_xdp_rx_hash()
    249  hash_type = cqe->rss_hash_type;   in mlx5e_xdp_rx_hash()
    263  const struct mlx5_cqe64 *cqe = _ctx->cqe;   in mlx5e_xdp_rx_vlan_tag() local
    265  if (!cqe_has_vlan(cqe))   in mlx5e_xdp_rx_vlan_tag()
    269  *vlan_tci = be16_to_cpu(cqe->vlan_info);   in mlx5e_xdp_rx_vlan_tag()
    280  struct mlx5_cqe64 *cqe;   member
    289  ts = get_cqe_ts(priv->cqe);   in mlx5e_xsk_fill_timestamp()
    658  struct mlx5_cqe64 *cqe)   in mlx5e_free_xdpsq_desc() argument
    [all …]
|