/freebsd/contrib/ofed/libmlx4/

  cq.c
    104  struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);  [in get_sw_cqe()]
    105  struct mlx4_cqe *tcqe = cq->cqe_size == 64 ? cqe + 1 : cqe;  [in get_sw_cqe()]
    108  !!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;  [in get_sw_cqe()]
    116  static enum ibv_wc_status mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe)
    118  if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)  [in mlx4_handle_error_cqe()]
    122  htobe32(cqe->vlan_my_qpn), htobe32(cqe->wqe_index),  [in mlx4_handle_error_cqe()]
    123  cqe->vendor_err,  [in mlx4_handle_error_cqe()]
    124  cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);  [in mlx4_handle_error_cqe()]
    126  switch (cqe->syndrome) {  [in mlx4_handle_error_cqe()]
    158  static inline void handle_good_req(struct ibv_wc *wc, struct mlx4_cqe *cqe)
    [additional matches not shown]
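  Note: the get_sw_cqe() hits above all rely on the same convention: the hardware flips the CQE
  ownership bit each time it wraps the CQ ring, so software treats the entry at consumer index n
  as valid only when the owner bit matches the wrap parity of n. A minimal, driver-agnostic sketch
  of that test (the struct layout and mask below are illustrative, not the actual mlx4 definitions):

    #include <stdint.h>

    #define DEMO_CQE_OWNER_BIT 0x80   /* illustrative; mlx4 uses MLX4_CQE_OWNER_MASK */

    struct demo_cqe {
            uint8_t owner_sr_opcode;  /* ownership bit shares a byte with the opcode */
            /* ... remaining bytes of the 32- or 64-byte entry ... */
    };

    /*
     * Return the CQE at consumer index n if software may consume it, else NULL.
     * ring_mask is (number_of_entries - 1) and entries is a power of two, so
     * (n & (ring_mask + 1)) is the wrap parity of n.  Hardware flips the
     * ownership bit on every pass over the ring, so the entry is valid exactly
     * when its ownership bit equals that parity.
     */
    static struct demo_cqe *
    demo_get_sw_cqe(struct demo_cqe *ring, unsigned int n, unsigned int ring_mask)
    {
            struct demo_cqe *cqe = &ring[n & ring_mask];
            int hw_bit = !!(cqe->owner_sr_opcode & DEMO_CQE_OWNER_BIT);
            int parity = !!(n & (ring_mask + 1));

            return (hw_bit ^ parity) ? NULL : cqe;
    }
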
  verbs.c
    415  ret = ibv_cmd_create_cq(context, cq_attr->cqe, cq_attr->channel,  [in mlx4_cmd_create_cq()]
    460  if (cq_attr->cqe > 0x3fffff) {  [in create_cq()]
    479  /* mlx4 devices don't support slid and sl in cqe when completion  [in create_cq()]
    497  cq_attr->cqe = align_queue_size(cq_attr->cqe + 1);  [in create_cq()]
    499  if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cq_attr->cqe, mctx->cqe_size))  [in create_cq()]
    517  --cq_attr->cqe;  [in create_cq()]
    547  struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
    552  struct ibv_cq_init_attr_ex cq_attr = {.cqe = cqe, .channel = channel,  [in mlx4_create_cq()]
    567  struct ibv_cq_init_attr_ex cq_attr_c = {.cqe = cq_attr->cqe,  [in mlx4_create_cq_ex()]
    577  int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe)
    [additional matches not shown]
/freebsd/sys/dev/mthca/

  mthca_cq.c
    174  static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
    176  return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;  [in cqe_sw()]
    181  return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));  [in next_cqe_sw()]
    184  static inline void set_cqe_hw(struct mthca_cqe *cqe)
    186  cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;  [in set_cqe_hw()]
    191  __be32 *cqe = cqe_ptr;  [in dump_cqe()]
    193  (void) cqe; /* avoid warning if mthca_dbg compiled away... */  [in dump_cqe()]
    194  mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",  [in dump_cqe()]
    195  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),  [in dump_cqe()]
    196  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),  [in dump_cqe()]
    [additional matches not shown]
/freebsd/contrib/ofed/libcxgb4/

  cq.c
    44   struct t4_cqe cqe;  [in insert_recv_cqe()]
    48   memset(&cqe, 0, sizeof(cqe));  [in insert_recv_cqe()]
    49   cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH) |  [in insert_recv_cqe()]
    54   cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen));  [in insert_recv_cqe()]
    55   cq->sw_queue[cq->sw_pidx] = cqe;  [in insert_recv_cqe()]
    77   struct t4_cqe cqe;  [in insert_sq_cqe()]
    81   memset(&cqe, 0, sizeof(cqe));  [in insert_sq_cqe()]
    82   cqe.header = htobe32(V_CQE_STATUS(T4_ERR_SWFLUSH) |  [in insert_sq_cqe()]
    87   CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;  [in insert_sq_cqe()]
    88   cqe.bits_type_ts = htobe64(V_CQE_GENBIT((u64)cq->gen));  [in insert_sq_cqe()]
    [additional matches not shown]
/freebsd/sys/dev/mlx4/mlx4_ib/

  mlx4_ib_cq.c
    82   struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);  [in get_sw_cqe()]
    83   struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);  [in get_sw_cqe()]
    86   !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;  [in get_sw_cqe()]
    134  static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
    136  mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);  [in mlx4_ib_free_cq_buf()]
    141  struct ib_umem **umem, u64 buf_addr, int cqe)  [in mlx4_ib_get_cq_umem()]
    148  *umem = ib_umem_get(&context->ibucontext, buf_addr, cqe * cqe_size,  [in mlx4_ib_get_cq_umem()]
    178  int entries = attr->cqe;  [in mlx4_ib_create_cq()]
    194  cq->ibcq.cqe = entries - 1;  [in mlx4_ib_create_cq()]
    270  mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);  [in mlx4_ib_create_cq()]
    [additional matches not shown]
/freebsd/sys/dev/cxgbe/iw_cxgbe/

  cq.c
    206  struct t4_cqe cqe;  [in insert_recv_cqe()]
    210  memset(&cqe, 0, sizeof(cqe));  [in insert_recv_cqe()]
    211  cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |  [in insert_recv_cqe()]
    216  cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));  [in insert_recv_cqe()]
    217  cq->sw_queue[cq->sw_pidx] = cqe;  [in insert_recv_cqe()]
    239  struct t4_cqe cqe;  [in insert_sq_cqe()]
    243  memset(&cqe, 0, sizeof(cqe));  [in insert_sq_cqe()]
    244  cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |  [in insert_sq_cqe()]
    249  CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;  [in insert_sq_cqe()]
    250  cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));  [in insert_sq_cqe()]
    [additional matches not shown]
/freebsd/contrib/ofed/libmlx5/

  cq.c
    66   static inline uint8_t get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
    68   return (cqe->l4_hdr_type_etc >> 2) & 0x3;  [in get_cqe_l3_hdr_type()]
    83   void *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);  [in get_sw_cqe()]
    86   cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;  [in get_sw_cqe()]
    89   !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibv_cq.cqe + 1)))) {  [in get_sw_cqe()]
    90   return cqe;  [in get_sw_cqe()]
    106  static inline void handle_good_req(struct ibv_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_wq *wq, i…
    108  switch (be32toh(cqe->sop_drop_qpn) >> 24) {  [in handle_good_req()]
    124  wc->byte_len = be32toh(cqe->byte_cnt);  [in handle_good_req()]
    143  static inline int handle_responder_lazy(struct mlx5_cq *cq, struct mlx5_cqe64 *cqe,
    [additional matches not shown]
  mlx5dv.h
    80   * This flag indicates if CQE version 0 or 1 is needed.
    215  * CQE related part
    312  uint8_t mlx5dv_get_cqe_owner(struct mlx5_cqe64 *cqe)
    314  return cqe->op_own & 0x1;  [in mlx5dv_get_cqe_owner()]
    318  void mlx5dv_set_cqe_owner(struct mlx5_cqe64 *cqe, uint8_t val)
    320  cqe->op_own = (val & 0x1) | (cqe->op_own & ~0x1);  [in mlx5dv_set_cqe_owner()]
    325  uint8_t mlx5dv_get_cqe_se(struct mlx5_cqe64 *cqe)
    327  return (cqe->op_own >> 1) & 0x1;  [in mlx5dv_get_cqe_se()]
    331  uint8_t mlx5dv_get_cqe_format(struct mlx5_cqe64 *cqe)
    333  return (cqe->op_own >> 2) & 0x3;  [in mlx5dv_get_cqe_format()]
    [additional matches not shown]
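  Note: the mlx5dv.h hits are the static-inline accessors mlx5dv exposes for picking apart the
  op_own byte of a raw 64-byte CQE. A hedged sketch of how an application polling a CQ through the
  direct-verbs interface might use one of them; the consumer-index bookkeeping shown here is an
  assumption about the caller, not part of mlx5dv:

    #include <infiniband/mlx5dv.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Decide whether the CQE at consumer index ci of a CQ with cq_size entries
     * (a power of two) is currently owned by software.  The wrap-parity
     * convention mirrors the get_sw_cqe() hits listed earlier.
     */
    static bool
    demo_cqe_is_sw_owned(struct mlx5_cqe64 *cqe, uint32_t ci, uint32_t cq_size)
    {
            uint8_t sw_parity = (ci / cq_size) & 1;   /* flips on every ring wrap */

            return mlx5dv_get_cqe_owner(cqe) == sw_parity;
    }
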
/freebsd/sys/dev/mlx5/mlx5_ib/

  mlx5_ib_cq.c
    81    void *cqe = get_cqe(cq, n & cq->ibcq.cqe);  [in get_sw_cqe()]
    84    cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  [in get_sw_cqe()]
    87    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {  [in get_sw_cqe()]
    88    return cqe;  [in get_sw_cqe()]
    117   static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
    121   switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {  [in handle_good_req()]
    135   wc->byte_len = be32_to_cpu(cqe->byte_cnt);  [in handle_good_req()]
    164   static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
    268   dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
    281   mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe, struct ib_wc *wc)
    405   get_sig_err_item(struct mlx5_sig_err_cqe *cqe, struct ib_sig_err *item)
    530   void *cqe;  [in mlx5_poll_one()]
    843   void *cqe;  [in init_cq_buf()]
    1037  void *cqe, *dest;  [in __mlx5_ib_cq_clean()]
    [additional matches not shown]
/freebsd/sys/dev/qlnx/qlnxe/

  eth_common.h
    94   …E ETH_RX_MAX_BUFF_PER_PKT  /* Maximum number of additional buffers, reported by TPA-start CQE */
    95   …TPA_CQE_CONT_LEN_LIST_SIZE 6  /* Maximum number of buffers, reported by TPA-continue CQE */
    96   … ETH_TPA_CQE_END_LEN_LIST_SIZE 4  /* Maximum number of buffers, reported by TPA-end CQE */
    236  #define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1  /* CQE valid bit */
    238  #define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1  /* CQE ring toggle bit */
    245  * Regular ETH Rx FP CQE.
    249  u8 type /* CQE type (use enum eth_rx_cqe_type) */;
    268  struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
    272  * TPA-continue ETH Rx FP CQE.
    276  u8 type /* CQE type (use enum eth_rx_cqe_type) */;
    [additional matches not shown]
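  Note: eth_common.h documents a different validity convention from the Mellanox owner bit: each
  fast-path Rx CQE carries a valid bit plus a toggle bit that flips on every pass over the ring. A
  purely illustrative sketch of consuming such a ring; the struct layout, field names, and masks
  below are hypothetical, and only the valid/toggle idea comes from the header comments above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical flattened view of what the header calls eth_pmd_flow_flags. */
    struct demo_rx_cqe {
            uint8_t type;       /* CQE type */
            uint8_t flags;      /* bit 0: valid, bit 1: toggle (illustrative layout) */
            /* ... descriptor payload ... */
    };

    #define DEMO_CQE_VALID   0x1
    #define DEMO_CQE_TOGGLE  0x2

    /*
     * A CQE is consumable when its valid bit is set and its toggle bit matches
     * the value the consumer expects for the current pass; the expected toggle
     * (0 or 1) flips each time the consumer wraps the ring.
     */
    static bool
    demo_cqe_ready(const struct demo_rx_cqe *cqe, uint8_t expected_toggle)
    {
            if (!(cqe->flags & DEMO_CQE_VALID))
                    return false;
            return !!(cqe->flags & DEMO_CQE_TOGGLE) == expected_toggle;
    }
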
/freebsd/sys/dev/mlx5/mlx5_en/

  mlx5_en_rx.c
    150  mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
    167  l4_hdr_type = get_cqe_l4_hdr_type(cqe);  [in mlx5e_lro_update_hdr()]
    172  tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;  [in mlx5e_lro_update_hdr()]
    189  if (get_cqe_lro_tcppsh(cqe))  [in mlx5e_lro_update_hdr()]
    194  th->th_ack = cqe->lro_ack_seq_num;  [in mlx5e_lro_update_hdr()]
    195  th->th_win = cqe->lro_tcp_win;  [in mlx5e_lro_update_hdr()]
    208  if (get_cqe_lro_timestamp_valid(cqe) &&  [in mlx5e_lro_update_hdr()]
    213  * cqe->timestamp is 64bit long.  [in mlx5e_lro_update_hdr()]
    217  ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);  [in mlx5e_lro_update_hdr()]
    223  ip4->ip_ttl = cqe->lro_min_ttl;  [in mlx5e_lro_update_hdr()]
    [additional matches not shown]
  mlx5_en_txrx.c
    36   struct mlx5_cqe64 *cqe;  [in mlx5e_get_cqe()]
    38   cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));  [in mlx5e_get_cqe()]
    40   if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)  [in mlx5e_get_cqe()]
    43   /* ensure cqe content is read after cqe ownership bit */  [in mlx5e_get_cqe()]
    46   return (cqe);  [in mlx5e_get_cqe()]
    73   "Error CQE on CQN 0x%x, CI 0x%x, QN 0x%x, OPCODE 0x%x, SYNDROME 0x%x, VENDOR SYNDROME 0x%x\n",  [in mlx5e_dump_err_cqe()]
/freebsd/sys/dev/bnxt/bnxt_re/

  qplib_fp.c
    244   * clean_nq - Invalidate cqe from given nq.
    250   * consumer index. Invalidated cqe(marked from this function) will be
    1608  struct cq_req *cqe = (struct cq_req *)hw_cqe;  [in __clean_cq()]
    1610  if (qp == le64_to_cpu(cqe->qp_handle))  [in __clean_cq()]
    1611  cqe->qp_handle = 0;  [in __clean_cq()]
    1618  struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;  [in __clean_cq()]
    1620  if (qp == le64_to_cpu(cqe->qp_handle))  [in __clean_cq()]
    1621  cqe->qp_handle = 0;  [in __clean_cq()]
    2621  struct bnxt_qplib_cqe *cqe;  [in __flush_sq()]
    2627  cqe = *pcqe;  [in __flush_sq()]
    [additional matches not shown]
/freebsd/sys/dev/oce/

  oce_if.c
    163   static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
    164   static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
    165   static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
    169   static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
    1391  struct oce_nic_tx_cqe *cqe;  [in oce_wq_handler()]
    1397  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);  [in oce_wq_handler()]
    1398  while (cqe->u0.dw[3]) {  [in oce_wq_handler()]
    1399  DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));  [in oce_wq_handler()]
    1401  wq->ring->cidx = cqe->u0.s.wqe_index + 1;  [in oce_wq_handler()]
    1407  cqe …  [in oce_wq_handler()]
    1393  struct oce_nic_tx_cqe *cqe;  [in oce_wq_handler()]
    1576  oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
    1657  oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
    1781  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
    1797  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
    1966  struct nic_hwlro_singleton_cqe *cqe;  [in oce_rq_handler_lro()]
    2040  struct oce_nic_rx_cqe *cqe;  [in oce_rq_handler()]
    2624  oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
    2655  struct oce_mq_cqe *cqe;  [in oce_mq_handler()]
    [additional matches not shown]
  oce_queue.c
    1024  struct oce_nic_tx_cqe *cqe;  [in oce_drain_wq_cq()]
    1031  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);  [in oce_drain_wq_cq()]
    1032  if (cqe->u0.dw[3] == 0)  [in oce_drain_wq_cq()]
    1034  cqe->u0.dw[3] = 0;  [in oce_drain_wq_cq()]
    1067  struct oce_nic_rx_cqe *cqe;  [in oce_drain_rq_cq()]
    1074  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);  [in oce_drain_rq_cq()]
    1075  /* dequeue till you reach an invalid cqe */  [in oce_drain_rq_cq()]
    1076  while (RQ_CQE_VALID(cqe)) {  [in oce_drain_rq_cq()]
    1077  RQ_CQE_INVALIDATE(cqe);  [in oce_drain_rq_cq()]
    1079  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,  [in oce_drain_rq_cq()]
    [additional matches not shown]
/freebsd/contrib/ofed/libibverbs/man/

  ibv_resize_cq.3
    11  .BI "int ibv_resize_cq(struct ibv_cq " "*cq" ", int " "cqe" ");
    18  .I cqe
    20  .I cqe
    24  .I cqe
    35  The cqe member of
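  Note: ibv_resize_cq() takes the CQ and the requested minimum number of entries; the provider may
  round the size up, and the cqe member of the ibv_cq is updated to the actual depth. A brief,
  hedged usage sketch, with error handling trimmed to the essentials:

    #include <stdio.h>
    #include <infiniband/verbs.h>

    /* Grow an existing CQ to hold at least new_depth entries. */
    static int
    demo_resize_cq(struct ibv_cq *cq, int new_depth)
    {
            int ret = ibv_resize_cq(cq, new_depth);

            if (ret != 0) {
                    fprintf(stderr, "ibv_resize_cq failed: %d\n", ret);
                    return ret;
            }
            /* The provider may allocate more than requested. */
            printf("CQ now holds %d entries\n", cq->cqe);
            return 0;
    }
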
  ibv_req_notify_cq.3
    17  Upon the addition of a new CQ entry (CQE) to
    23  is zero, a completion event is generated for any new CQE. If
    25  is non\-zero, an event is only generated for a new CQE with that is
    26  considered "solicited." A CQE is solicited if it is a receive
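  Note: ibv_req_notify_cq() arms the CQ for a single completion event on its completion channel;
  the usual pattern is to re-arm after every event and drain the CQ with ibv_poll_cq() before
  sleeping again. A condensed, hedged sketch of that loop (error handling and the actual
  work-completion processing are omitted):

    #include <infiniband/verbs.h>

    /* Block for one completion event, then drain the CQ. Returns 0 on success. */
    static int
    demo_wait_and_drain(struct ibv_comp_channel *channel)
    {
            struct ibv_cq *ev_cq;
            void *ev_ctx;
            struct ibv_wc wc;
            int n;

            if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))
                    return -1;
            ibv_ack_cq_events(ev_cq, 1);

            /* Re-arm before polling so no completion slips through unnoticed. */
            if (ibv_req_notify_cq(ev_cq, 0))    /* 0: notify on any CQE, not only solicited */
                    return -1;

            while ((n = ibv_poll_cq(ev_cq, 1, &wc)) > 0) {
                    /* process wc.status / wc.opcode here */
            }
            return (n < 0) ? -1 : 0;
    }
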
  ibv_create_cq.3
    11  .BI "struct ibv_cq *ibv_create_cq(struct ibv_context " "*context" ", int " "cqe" ,
    21  .I cqe
    48  size. Check the cqe attribute in the returned CQ for the actual size.
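  Note: as the man page says, the cqe argument is only a minimum; the provider may create a larger
  CQ and reports the actual depth back in cq->cqe. A short, hedged example of creating a CQ tied to
  a completion channel (the size of 256 and comp_vector 0 are arbitrary choices):

    #include <stdio.h>
    #include <infiniband/verbs.h>

    static struct ibv_cq *
    demo_create_cq(struct ibv_context *ctx, struct ibv_comp_channel *channel)
    {
            /* Ask for at least 256 entries; comp_vector 0, no private context. */
            struct ibv_cq *cq = ibv_create_cq(ctx, 256, NULL, channel, 0);

            if (cq == NULL) {
                    fprintf(stderr, "ibv_create_cq failed\n");
                    return NULL;
            }
            /* cq->cqe may be larger than requested (e.g. rounded up by the provider). */
            printf("requested 256 CQEs, got %d\n", cq->cqe);
            return cq;
    }
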
/freebsd/sys/dev/nvmf/host/

  nvmf_aer.c
    35   static void nvmf_complete_aer(void *arg, const struct nvme_completion *cqe);
    124  nvmf_complete_aer_page(void *arg, const struct nvme_completion *cqe)
    130  aer->status = cqe->status;  [in nvmf_complete_aer_page()]
    154  nvmf_complete_aer(void *arg, const struct nvme_completion *cqe)
    168  if (cqe->status != 0) {  [in nvmf_complete_aer()]
    169  if (!nvmf_cqe_aborted(cqe))  [in nvmf_complete_aer()]
    171  le16toh(cqe->status));  [in nvmf_complete_aer()]
    175  cdw0 = le32toh(cqe->cdw0);  [in nvmf_complete_aer()]
    230  if (status.cqe.status != 0) {  [in nvmf_set_async_event_config()]
    233  le16toh(status.cqe.status));  [in nvmf_set_async_event_config()]
  nvmf_qpair.c
    96   struct nvme_completion cqe;  [in nvmf_abort_request()]
    98   memset(&cqe, 0, sizeof(cqe));  [in nvmf_abort_request()]
    99   cqe.cid = cid;  [in nvmf_abort_request()]
    100  cqe.status = htole16(NVMEF(NVME_STATUS_SCT, NVME_SCT_PATH_RELATED) |  [in nvmf_abort_request()]
    102  req->cb(req->cb_arg, &cqe);  [in nvmf_abort_request()]
    177  const struct nvme_completion *cqe;  [in nvmf_receive_capsule()]
    180  cqe = nvmf_capsule_cqe(nc);  [in nvmf_receive_capsule()]
    189  cid = cqe->cid;  [in nvmf_receive_capsule()]
    203  qp->sqhd = le16toh(cqe->sqhd);  [in nvmf_receive_capsule()]
    249  req->cb(req->cb_arg, cqe);  [in nvmf_receive_capsule()]
/freebsd/sys/dev/mlx4/mlx4_en/

  mlx4_en_rx.c
    604  struct mlx4_cqe *cqe)  [in invalid_cqe()]
    607  if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==  [in invalid_cqe()]
    609  en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",  [in invalid_cqe()]
    610  ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,  [in invalid_cqe()]
    611  ((struct mlx4_err_cqe *)cqe)->syndrome);  [in invalid_cqe()]
    614  if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {  [in invalid_cqe()]
    732  /* For cpu arch with cache line of 64B the performance is better when cqe size==64B
    733   * To enlarge cqe size from 32B to 64B --> 32B of garbage (i.e. 0xccccccc)
    734   * was added in the beginning of each cqe (the real data is in the corresponding 32B).
    736   * and we get the real cqe data*/
    [additional matches not shown]
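  Note: the comment at lines 732-736 explains why the mlx4 Ethernet receive path sometimes has to
  skip a 32-byte pad: when the device is configured for 64-byte CQEs (a better fit for 64-byte
  cache lines), only the second half of each entry carries real data. A hedged sketch of the
  address arithmetic implied by that comment; the buffer layout and helper name are assumptions,
  not the driver's actual code:

    #include <stdint.h>

    /*
     * Return a pointer to the meaningful 32 bytes of CQE number `index`.
     * cqe_size is either 32 or 64; with 64-byte entries the first 32 bytes
     * are padding, so the useful data starts at offset (cqe_size - 32).
     */
    static inline void *
    demo_get_real_cqe(uint8_t *cq_buf, uint32_t index, uint32_t cqe_size)
    {
            uint32_t pad = cqe_size - 32;   /* 0 for 32B CQEs, 32 for 64B CQEs */

            return cq_buf + (uint64_t)index * cqe_size + pad;
    }
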
/freebsd/sys/dev/mlx5/

  device.h
    730  _Static_assert(sizeof(struct mlx5_cqe64) == 0x40, "CQE layout broken");
    734  static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
    736  return (cqe->op_own >> 4);  [in get_cqe_opcode()]
    739  static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
    741  return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;  [in get_cqe_lro_timestamp_valid()]
    744  static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
    746  return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;  [in get_cqe_lro_tcppsh()]
    749  static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
    751  return (cqe->l4_hdr_type_etc >> 4) & 0x7;  [in get_cqe_l4_hdr_type()]
    754  static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
    [additional matches not shown]
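  Note: device.h gathers the bit-level layout of the 64-byte mlx5 CQE: the high nibble of op_own
  carries the opcode, its lowest bit is the ownership bit, and LRO/L4 hints are packed into their
  own bytes. A hedged sketch that mirrors those accessors on a minimal stand-in struct to make the
  packing explicit; the stand-in struct and names are illustrative, and only the shifts and masks
  come from the accessors listed above:

    #include <stdint.h>

    /* Illustrative stand-in for the handful of mlx5_cqe64 fields used above. */
    struct demo_mlx5_cqe {
            uint8_t lro_tcppsh_abort_dupack;  /* bit 7: LRO timestamp valid, bit 6: TCP PSH */
            uint8_t l4_hdr_type_etc;          /* bits 6:4: L4 header type */
            uint8_t op_own;                   /* bits 7:4: opcode, bit 0: ownership */
    };

    static inline uint8_t demo_cqe_opcode(const struct demo_mlx5_cqe *cqe)
    {
            return cqe->op_own >> 4;
    }

    static inline uint8_t demo_cqe_owner(const struct demo_mlx5_cqe *cqe)
    {
            return cqe->op_own & 0x1;
    }

    static inline uint8_t demo_cqe_l4_hdr_type(const struct demo_mlx5_cqe *cqe)
    {
            return (cqe->l4_hdr_type_etc >> 4) & 0x7;
    }

    static inline int demo_cqe_lro_timestamp_valid(const struct demo_mlx5_cqe *cqe)
    {
            return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
    }
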
/freebsd/sys/dev/mlx5/mlx5_fpga/

  mlx5fpga_conn.c
    248  struct mlx5_cqe64 *cqe, u8 status)  [in mlx5_fpga_conn_rq_cqe()]
    253  ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);  [in mlx5_fpga_conn_rq_cqe()]
    257  buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);  [in mlx5_fpga_conn_rq_cqe()]
    289  struct mlx5_cqe64 *cqe, u8 status)  [in mlx5_fpga_conn_sq_cqe()]
    297  ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);  [in mlx5_fpga_conn_sq_cqe()]
    331  struct mlx5_cqe64 *cqe)  [in mlx5_fpga_conn_handle_cqe()]
    335  opcode = cqe->op_own >> 4;  [in mlx5_fpga_conn_handle_cqe()]
    339  status = ((struct mlx5_err_cqe *)cqe)->syndrome;  [in mlx5_fpga_conn_handle_cqe()]
    342  mlx5_fpga_conn_sq_cqe(conn, cqe, status);  [in mlx5_fpga_conn_handle_cqe()]
    346  status = ((struct mlx5_err_cqe *)cqe)->syndrome;  [in mlx5_fpga_conn_handle_cqe()]
    [additional matches not shown]
/freebsd/contrib/ofed/libirdma/

  irdma_uverbs.c
    371  * get_cq_size - returns actual cqe needed by HW
    374  * @cqe_64byte_ena: enable 64byte cqe
    434  if (attr_ex->cqe < uk_attrs->min_hw_cq_size || attr_ex->cqe > uk_attrs->max_hw_cq_size - 1) {  [in ucreate_cq()]
    439  /* save the cqe requested by application */  [in ucreate_cq()]
    440  ncqe = attr_ex->cqe;  [in ucreate_cq()]
    453  info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);  [in ucreate_cq()]
    510  attr_ex->cqe = info.cq_size;  [in ucreate_cq()]
    517  attr_ex->cqe = ncqe;  [in ucreate_cq()]
    524  /* Do not report the CQE's reserved for immediate and burned by HW */  [in ucreate_cq()]
    525  iwucq->verbs_cq.cq.cqe = ncqe;  [in ucreate_cq()]
    [additional matches not shown]
/freebsd/sys/dev/mlx5/mlx5_accel/

  ipsec.h
    264  struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr);
    266  static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
    268  return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));  [in mlx5e_accel_ipsec_flow()]
    272  mlx5e_accel_ipsec_handle_rx(if_t ifp, struct mbuf *mb, struct mlx5_cqe64 *cqe,  [in mlx5e_accel_ipsec_handle_rx()]
    275  u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);  [in mlx5e_accel_ipsec_handle_rx()]
    278  mlx5e_accel_ipsec_handle_rx_cqe(ifp, mb, cqe, mr);  [in mlx5e_accel_ipsec_handle_rx()]