
Searched refs:cqe (Results 1 – 25 of 45) sorted by relevance


/illumos-gate/usr/src/uts/common/io/qede/
qede_dbg.c
293 qede_dump_reg_cqe(struct eth_fast_path_rx_reg_cqe *cqe) in qede_dump_reg_cqe() argument
296 cmn_err(CE_WARN, " pkt_len = %d", LE_16(cqe->pkt_len)); in qede_dump_reg_cqe()
297 cmn_err(CE_WARN, " bd_num = %d", cqe->bd_num); in qede_dump_reg_cqe()
299 LE_16(cqe->len_on_first_bd)); in qede_dump_reg_cqe()
300 cmn_err(CE_WARN, " placement_offset = %d", cqe->placement_offset); in qede_dump_reg_cqe()
301 cmn_err(CE_WARN, " vlan_tag = %d", LE_16(cqe->vlan_tag)); in qede_dump_reg_cqe()
302 cmn_err(CE_WARN, " rss_hash = %d", LE_32(cqe->rss_hash)); in qede_dump_reg_cqe()
304 LE_16((uint16_t)cqe->pars_flags.flags)); in qede_dump_reg_cqe()
306 cqe->tunnel_pars_flags.flags); in qede_dump_reg_cqe()
307 cmn_err(CE_WARN, " bitfields = %x", cqe->bitfields); in qede_dump_reg_cqe()
[all …]
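
The qede dump routine above converts every multi-byte CQE field with LE_16()/LE_32() before printing, since the device writes completion entries in little-endian order. A minimal sketch of that pattern, assuming a hypothetical CQE layout (the demo_* names are not qede's; le16toh()/le32toh() stand in for the driver's LE_16()/LE_32()):

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct demo_rx_cqe {		/* hypothetical layout, for illustration */
	uint16_t pkt_len;	/* little-endian on the wire */
	uint16_t vlan_tag;	/* little-endian on the wire */
	uint32_t rss_hash;	/* little-endian on the wire */
};

static void
demo_dump_cqe(const struct demo_rx_cqe *cqe)
{
	/* Convert each field from the device's little-endian layout. */
	printf("pkt_len  = %u\n", (unsigned)le16toh(cqe->pkt_len));
	printf("vlan_tag = %u\n", (unsigned)le16toh(cqe->vlan_tag));
	printf("rss_hash = 0x%x\n", (unsigned)le32toh(cqe->rss_hash));
}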
qede_fp.c
300 struct eth_fast_path_rx_tpa_start_cqe *cqe) in qede_lro_start() argument
305 lro_info = &rx_ring->lro_info[cqe->tpa_agg_index]; in qede_lro_start()
311 qede_dump_start_lro_cqe(cqe); in qede_lro_start()
323 lro_info->pars_flags = LE_16(cqe->pars_flags.flags); in qede_lro_start()
324 lro_info->pad = LE_16(cqe->placement_offset); in qede_lro_start()
325 lro_info->header_len = (uint32_t)cqe->header_len; in qede_lro_start()
326 lro_info->vlan_tag = LE_16(cqe->vlan_tag); in qede_lro_start()
327 lro_info->rss_hash = LE_32(cqe->rss_hash); in qede_lro_start()
329 seg_len = (int)LE_16(cqe->seg_len); in qede_lro_start()
330 len_on_first_bd = (int)LE_16(cqe->len_on_first_bd); in qede_lro_start()
[all …]
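
qede_lro_start() caches per-aggregation state out of the TPA-start CQE, keyed by tpa_agg_index, so the matching continue/end completions can find it later. A sketch of that bookkeeping under hypothetical demo_* types (field widths and names are assumptions, not qede's layout):

#include <stdint.h>

#define DEMO_MAX_AGG	64	/* hypothetical per-ring aggregation count */

struct demo_lro_info {
	uint32_t header_len;
	uint16_t vlan_tag;
	uint32_t rss_hash;
	int	 agg_active;
};

struct demo_tpa_start_cqe {	/* hypothetical start-CQE layout */
	uint8_t	 tpa_agg_index;
	uint8_t	 header_len;
	uint16_t vlan_tag;	/* little-endian; conversion omitted */
	uint32_t rss_hash;	/* little-endian; conversion omitted */
};

static struct demo_lro_info demo_lro[DEMO_MAX_AGG];

static void
demo_lro_start(const struct demo_tpa_start_cqe *cqe)
{
	/* Park the start-CQE state in the slot named by the agg index. */
	struct demo_lro_info *lro = &demo_lro[cqe->tpa_agg_index];

	lro->header_len = cqe->header_len;
	lro->vlan_tag = cqe->vlan_tag;
	lro->rss_hash = cqe->rss_hash;
	lro->agg_active = 1;
}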
/illumos-gate/usr/src/lib/udapl/udapl_tavor/tavor/
dapl_tavor_hw.h
177 #define TAVOR_CQE_QPNUM_GET(cqe) \ argument
178 ((BETOH_32(((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
180 #define TAVOR_CQE_DQPN_GET(cqe) \ argument
181 ((BETOH_32(((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
183 #define TAVOR_CQE_SL_GET(cqe) \ argument
184 ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
186 #define TAVOR_CQE_GRH_GET(cqe) \ argument
187 ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
189 #define TAVOR_CQE_PATHBITS_GET(cqe) \ argument
190 ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_PATHBITS_MASK) >>\
[all …]
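
All of these accessors follow one pattern: load a 32-bit big-endian word of the CQE, swap it to host order, mask out the field, and shift it down. A sketch of that shape with invented mask/shift values (the DEMO_* constants are assumptions, not Tavor's real bit layout):

#include <stdint.h>
#include <arpa/inet.h>		/* ntohl(), standing in for BETOH_32() */

#define DEMO_CQE_QPNUM_MASK	0x00FFFFFFu	/* hypothetical */
#define DEMO_CQE_QPNUM_SHIFT	0		/* hypothetical */

#define DEMO_CQE_QPNUM_GET(cqe) \
	((ntohl(((const uint32_t *)(cqe))[0]) & DEMO_CQE_QPNUM_MASK) >> \
	    DEMO_CQE_QPNUM_SHIFT)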
dapl_hermon_hw.c
42 #define HERMON_CQE_OPCODE_GET(cqe) (((uint8_t *)cqe)[31] & 0x1F) argument
43 #define HERMON_CQE_SENDRECV_GET(cqe) (((uint8_t *)cqe)[31] & 0x40) argument
44 #define HERMON_CQE_OWNER_IS_SW(cq, cqe) ((((uint8_t *)cqe)[31] >> 7) == \ argument
531 uint32_t *cqe; in dapli_hermon_cq_peek() local
542 cqe = (uint32_t *)&cq->cq_addr[cons_indx]; in dapli_hermon_cq_peek()
549 while (HERMON_CQE_OWNER_IS_SW(cq, cqe)) { in dapli_hermon_cq_peek()
550 opcode = HERMON_CQE_OPCODE_GET(cqe); in dapli_hermon_cq_peek()
554 TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe); in dapli_hermon_cq_peek()
565 cqe = (uint32_t *)&cq->cq_addr[cons_indx]; in dapli_hermon_cq_peek()
618 uint32_t *cqe; in dapli_hermon_cq_poll() local
[all …]
dapl_arbel_hw.c
554 tavor_hw_cqe_t *cqe; in dapli_arbel_cq_peek() local
572 cqe = &cq->cq_addr[cons_indx]; in dapli_arbel_cq_peek()
579 while (TAVOR_CQE_OWNER_IS_SW(cqe)) { in dapli_arbel_cq_peek()
580 opcode = TAVOR_CQE_OPCODE_GET(cqe); in dapli_arbel_cq_peek()
585 TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe); in dapli_arbel_cq_peek()
596 cqe = &cq->cq_addr[cons_indx]; in dapli_arbel_cq_peek()
614 tavor_hw_cqe_t *cqe; in dapli_arbel_cq_poll() local
631 cqe = &cq->cq_addr[cons_indx]; in dapli_arbel_cq_poll()
644 while (TAVOR_CQE_OWNER_IS_SW(cqe)) { in dapli_arbel_cq_poll()
645 status = dapli_arbel_cq_cqe_consume(cq, cqe, in dapli_arbel_cq_poll()
[all …]
dapl_tavor_hw.c
786 tavor_hw_cqe_t *cqe; in dapli_tavor_cq_peek() local
804 cqe = &cq->cq_addr[cons_indx]; in dapli_tavor_cq_peek()
811 while (TAVOR_CQE_OWNER_IS_SW(cqe)) { in dapli_tavor_cq_peek()
812 opcode = TAVOR_CQE_OPCODE_GET(cqe); in dapli_tavor_cq_peek()
817 TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe); in dapli_tavor_cq_peek()
828 cqe = &cq->cq_addr[cons_indx]; in dapli_tavor_cq_peek()
843 tavor_hw_cqe_t *cqe; in dapli_tavor_cq_poll() local
861 cqe = &cq->cq_addr[cons_indx]; in dapli_tavor_cq_poll()
874 while (TAVOR_CQE_OWNER_IS_SW(cqe)) { in dapli_tavor_cq_poll()
875 status = dapli_tavor_cq_cqe_consume(cq, cqe, in dapli_tavor_cq_poll()
[all …]
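
The arbel, tavor, and hermon peek/poll routines above all share one loop shape: point at the CQE under the consumer index, and while the ownership bit says software owns it, consume the entry, hand it back to hardware, and advance around the ring. A sketch of that loop under hypothetical demo_* types (ring size, flag bit, and field names are assumptions):

#include <stdint.h>

#define DEMO_CQ_SIZE		256	/* hypothetical, power of two */
#define DEMO_OWNER_IS_SW(e)	((((e)->flags) & 0x80) == 0)

struct demo_cqe {
	uint8_t	flags;		/* bit 7 set => hardware owns the entry */
};

struct demo_cq {
	struct demo_cqe	cq_buf[DEMO_CQ_SIZE];
	uint32_t	cons_indx;
};

static int
demo_cq_poll(struct demo_cq *cq, int max)
{
	struct demo_cqe *cqe = &cq->cq_buf[cq->cons_indx];
	int polled = 0;

	while (polled < max && DEMO_OWNER_IS_SW(cqe)) {
		/* ...consume the work completion here... */
		cqe->flags |= 0x80;	/* return the entry to hardware */
		polled++;
		cq->cons_indx = (cq->cons_indx + 1) & (DEMO_CQ_SIZE - 1);
		cqe = &cq->cq_buf[cq->cons_indx];
	}
	return (polled);
}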
dapl_tavor_wr.c
59 dapls_tavor_wrid_get_entry(ib_cq_handle_t cq, tavor_hw_cqe_t *cqe, in dapls_tavor_wrid_get_entry() argument
71 qpnum = TAVOR_CQE_QPNUM_GET(cqe); in dapls_tavor_wrid_get_entry()
87 wre_tmp = dapli_tavor_wrid_find_match(wq, cqe); in dapls_tavor_wrid_get_entry()
124 dapli_tavor_wrid_find_match(dapls_tavor_workq_hdr_t *wq, tavor_hw_cqe_t *cqe) in dapli_tavor_wrid_find_match() argument
135 wqeaddr_size = TAVOR_CQE_WQEADDRSZ_GET(cqe); in dapli_tavor_wrid_find_match()
161 curr = dapli_tavor_wrid_find_match_srq(container, cqe); in dapli_tavor_wrid_find_match()
257 tavor_hw_cqe_t *cqe) in dapli_tavor_wrid_find_match_srq() argument
266 wqe_addr = TAVOR_CQE_WQEADDRSZ_GET(cqe) & 0xFFFFFFC0; in dapli_tavor_wrid_find_match_srq()
/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/
oce_mq.c
42 struct oce_mq_cqe *cqe = NULL; in oce_drain_mq_cq() local
55 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_drain_mq_cq()
56 while (cqe->u0.dw[3]) { in oce_drain_mq_cq()
57 DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe)); in oce_drain_mq_cq()
58 if (cqe->u0.s.async_event) { in oce_drain_mq_cq()
59 acqe = (struct oce_async_cqe_link_state *)cqe; in oce_drain_mq_cq()
76 cqe->u0.dw[3] = 0; in oce_drain_mq_cq()
78 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_drain_mq_cq()
100 struct oce_mq_cqe *cqe = NULL; in oce_clean_mq() local
104 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_clean_mq()
[all …]
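
oce_drain_mq_cq() treats a nonzero fourth dword as "this entry is valid", byte-swaps the whole CQE with DW_SWAP(), branches on the async_event flag, and finally zeroes the dword to recycle the slot. A sketch of that valid-dword drain loop with hypothetical demo_* types (ring size and dispatch details are assumptions):

#include <stdint.h>
#include <stddef.h>

#define DEMO_RING_SIZE	64	/* hypothetical */

struct demo_mq_cqe {
	uint32_t dw[4];		/* dw[3] nonzero => entry is valid */
};

static struct demo_mq_cqe demo_ring[DEMO_RING_SIZE];
static size_t demo_cons;	/* consumer index into demo_ring */

static int
demo_drain_mq_cq(void)
{
	struct demo_mq_cqe *cqe = &demo_ring[demo_cons];
	int ncqe = 0;

	while (cqe->dw[3] != 0) {
		/* byte-swap to host order here if required (DW_SWAP) */
		/* ...dispatch async events vs. mailbox completions... */
		cqe->dw[3] = 0;		/* hand the slot back */
		demo_cons = (demo_cons + 1) % DEMO_RING_SIZE;
		cqe = &demo_ring[demo_cons];
		ncqe++;
	}
	return (ncqe);
}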
oce_rx.c
37 struct oce_nic_rx_cqe *cqe);
39 struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
42 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
44 struct oce_nic_rx_cqe *cqe);
333 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) in oce_rx() argument
346 frag_cnt = cqe->u0.s.num_fragments & 0x7; in oce_rx()
354 pkt_len = cqe->u0.s.pkt_size; in oce_rx()
393 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) in oce_rx_bcopy() argument
410 pkt_len = cqe->u0.s.pkt_size; in oce_rx_bcopy()
412 frag_cnt = cqe->u0.s.num_fragments & 0x7; in oce_rx_bcopy()
[all …]
oce_tx.c
558 struct oce_nic_tx_cqe *cqe; in oce_process_tx_compl() local
571 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); in oce_process_tx_compl()
572 while (WQ_CQE_VALID(cqe)) { in oce_process_tx_compl()
574 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe)); in oce_process_tx_compl()
577 if (cqe->u0.s.status != 0) { in oce_process_tx_compl()
589 WQ_CQE_INVALIDATE(cqe); in oce_process_tx_compl()
591 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, in oce_process_tx_compl()
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/
lm_recv.c
350 IN const struct eth_end_agg_rx_cqe* cqe, in lm_tpa_stop() argument
359 … u32_t sge_size = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size; in lm_tpa_stop()
372 DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size); in lm_tpa_stop()
380 pkt->l2pkt_rx_info->total_packet_size = mm_le16_to_cpu(cqe->pkt_len); in lm_tpa_stop()
381 pkt->l2pkt_rx_info->coal_seg_cnt = mm_le16_to_cpu(cqe->num_of_coalesced_segs); in lm_tpa_stop()
382 pkt->l2pkt_rx_info->dup_ack_cnt = cqe->pure_ack_count; in lm_tpa_stop()
383 pkt->l2pkt_rx_info->ts_delta = mm_le32_to_cpu(cqe->timestamp_delta); in lm_tpa_stop()
392 ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl)); in lm_tpa_stop()
393 DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem); in lm_tpa_stop()
400 DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size); in lm_tpa_stop()
[all …]
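
lm_tpa_stop() sizes the SGE portion of an aggregation as the end-CQE's total packet length minus the bytes already placed when the aggregation started, and the DbgBreakIf() guards assert that the difference cannot go negative. A sketch of that accounting under hypothetical demo_* names (the struct is an assumption; the mm_le16_to_cpu() conversion is omitted):

#include <stdint.h>
#include <assert.h>

struct demo_agg_end_cqe {
	uint16_t pkt_len;		/* total aggregated bytes */
	uint16_t num_of_coalesced_segs;
};

static uint32_t
demo_tpa_sge_size(const struct demo_agg_end_cqe *cqe, uint32_t placed_bytes)
{
	/* Bytes still sitting in SGE pages = total - already placed. */
	assert(cqe->pkt_len >= placed_bytes);
	return ((uint32_t)cqe->pkt_len - placed_bytes);
}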
/illumos-gate/usr/src/uts/common/io/ib/adapters/tavor/
tavor_cq.c
50 tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
52 tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
53 static void tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,
842 tavor_hw_cqe_t *cqe; in tavor_cq_poll() local
868 cqe = &cq->cq_buf[cons_indx]; in tavor_cq_poll()
871 tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORCPU); in tavor_cq_poll()
884 while (TAVOR_CQE_OWNER_IS_SW(cq, cqe)) { in tavor_cq_poll()
885 status = tavor_cq_cqe_consume(state, cq, cqe, in tavor_cq_poll()
889 TAVOR_CQE_OWNER_SET_HW(cq, cqe); in tavor_cq_poll()
892 tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORDEV); in tavor_cq_poll()
[all …]
/illumos-gate/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l4/
lm_l4fp.c
267 char * cqe; in lm_tcp_qe_buffer_next_free_cqe() local
269 cqe = cqe_buffer->head; in lm_tcp_qe_buffer_next_free_cqe()
271 if(cqe == cqe_buffer->last) { in lm_tcp_qe_buffer_next_free_cqe()
274 cqe_buffer->head = cqe + cqe_buffer->qe_size; in lm_tcp_qe_buffer_next_free_cqe()
280 return cqe; in lm_tcp_qe_buffer_next_free_cqe()
289 char * cqe; in lm_tcp_qe_buffer_next_occupied_cqe() local
291 cqe = cqe_buffer->tail; in lm_tcp_qe_buffer_next_occupied_cqe()
293 if ((cqe == cqe_buffer->head) && (cqe_buffer->left > 0)) { in lm_tcp_qe_buffer_next_occupied_cqe()
297 if(cqe == cqe_buffer->last) { in lm_tcp_qe_buffer_next_occupied_cqe()
300 cqe_buffer->tail = cqe + cqe_buffer->qe_size; in lm_tcp_qe_buffer_next_occupied_cqe()
[all …]
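
The lm_l4fp.c hits show a circular staging buffer for CQEs: head hands out the next free slot, tail the next occupied one, and both wrap from last back to the first entry in qe_size steps. A sketch with hypothetical field names (the emptiness check on the occupied side is left to the caller here; the real code guards it with the left counter):

#include <stddef.h>

struct demo_qe_buffer {
	char	*first;		/* first slot in the ring */
	char	*last;		/* last slot in the ring */
	char	*head;		/* next free slot (producer side) */
	char	*tail;		/* next occupied slot (consumer side) */
	size_t	 qe_size;	/* bytes per entry */
	int	 left;		/* count of free slots */
};

static char *
demo_next_free_cqe(struct demo_qe_buffer *b)
{
	char *cqe = b->head;

	/* Advance the head, wrapping from the last slot to the first. */
	b->head = (cqe == b->last) ? b->first : cqe + b->qe_size;
	b->left--;
	return (cqe);
}

static char *
demo_next_occupied_cqe(struct demo_qe_buffer *b)
{
	char *cqe = b->tail;

	/* Advance the tail the same way, freeing the slot. */
	b->tail = (cqe == b->last) ? b->first : cqe + b->qe_size;
	b->left++;
	return (cqe);
}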
lm_l4tx.c
371 struct toe_tx_cqe * cqe, in lm_tcp_tx_process_cqe() argument
378 cmd = ((cqe->params & TOE_TX_CQE_COMPLETION_OPCODE) >> TOE_TX_CQE_COMPLETION_OPCODE_SHIFT); in lm_tcp_tx_process_cqe()
383 …DbgBreakIfAll(cqe->len & 0xc0000000); /* two upper bits on show a completion larger than 1GB - a b… in lm_tcp_tx_process_cqe()
389 if (cqe->len && in lm_tcp_tx_process_cqe()
400 lm_tcp_tx_inc_trm_aborted_bytes(pdev, tcp, cqe->len); in lm_tcp_tx_process_cqe()
401 cqe->len = 0; in lm_tcp_tx_process_cqe()
403 if (cqe->len) { in lm_tcp_tx_process_cqe()
405 lm_tcp_tx_cmp_process(pdev, tcp, cqe->len); in lm_tcp_tx_process_cqe()
434 DbgBreakIf(cqe->len); in lm_tcp_tx_process_cqe()
455 struct toe_tx_cqe *cqe, *hist_cqe; in lm_tcp_tx_process_cqes() local
[all …]
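
lm_tcp_tx_process_cqe() pulls the command opcode out of the CQE's packed params word with a mask and shift, then credits or discards cqe->len depending on the connection state. A sketch of the decode with invented mask/shift values (the DEMO_* constants are assumptions, not the TOE firmware's layout):

#include <stdint.h>

#define DEMO_TX_CQE_OPCODE_MASK		0xF0000000u	/* hypothetical */
#define DEMO_TX_CQE_OPCODE_SHIFT	28		/* hypothetical */

struct demo_toe_tx_cqe {
	uint32_t params;	/* packed opcode and flags */
	uint32_t len;		/* completed byte count */
};

static uint32_t
demo_tx_cqe_opcode(const struct demo_toe_tx_cqe *cqe)
{
	return ((cqe->params & DEMO_TX_CQE_OPCODE_MASK) >>
	    DEMO_TX_CQE_OPCODE_SHIFT);
}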
lm_l4fp.h
74 struct toe_rx_cqe * cqe,
84 struct toe_tx_cqe * cqe,
/illumos-gate/usr/src/uts/common/io/ib/adapters/hermon/
hermon_cq.c
58 hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
60 hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
870 hermon_hw_cqe_t *cqe; in hermon_cq_poll() local
899 cqe = &cq->cq_buf[cons_indx & wrap_around_mask]; in hermon_cq_poll()
912 while (HERMON_CQE_OWNER_IS_SW(cq, cqe, cons_indx, shift, mask)) { in hermon_cq_poll()
915 opcode = HERMON_CQE_OPCODE_GET(cq, cqe); in hermon_cq_poll()
926 cqe = &cq->cq_buf[cons_indx & wrap_around_mask]; in hermon_cq_poll()
936 hermon_cq_cqe_consume(state, cq, cqe, &wc_p[polled_cnt++]); in hermon_cq_poll()
942 cqe = &cq->cq_buf[cons_indx & wrap_around_mask]; in hermon_cq_poll()
1292 hermon_hw_cqe_t *cqe, ibt_wc_t *wc) in hermon_cq_cqe_consume() argument
[all …]
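
Unlike the Tavor loop, hermon_cq_poll() lets the consumer index run free and masks it into the ring (cons_indx & wrap_around_mask), and HERMON_CQE_OWNER_IS_SW() takes the index so ownership can be judged against the current sweep over the ring. A sketch of one plausible reading of that test, with a hypothetical layout (the parity scheme here is an assumption, not Hermon's documented format):

#include <stdint.h>

#define DEMO_CQ_SIZE	256	/* hypothetical, power of two */

struct demo_cqe {
	uint8_t	owner;		/* bit 7: toggled by hardware each sweep */
};

static int
demo_owner_is_sw(const struct demo_cqe *cqe, uint32_t cons_indx)
{
	/* Sweep parity flips each time the free-running index wraps. */
	uint32_t sweep = (cons_indx / DEMO_CQ_SIZE) & 1;

	return ((uint32_t)((cqe->owner >> 7) & 1) == sweep);
}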
/illumos-gate/usr/src/lib/udapl/udapl_tavor/common/
dapl_evd_util.c
55 IN ib_work_completion_t cqe);
424 dapli_evd_eh_print_cqe(IN ib_work_completion_t cqe) in dapli_evd_eh_print_cqe() argument
440 dto_cookie = (DAPL_COOKIE *) (uintptr_t)DAPL_GET_CQE_WRID(&cqe); in dapli_evd_eh_print_cqe()
447 "\t\t work_req_id 0x%llx\n", DAPL_GET_CQE_WRID(&cqe)); in dapli_evd_eh_print_cqe()
449 "\t\t op_type: %s\n", optable[DAPL_GET_CQE_OPTYPE(&cqe)]); in dapli_evd_eh_print_cqe()
450 if ((DAPL_GET_CQE_OPTYPE(&cqe) == OP_SEND) || in dapli_evd_eh_print_cqe()
451 (DAPL_GET_CQE_OPTYPE(&cqe) == OP_RDMA_WRITE)) { in dapli_evd_eh_print_cqe()
456 "\t\t bytes_num %d\n", DAPL_GET_CQE_BYTESNUM(&cqe)); in dapli_evd_eh_print_cqe()
459 "\t\t status %d\n", DAPL_GET_CQE_STATUS(&cqe)); in dapli_evd_eh_print_cqe()
851 ib_work_completion_t *cqe; in dapls_evd_post_premature_events() local
[all …]
/illumos-gate/usr/src/uts/common/sys/ib/adapters/tavor/
tavor_hw.h
1441 #define TAVOR_CQE_QPNUM_GET(cq, cqe) \ argument
1443 &((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
1445 #define TAVOR_CQE_DQPN_GET(cq, cqe) \ argument
1447 &((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
1449 #define TAVOR_CQE_SL_GET(cq, cqe) \ argument
1451 &((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
1453 #define TAVOR_CQE_GRH_GET(cq, cqe) \ argument
1455 &((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
1457 #define TAVOR_CQE_PATHBITS_GET(cq, cqe) \ argument
1459 &((uint32_t *)(cqe))[3]) & TAVOR_CQE_PATHBITS_MASK) >> \
[all …]
/illumos-gate/usr/src/uts/common/sys/ib/adapters/hermon/
hermon_hw.h
2653 #define HERMON_CQE_IS_IPOK(cq, cqe) \ argument
2654 (((uint8_t *)(cqe))[HERMON_CQE_IPOK] & HERMON_CQE_IPOK_BIT)
2656 #define HERMON_CQE_CKSUM(cq, cqe) \ argument
2657 ((((uint8_t *)(cqe))[HERMON_CQE_CKSUM_15_8] << 8) | \
2658 (((uint8_t *)(cqe))[HERMON_CQE_CKSUM_7_0]))
2660 #define HERMON_CQE_IPOIB_STATUS(cq, cqe) \ argument
2661 htonl((((uint32_t *)(cqe)))[4])
2663 #define HERMON_CQE_QPNUM_GET(cq, cqe) \ argument
2664 ((htonl((((uint32_t *)(cqe)))[0]) & HERMON_CQE_QPNUM_MASK) >> \
2667 #define HERMON_CQE_IMM_ETH_PKEY_CRED_GET(cq, cqe) \ argument
[all …]
hermon_wr.h
55 #define HERMON_CQE_WQEADDRSZ_GET(cq, cqe) \ argument
56 ((uint32_t)((((uint8_t *)(cqe))[0x18]) << 8) | ((uint8_t *)(cqe))[0x19])
186 ibt_wrid_t hermon_wrid_get_entry(hermon_cqhdl_t cqhdl, hermon_hw_cqe_t *cqe);
/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/
emlxs_sli4.c
162 CQE_ASYNC_t *cqe);
164 CQE_ASYNC_t *cqe);
5291 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe) in emlxs_sli4_process_async_event() argument
5297 if (hba->link_event_tag == cqe->un.link.event_tag) { in emlxs_sli4_process_async_event()
5299 } else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) { in emlxs_sli4_process_async_event()
5302 hba->link_event_tag = cqe->un.link.event_tag; in emlxs_sli4_process_async_event()
5304 switch (cqe->event_code) { in emlxs_sli4_process_async_event()
5308 switch (cqe->un.link.link_status) { in emlxs_sli4_process_async_event()
5313 cqe->valid, cqe->event_type, HBASTATS.LinkEvent); in emlxs_sli4_process_async_event()
5320 cqe->valid, cqe->event_type, HBASTATS.LinkEvent); in emlxs_sli4_process_async_event()
[all …]
/illumos-gate/usr/src/uts/common/io/ib/mgt/ibmf/
ibmf_handlers.c
313 ibt_wc_t cqe; in ibmf_i_mad_completions() local
330 status = ibt_poll_cq(cq_handle, &cqe, 1, NULL); in ibmf_i_mad_completions()
348 ibmf_i_process_completion(ibmf_cip, &cqe); in ibmf_i_mad_completions()
358 status = ibt_poll_cq(cq_handle, &cqe, 1, NULL); in ibmf_i_mad_completions()
376 ibmf_i_process_completion(ibmf_cip, &cqe); in ibmf_i_mad_completions()
/illumos-gate/usr/src/uts/common/io/nvme/
nvme.c
1874 nvme_cqe_t *cqe; in nvme_get_completed() local
1880 cqe = &cq->ncq_cq[cq->ncq_head]; in nvme_get_completed()
1883 if (cqe->cqe_sf.sf_p == cq->ncq_phase) in nvme_get_completed()
1886 qp = nvme->n_ioq[cqe->cqe_sqid]; in nvme_get_completed()
1889 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); in nvme_get_completed()
1892 qp->nq_sqhead = cqe->cqe_sqhd; in nvme_get_completed()
1901 "!received completion for unknown cid 0x%x", cqe->cqe_cid); in nvme_get_completed()
1914 ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid); in nvme_get_completed()
1915 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); in nvme_get_completed()
2008 nvme_cqe_t *cqe = &cmd->nc_cqe; in nvme_check_unknown_cmd_status() local
[all …]
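
nvme_get_completed() relies on the NVMe phase tag: a stale CQE still carries the phase the driver is tracking (ncq_phase), a freshly written one carries the inverse, and the tracked phase flips whenever the head wraps. A compact sketch of that scheme under hypothetical demo_* types (queue depth and field packing are assumptions, not the driver's):

#include <stdint.h>
#include <stddef.h>

#define DEMO_CQ_ENTRIES	64	/* hypothetical queue depth */

struct demo_nvme_cqe {
	uint16_t cid;		/* command identifier */
	uint16_t status;	/* bit 0: phase tag */
};

struct demo_nvme_cq {
	struct demo_nvme_cqe	cq[DEMO_CQ_ENTRIES];
	uint16_t		head;
	uint16_t		phase;	/* phase carried by stale entries */
};

static struct demo_nvme_cqe *
demo_get_completed(struct demo_nvme_cq *q)
{
	struct demo_nvme_cqe *cqe = &q->cq[q->head];

	/* A stale entry still matches the tracked phase; nothing new. */
	if ((uint16_t)(cqe->status & 1) == q->phase)
		return (NULL);

	/* Advance the head and flip the tracked phase on wraparound. */
	if (++q->head == DEMO_CQ_ENTRIES) {
		q->head = 0;
		q->phase ^= 1;
	}
	return (cqe);
}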
/illumos-gate/usr/src/uts/common/io/ib/clients/of/sol_uverbs/
sol_uverbs_comp.c
208 cq_attr.cq_size = cmd.cqe; in sol_uverbs_create_cq()
220 if (!cmd.cqe) { in sol_uverbs_create_cq()
328 resp.cqe = real_size; in sol_uverbs_create_cq()
534 resize_status = ibt_resize_cq(ucq->cq, cmd.cqe, &resp.cqe); in sol_uverbs_resize_cq()
/illumos-gate/usr/src/uts/common/io/qede/579xx/drivers/ecore/
ecore_spq.c
537 struct eth_slow_path_rx_cqe *cqe, in ecore_cqe_completion() argument
541 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol); in ecore_cqe_completion()
547 return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL); in ecore_cqe_completion()
551 struct eth_slow_path_rx_cqe *cqe) in ecore_eth_cqe_completion() argument
555 rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); in ecore_eth_cqe_completion()
559 cqe->ramrod_cmd_id); in ecore_eth_cqe_completion()
