/titanic_41/usr/src/uts/common/io/fibre-channel/fca/oce/ |
H A D | oce_mq.c |
    42   struct oce_mq_cqe *cqe = NULL;  in oce_drain_mq_cq() local
    55   cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  in oce_drain_mq_cq()
    56   while (cqe->u0.dw[3]) {  in oce_drain_mq_cq()
    57   DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));  in oce_drain_mq_cq()
    58   if (cqe->u0.s.async_event) {  in oce_drain_mq_cq()
    59   acqe = (struct oce_async_cqe_link_state *)cqe;  in oce_drain_mq_cq()
    76   cqe->u0.dw[3] = 0;  in oce_drain_mq_cq()
    78   cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  in oce_drain_mq_cq()
   100   struct oce_mq_cqe *cqe = NULL;  in oce_clean_mq() local
   104   cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  in oce_clean_mq()
   [all …]
|
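The oce_mq.c hits above outline the MQ completion-queue drain loop: an entry is valid while its last dword is nonzero, and the consumer zeroes that dword to recycle the slot before advancing the ring. A minimal standalone imitation of that pattern follows; the struct layout and ring helpers are simplified stand-ins, not the driver's own definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, simplified stand-ins for the driver's CQE and ring. */
    struct mq_cqe { uint32_t dw[4]; };

    struct ring {
        struct mq_cqe *items;
        unsigned       num_items;
        unsigned       cidx;          /* consumer index */
    };

    static struct mq_cqe *ring_consumer_item(struct ring *r)
    {
        return &r->items[r->cidx];
    }

    static void ring_advance_consumer(struct ring *r)
    {
        r->cidx = (r->cidx + 1) % r->num_items;
    }

    /* Drain every valid CQE: dw[3] != 0 means "valid"; zeroing it recycles the slot. */
    static int drain_mq_cq(struct ring *r)
    {
        int num_cqe = 0;
        struct mq_cqe *cqe = ring_consumer_item(r);

        while (cqe->dw[3]) {
            /* a real driver would byte-swap and dispatch async events here */
            cqe->dw[3] = 0;               /* mark consumed */
            ring_advance_consumer(r);
            num_cqe++;
            cqe = ring_consumer_item(r);
        }
        return num_cqe;
    }

    int main(void)
    {
        struct mq_cqe buf[4];
        struct ring r = { buf, 4, 0 };

        memset(buf, 0, sizeof (buf));
        buf[0].dw[3] = 1;                 /* two pending completions */
        buf[1].dw[3] = 1;
        printf("drained %d CQEs\n", drain_mq_cq(&r));
        return 0;
    }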
H A D | oce_rx.c |
    37   struct oce_nic_rx_cqe *cqe);
    39   struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
    42   static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
    44   struct oce_nic_rx_cqe *cqe);
   333   oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)  in oce_rx() argument
   346   frag_cnt = cqe->u0.s.num_fragments & 0x7;  in oce_rx()
   354   pkt_len = cqe->u0.s.pkt_size;  in oce_rx()
   393   oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)  in oce_rx_bcopy() argument
   410   pkt_len = cqe->u0.s.pkt_size;  in oce_rx_bcopy()
   412   frag_cnt = cqe->u0.s.num_fragments & 0x7;  in oce_rx_bcopy()
   [all …]
|
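oce_rx() and oce_rx_bcopy() pull the packet length and fragment count directly out of RX CQE bitfields, masking the fragment count to its low three bits. A toy decode of that idea; the word layout here is invented for illustration, not the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative RX CQE word: bits [15:0] pkt_size, bits [18:16] num_fragments. */
    struct rx_cqe { uint32_t w0; };

    int main(void)
    {
        struct rx_cqe cqe = { (3u << 16) | 1500u };
        uint32_t pkt_len  = cqe.w0 & 0xFFFFu;
        uint32_t frag_cnt = (cqe.w0 >> 16) & 0x7u;  /* same "& 0x7" guard as oce_rx() */

        printf("pkt_len=%u frag_cnt=%u\n", (unsigned)pkt_len, (unsigned)frag_cnt);
        return 0;
    }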
H A D | oce_tx.c |
   558   struct oce_nic_tx_cqe *cqe;  in oce_process_tx_compl() local
   571   cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);  in oce_process_tx_compl()
   572   while (WQ_CQE_VALID(cqe)) {  in oce_process_tx_compl()
   574   DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));  in oce_process_tx_compl()
   577   if (cqe->u0.s.status != 0) {  in oce_process_tx_compl()
   589   WQ_CQE_INVALIDATE(cqe);  in oce_process_tx_compl()
   591   cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,  in oce_process_tx_compl()
|
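oce_process_tx_compl() wraps the same valid/consume idiom in WQ_CQE_VALID()/WQ_CQE_INVALIDATE() macros and also inspects a per-completion status field. A compact sketch; the macro bodies below are assumptions modeled on the MQ pattern above, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    struct tx_cqe { uint32_t dw[4]; uint32_t status; };

    /* Assumed macro shapes: valid while dw[3] is nonzero, invalidate by zeroing it. */
    #define WQ_CQE_VALID(cqe)      ((cqe)->dw[3] != 0)
    #define WQ_CQE_INVALIDATE(cqe) ((cqe)->dw[3] = 0)

    int main(void)
    {
        struct tx_cqe cqe = { { 0, 0, 0, 1 }, 0 };

        while (WQ_CQE_VALID(&cqe)) {
            if (cqe.status != 0)          /* error path, as at line 577 */
                printf("TX completion error: %u\n", (unsigned)cqe.status);
            WQ_CQE_INVALIDATE(&cqe);      /* hand the slot back */
        }
        return 0;
    }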
/titanic_41/usr/src/lib/udapl/udapl_tavor/tavor/ |
H A D | dapl_tavor_hw.h |
   177   #define TAVOR_CQE_QPNUM_GET(cqe) \  argument
   178   ((BETOH_32(((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
   180   #define TAVOR_CQE_DQPN_GET(cqe) \  argument
   181   ((BETOH_32(((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
   183   #define TAVOR_CQE_SL_GET(cqe) \  argument
   184   ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
   186   #define TAVOR_CQE_GRH_GET(cqe) \  argument
   187   ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
   189   #define TAVOR_CQE_PATHBITS_GET(cqe) \  argument
   190   ((BETOH_32(((uint32_t *)(cqe))[3]) & TAVOR_CQE_PATHBITS_MASK) >>\
   [all …]
|
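Every getter in dapl_tavor_hw.h has the same shape: treat the CQE as an array of big-endian 32-bit words, byte-swap the word that holds the field (BETOH_32), mask, then shift. A self-contained imitation using ntohl() as the swap; the mask and shift values are invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* ntohl()/htonl() standing in for BETOH_32() */

    /* Illustrative field layout: QP number in bits [31:8] of word 0. */
    #define CQE_QPNUM_MASK   0xFFFFFF00u
    #define CQE_QPNUM_SHIFT  8

    #define CQE_QPNUM_GET(cqe) \
        ((ntohl(((const uint32_t *)(cqe))[0]) & CQE_QPNUM_MASK) >> CQE_QPNUM_SHIFT)

    int main(void)
    {
        uint32_t cqe[8] = { htonl(0x12345u << CQE_QPNUM_SHIFT) };  /* QP 0x12345 */

        printf("qpnum = 0x%x\n", (unsigned)CQE_QPNUM_GET(cqe));
        return 0;
    }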
H A D | dapl_hermon_hw.c |
    42   #define HERMON_CQE_OPCODE_GET(cqe) (((uint8_t *)cqe)[31] & 0x1F)  argument
    43   #define HERMON_CQE_SENDRECV_GET(cqe) (((uint8_t *)cqe)[31] & 0x40)  argument
    44   #define HERMON_CQE_OWNER_IS_SW(cq, cqe) ((((uint8_t *)cqe)[31] >> 7) == \  argument
   531   uint32_t *cqe;  in dapli_hermon_cq_peek() local
   542   cqe = (uint32_t *)&cq->cq_addr[cons_indx];  in dapli_hermon_cq_peek()
   549   while (HERMON_CQE_OWNER_IS_SW(cq, cqe)) {  in dapli_hermon_cq_peek()
   550   opcode = HERMON_CQE_OPCODE_GET(cqe);  in dapli_hermon_cq_peek()
   554   TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);  in dapli_hermon_cq_peek()
   565   cqe = (uint32_t *)&cq->cq_addr[cons_indx];  in dapli_hermon_cq_peek()
   618   uint32_t *cqe;  in dapli_hermon_cq_poll() local
   [all …]
|
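The Hermon CQE packs three facts into its last byte: the opcode in bits 4:0, a send/receive flag in bit 6, and the ownership bit on top. In the driver the ownership bit is compared against a software-tracked polarity that flips each time the ring wraps; the sketch below only extracts the raw fields, mirroring the three macros at lines 42-44:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-byte CQE; ownership lives in the top bit of byte 31,
     * opcode in its low 5 bits, send/recv flag in bit 6. */
    #define CQE_OPCODE_GET(cqe)    (((const uint8_t *)(cqe))[31] & 0x1F)
    #define CQE_SENDRECV_GET(cqe)  (((const uint8_t *)(cqe))[31] & 0x40)
    #define CQE_OWNER_BIT(cqe)     (((const uint8_t *)(cqe))[31] >> 7)

    int main(void)
    {
        uint8_t cqe[32] = { 0 };

        cqe[31] = 0x80 | 0x40 | 0x0A;   /* HW-owner bit set, send, opcode 0x0A */
        printf("owner=%u sendrecv=%s opcode=0x%02x\n",
            (unsigned)CQE_OWNER_BIT(cqe),
            CQE_SENDRECV_GET(cqe) ? "send" : "recv",
            (unsigned)CQE_OPCODE_GET(cqe));
        return 0;
    }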
H A D | dapl_arbel_hw.c |
   554   tavor_hw_cqe_t *cqe;  in dapli_arbel_cq_peek() local
   572   cqe = &cq->cq_addr[cons_indx];  in dapli_arbel_cq_peek()
   579   while (TAVOR_CQE_OWNER_IS_SW(cqe)) {  in dapli_arbel_cq_peek()
   580   opcode = TAVOR_CQE_OPCODE_GET(cqe);  in dapli_arbel_cq_peek()
   585   TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);  in dapli_arbel_cq_peek()
   596   cqe = &cq->cq_addr[cons_indx];  in dapli_arbel_cq_peek()
   614   tavor_hw_cqe_t *cqe;  in dapli_arbel_cq_poll() local
   631   cqe = &cq->cq_addr[cons_indx];  in dapli_arbel_cq_poll()
   644   while (TAVOR_CQE_OWNER_IS_SW(cqe)) {  in dapli_arbel_cq_poll()
   645   status = dapli_arbel_cq_cqe_consume(cq, cqe,  in dapli_arbel_cq_poll()
   [all …]
|
H A D | dapl_tavor_hw.c |
   786   tavor_hw_cqe_t *cqe;  in dapli_tavor_cq_peek() local
   804   cqe = &cq->cq_addr[cons_indx];  in dapli_tavor_cq_peek()
   811   while (TAVOR_CQE_OWNER_IS_SW(cqe)) {  in dapli_tavor_cq_peek()
   812   opcode = TAVOR_CQE_OPCODE_GET(cqe);  in dapli_tavor_cq_peek()
   817   TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);  in dapli_tavor_cq_peek()
   828   cqe = &cq->cq_addr[cons_indx];  in dapli_tavor_cq_peek()
   843   tavor_hw_cqe_t *cqe;  in dapli_tavor_cq_poll() local
   861   cqe = &cq->cq_addr[cons_indx];  in dapli_tavor_cq_poll()
   874   while (TAVOR_CQE_OWNER_IS_SW(cqe)) {  in dapli_tavor_cq_poll()
   875   status = dapli_tavor_cq_cqe_consume(cq, cqe,  in dapli_tavor_cq_poll()
   [all …]
|
H A D | dapl_tavor_wr.c |
    59   dapls_tavor_wrid_get_entry(ib_cq_handle_t cq, tavor_hw_cqe_t *cqe,  in dapls_tavor_wrid_get_entry() argument
    71   qpnum = TAVOR_CQE_QPNUM_GET(cqe);  in dapls_tavor_wrid_get_entry()
    87   wre_tmp = dapli_tavor_wrid_find_match(wq, cqe);  in dapls_tavor_wrid_get_entry()
   124   dapli_tavor_wrid_find_match(dapls_tavor_workq_hdr_t *wq, tavor_hw_cqe_t *cqe)  in dapli_tavor_wrid_find_match() argument
   135   wqeaddr_size = TAVOR_CQE_WQEADDRSZ_GET(cqe);  in dapli_tavor_wrid_find_match()
   161   curr = dapli_tavor_wrid_find_match_srq(container, cqe);  in dapli_tavor_wrid_find_match()
   257   tavor_hw_cqe_t *cqe)  in dapli_tavor_wrid_find_match_srq() argument
   266   wqe_addr = TAVOR_CQE_WQEADDRSZ_GET(cqe) & 0xFFFFFFC0;  in dapli_tavor_wrid_find_match_srq()
|
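dapls_tavor_wrid_get_entry() maps a completion back to its work-request ID by matching the WQE address carried in the CQE; the SRQ variant masks off the low six bits, which encode the WQE size, before comparing (line 266). A sketch of that mask-and-match step, with a flat lookup table invented in place of the driver's work-queue lists:

    #include <stdint.h>
    #include <stdio.h>

    /* Combined "WQE address + size" field: low 6 bits encode size, rest is address. */
    #define WQEADDRSZ_TO_ADDR(v)  ((v) & 0xFFFFFFC0u)

    struct wrid_entry { uint32_t wqe_addr; uint64_t wrid; };

    static uint64_t wrid_find_match(const struct wrid_entry *tbl, int n,
        uint32_t cqe_wqeaddrsz)
    {
        uint32_t wqe_addr = WQEADDRSZ_TO_ADDR(cqe_wqeaddrsz);

        for (int i = 0; i < n; i++)
            if (tbl[i].wqe_addr == wqe_addr)
                return tbl[i].wrid;
        return 0;   /* not found */
    }

    int main(void)
    {
        struct wrid_entry tbl[] = { { 0x1000, 0xAA }, { 0x1040, 0xBB } };

        /* CQE reports address 0x1040 with size code 0x12 in the low bits. */
        printf("wrid=0x%llx\n",
            (unsigned long long)wrid_find_match(tbl, 2, 0x1040u | 0x12u));
        return 0;
    }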
/titanic_41/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/device/ |
H A D | lm_recv.c |
   350   IN const struct eth_end_agg_rx_cqe* cqe,  in lm_tpa_stop() argument
   359   … u32_t sge_size = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;  in lm_tpa_stop()
   372   DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size);  in lm_tpa_stop()
   380   pkt->l2pkt_rx_info->total_packet_size = mm_le16_to_cpu(cqe->pkt_len);  in lm_tpa_stop()
   381   pkt->l2pkt_rx_info->coal_seg_cnt = mm_le16_to_cpu(cqe->num_of_coalesced_segs);  in lm_tpa_stop()
   382   pkt->l2pkt_rx_info->dup_ack_cnt = cqe->pure_ack_count;  in lm_tpa_stop()
   383   pkt->l2pkt_rx_info->ts_delta = mm_le32_to_cpu(cqe->timestamp_delta);  in lm_tpa_stop()
   392   ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));  in lm_tpa_stop()
   393   DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem);  in lm_tpa_stop()
   400   DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size);  in lm_tpa_stop()
   [all …]
|
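lm_tpa_stop() computes how many aggregated bytes landed in the SGE buffers by subtracting the bytes already placed in the first buffer from the little-endian total in cqe->pkt_len, and DbgBreaks if that subtraction would go negative. The same arithmetic standalone, with mm_le16_to_cpu() stubbed by a local helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian 16-bit load, standing in for mm_le16_to_cpu(). */
    static uint16_t le16_to_cpu(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        uint8_t  cqe_pkt_len[2] = { 0xDC, 0x05 };   /* 0x05DC = 1500 bytes total */
        uint32_t placed_in_first_buf = 128;          /* bytes already placed */

        uint32_t total = le16_to_cpu(cqe_pkt_len);
        if (total < placed_in_first_buf) {           /* lm_tpa_stop() DbgBreaks here */
            fprintf(stderr, "corrupt CQE\n");
            return 1;
        }
        printf("sge bytes = %u\n", (unsigned)(total - placed_in_first_buf));
        return 0;
    }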
/titanic_41/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l4/ |
H A D | lm_l4fp.c |
   267   char * cqe;  in lm_tcp_qe_buffer_next_free_cqe() local
   269   cqe = cqe_buffer->head;  in lm_tcp_qe_buffer_next_free_cqe()
   271   if(cqe == cqe_buffer->last) {  in lm_tcp_qe_buffer_next_free_cqe()
   274   cqe_buffer->head = cqe + cqe_buffer->qe_size;  in lm_tcp_qe_buffer_next_free_cqe()
   280   return cqe;  in lm_tcp_qe_buffer_next_free_cqe()
   289   char * cqe;  in lm_tcp_qe_buffer_next_occupied_cqe() local
   291   cqe = cqe_buffer->tail;  in lm_tcp_qe_buffer_next_occupied_cqe()
   293   if ((cqe == cqe_buffer->head) && (cqe_buffer->left > 0)) {  in lm_tcp_qe_buffer_next_occupied_cqe()
   297   if(cqe == cqe_buffer->last) {  in lm_tcp_qe_buffer_next_occupied_cqe()
   300   cqe_buffer->tail = cqe + cqe_buffer->qe_size;  in lm_tcp_qe_buffer_next_occupied_cqe()
   [all …]
|
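lm_tcp_qe_buffer_next_free_cqe() walks a byte-addressed circular staging buffer: the head pointer advances one qe_size stride per call and wraps from the last entry back to the first. A runnable reduction of that allocator, with made-up sizes:

    #include <stdio.h>

    /* Byte-addressed circular CQE staging buffer, as in lm_tcp_qe_buffer_*():
     * head walks in qe_size strides and wraps from last back to first. */
    struct qe_buffer {
        char    *first, *last;   /* first and last entry in the buffer */
        char    *head;           /* next free slot */
        unsigned qe_size;
    };

    static char *next_free_cqe(struct qe_buffer *b)
    {
        char *cqe = b->head;

        if (cqe == b->last)
            b->head = b->first;            /* wrap */
        else
            b->head = cqe + b->qe_size;    /* advance one entry */
        return cqe;
    }

    int main(void)
    {
        char storage[4 * 16];
        struct qe_buffer b = { storage, storage + 3 * 16, storage, 16 };

        for (int i = 0; i < 5; i++)        /* fifth call wraps back to slot 0 */
            printf("slot %ld\n", (long)(next_free_cqe(&b) - storage) / 16);
        return 0;
    }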
H A D | lm_l4tx.c |
   371   struct toe_tx_cqe * cqe,  in lm_tcp_tx_process_cqe() argument
   378   cmd = ((cqe->params & TOE_TX_CQE_COMPLETION_OPCODE) >> TOE_TX_CQE_COMPLETION_OPCODE_SHIFT);  in lm_tcp_tx_process_cqe()
   383   …DbgBreakIfAll(cqe->len & 0xc0000000); /* two upper bits on show a completion larger than 1GB - a b…  in lm_tcp_tx_process_cqe()
   389   if (cqe->len &&  in lm_tcp_tx_process_cqe()
   400   lm_tcp_tx_inc_trm_aborted_bytes(pdev, tcp, cqe->len);  in lm_tcp_tx_process_cqe()
   401   cqe->len = 0;  in lm_tcp_tx_process_cqe()
   403   if (cqe->len) {  in lm_tcp_tx_process_cqe()
   405   lm_tcp_tx_cmp_process(pdev, tcp, cqe->len);  in lm_tcp_tx_process_cqe()
   434   DbgBreakIf(cqe->len);  in lm_tcp_tx_process_cqe()
   455   struct toe_tx_cqe *cqe, *hist_cqe;  in lm_tcp_tx_process_cqes() local
   [all …]
|
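lm_tcp_tx_process_cqe() decodes the completion opcode with a mask-and-shift on cqe->params, and treats any completion with either of the top two length bits set (i.e. larger than 1 GB) as a bug. Both checks in isolation, with illustrative mask values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative packing: opcode in bits [31:24] of params. */
    #define TX_CQE_OPCODE        0xFF000000u
    #define TX_CQE_OPCODE_SHIFT  24

    int main(void)
    {
        uint32_t params = (0x2Bu << TX_CQE_OPCODE_SHIFT) | 0x1234u;
        uint32_t len    = 0x100;

        uint32_t cmd = (params & TX_CQE_OPCODE) >> TX_CQE_OPCODE_SHIFT;
        if (len & 0xC0000000u) {   /* >1GB completion: lm_l4tx.c DbgBreaks */
            fprintf(stderr, "bogus completion length\n");
            return 1;
        }
        printf("cmd=0x%x len=%u\n", (unsigned)cmd, (unsigned)len);
        return 0;
    }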
H A D | lm_l4fp.h |
    74   struct toe_rx_cqe * cqe,
    84   struct toe_tx_cqe * cqe,
|
H A D | lm_l4rx.c |
  1183   struct toe_rx_cqe * cqe,  in lm_tcp_rx_process_cqe() argument
  1191   cmd = ((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT);  in lm_tcp_rx_process_cqe()
  1197   …nbytes = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_NBYTES) >> TOE_RX_CQE_OOO_PARAMS…  in lm_tcp_rx_process_cqe()
  1198   …isle_num = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_ISLE_NUM) >> TOE_RX_CQE_OOO_PA…  in lm_tcp_rx_process_cqe()
  1204   nbytes = cqe->data.raw_data;  in lm_tcp_rx_process_cqe()
  1206   …nbytes = (cqe->data.in_order_params.in_order_params & TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES) >> TOE_RX…  in lm_tcp_rx_process_cqe()
  1427   struct toe_rx_cqe *cqe, *hist_cqe;  in lm_tcp_rx_process_cqes() local
  1487   cqe = lm_toe_bd_chain_consume_bd(&rcq->bd_chain);  in lm_tcp_rx_process_cqes()
  1488   update_stats_type = cqe->data.raw_data;  in lm_tcp_rx_process_cqes()
  1489   DbgBreakIf(!cqe);  in lm_tcp_rx_process_cqes()
   [all …]
|
/titanic_41/usr/src/uts/common/io/ib/adapters/tavor/ |
H A D | tavor_cq.c |
    50   tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
    52   tavor_hw_cqe_t *cqe, ibt_wc_t *wc);
    53   static void tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,
   910   tavor_hw_cqe_t *cqe;  in tavor_cq_poll() local
   941   cqe = &cq->cq_buf[cons_indx];  in tavor_cq_poll()
   944   tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORCPU);  in tavor_cq_poll()
   957   while (TAVOR_CQE_OWNER_IS_SW(cq, cqe)) {  in tavor_cq_poll()
   958   status = tavor_cq_cqe_consume(state, cq, cqe,  in tavor_cq_poll()
   962   TAVOR_CQE_OWNER_SET_HW(cq, cqe);  in tavor_cq_poll()
   965   tavor_cqe_sync(cq, cqe, DDI_DMA_SYNC_FORDEV);  in tavor_cq_poll()
   [all …]
|
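tavor_cq_poll() brackets each CQE access with DMA syncs: sync FORCPU before reading, consume entries while the ownership macro says software owns them, hand each entry back with TAVOR_CQE_OWNER_SET_HW(), then sync FORDEV. A userspace skeleton of that loop with the syncs stubbed out and a single assumed owner bit:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CQ_SIZE  4
    #define OWNER_HW 0x80u

    struct cqe { uint8_t owner; uint8_t data; };

    /* Stand-ins for tavor_cqe_sync(..., DDI_DMA_SYNC_FORCPU/FORDEV). */
    static void sync_for_cpu(struct cqe *c) { (void)c; }
    static void sync_for_dev(struct cqe *c) { (void)c; }

    static int cq_poll(struct cqe *buf, unsigned *cons_indx, int max)
    {
        int polled = 0;
        struct cqe *cqe = &buf[*cons_indx];

        sync_for_cpu(cqe);
        /* Software owns the entry while the owner bit is clear. */
        while (!(cqe->owner & OWNER_HW) && polled < max) {
            printf("consumed CQE %u (data=%u)\n", *cons_indx, (unsigned)cqe->data);
            cqe->owner |= OWNER_HW;              /* hand the entry back to HW */
            sync_for_dev(cqe);
            *cons_indx = (*cons_indx + 1) % CQ_SIZE;
            cqe = &buf[*cons_indx];
            sync_for_cpu(cqe);
            polled++;
        }
        return polled;
    }

    int main(void)
    {
        struct cqe buf[CQ_SIZE];
        unsigned ci = 0;

        memset(buf, OWNER_HW, sizeof (buf));     /* everything HW-owned ... */
        buf[0] = (struct cqe){ 0, 11 };          /* ... except two completions */
        buf[1] = (struct cqe){ 0, 22 };
        printf("polled %d\n", cq_poll(buf, &ci, CQ_SIZE));
        return 0;
    }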
/titanic_41/usr/src/uts/common/io/ib/adapters/hermon/ |
H A D | hermon_cq.c |
    58   hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
    60   hermon_hw_cqe_t *cqe, ibt_wc_t *wc);
   870   hermon_hw_cqe_t *cqe;  in hermon_cq_poll() local
   899   cqe = &cq->cq_buf[cons_indx & wrap_around_mask];  in hermon_cq_poll()
   912   while (HERMON_CQE_OWNER_IS_SW(cq, cqe, cons_indx, shift, mask)) {  in hermon_cq_poll()
   915   opcode = HERMON_CQE_OPCODE_GET(cq, cqe);  in hermon_cq_poll()
   926   cqe = &cq->cq_buf[cons_indx & wrap_around_mask];  in hermon_cq_poll()
   936   hermon_cq_cqe_consume(state, cq, cqe, &wc_p[polled_cnt++]);  in hermon_cq_poll()
   942   cqe = &cq->cq_buf[cons_indx & wrap_around_mask];  in hermon_cq_poll()
  1292   hermon_hw_cqe_t *cqe, ibt_wc_t *wc)  in hermon_cq_cqe_consume() argument
   [all …]
|
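hermon_cq_poll() differs from the Tavor loop mainly in its indexing: the consumer index counts up monotonically and is masked with (CQ size - 1) on every access, which is why the ownership macro also receives the raw index plus shift/mask, letting it derive the expected owner polarity from the wrap count. The indexing half in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Power-of-two CQ: index with a monotonically increasing consumer
         * counter masked by (size - 1), as hermon_cq_poll() does. */
        enum { CQ_SIZE = 8 };
        uint32_t wrap_around_mask = CQ_SIZE - 1;

        for (uint32_t cons_indx = 5; cons_indx < 12; cons_indx++)
            printf("cons_indx=%u -> slot %u\n", (unsigned)cons_indx,
                (unsigned)(cons_indx & wrap_around_mask));
        return 0;
    }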
/titanic_41/usr/src/lib/udapl/udapl_tavor/common/ |
H A D | dapl_evd_util.c |
    55   IN ib_work_completion_t cqe);
   417   IN ib_work_completion_t cqe)  in dapli_evd_eh_print_cqe() argument
   433   dto_cookie = (DAPL_COOKIE *) (uintptr_t)DAPL_GET_CQE_WRID(&cqe);  in dapli_evd_eh_print_cqe()
   440   "\t\t work_req_id 0x%llx\n", DAPL_GET_CQE_WRID(&cqe));  in dapli_evd_eh_print_cqe()
   442   "\t\t op_type: %s\n", optable[DAPL_GET_CQE_OPTYPE(&cqe)]);  in dapli_evd_eh_print_cqe()
   443   if ((DAPL_GET_CQE_OPTYPE(&cqe) == OP_SEND) ||  in dapli_evd_eh_print_cqe()
   444   (DAPL_GET_CQE_OPTYPE(&cqe) == OP_RDMA_WRITE)) {  in dapli_evd_eh_print_cqe()
   449   "\t\t bytes_num %d\n", DAPL_GET_CQE_BYTESNUM(&cqe));  in dapli_evd_eh_print_cqe()
   452   "\t\t status %d\n", DAPL_GET_CQE_STATUS(&cqe));  in dapli_evd_eh_print_cqe()
   844   ib_work_completion_t *cqe;  in dapls_evd_post_premature_events() local
   [all …]
|
/titanic_41/usr/src/uts/common/sys/ib/adapters/tavor/ |
H A D | tavor_hw.h |
  1441   #define TAVOR_CQE_QPNUM_GET(cq, cqe) \  argument
  1443   &((uint32_t *)(cqe))[0]) & TAVOR_CQE_QPNUM_MASK) >> \
  1445   #define TAVOR_CQE_DQPN_GET(cq, cqe) \  argument
  1447   &((uint32_t *)(cqe))[2]) & TAVOR_CQE_DQPN_MASK) >> \
  1449   #define TAVOR_CQE_SL_GET(cq, cqe) \  argument
  1451   &((uint32_t *)(cqe))[3]) & TAVOR_CQE_SL_MASK) >> \
  1453   #define TAVOR_CQE_GRH_GET(cq, cqe) \  argument
  1455   &((uint32_t *)(cqe))[3]) & TAVOR_CQE_GRH_MASK) >> \
  1457   #define TAVOR_CQE_PATHBITS_GET(cq, cqe) \  argument
  1459   &((uint32_t *)(cqe))[3]) & TAVOR_CQE_PATHBITS_MASK) >> \
   [all …]
|
H A D | tavor_wr.h |
   303   uint64_t tavor_wrid_get_entry(tavor_cqhdl_t cqhdl, tavor_hw_cqe_t *cqe,
   314   tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe);
|
/titanic_41/usr/src/uts/common/sys/ib/adapters/hermon/ |
H A D | hermon_hw.h |
  2653   #define HERMON_CQE_IS_IPOK(cq, cqe) \  argument
  2654   (((uint8_t *)(cqe))[HERMON_CQE_IPOK] & HERMON_CQE_IPOK_BIT)
  2656   #define HERMON_CQE_CKSUM(cq, cqe) \  argument
  2657   ((((uint8_t *)(cqe))[HERMON_CQE_CKSUM_15_8] << 8) | \
  2658   (((uint8_t *)(cqe))[HERMON_CQE_CKSUM_7_0]))
  2660   #define HERMON_CQE_IPOIB_STATUS(cq, cqe) \  argument
  2661   htonl((((uint32_t *)(cqe)))[4])
  2663   #define HERMON_CQE_QPNUM_GET(cq, cqe) \  argument
  2664   ((htonl((((uint32_t *)(cqe)))[0]) & HERMON_CQE_QPNUM_MASK) >> \
  2667   #define HERMON_CQE_IMM_ETH_PKEY_CRED_GET(cq, cqe) \  argument
   [all …]
|
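HERMON_CQE_CKSUM() reassembles a 16-bit checksum from two separately addressed CQE bytes. The same assembly, standalone, with assumed byte offsets in place of the header's HERMON_CQE_CKSUM_15_8/7_0 constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed byte offsets within the CQE for the two checksum bytes. */
    enum { CKSUM_15_8 = 0x0C, CKSUM_7_0 = 0x0D };

    #define CQE_CKSUM(cqe) \
        ((((const uint8_t *)(cqe))[CKSUM_15_8] << 8) | \
         ((const uint8_t *)(cqe))[CKSUM_7_0])

    int main(void)
    {
        uint8_t cqe[32] = { 0 };

        cqe[CKSUM_15_8] = 0xAB;
        cqe[CKSUM_7_0]  = 0xCD;
        printf("cksum=0x%04x\n", (unsigned)CQE_CKSUM(cqe));   /* 0xabcd */
        return 0;
    }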
H A D | hermon_wr.h |
    55   #define HERMON_CQE_WQEADDRSZ_GET(cq, cqe) \  argument
    56   ((uint32_t)((((uint8_t *)(cqe))[0x18]) << 8) | ((uint8_t *)(cqe))[0x19])
   186   ibt_wrid_t hermon_wrid_get_entry(hermon_cqhdl_t cqhdl, hermon_hw_cqe_t *cqe);
|
/titanic_41/usr/src/uts/common/io/fibre-channel/fca/emlxs/ |
H A D | emlxs_sli4.c |
   140   CQE_ASYNC_t *cqe);
   142   CQE_ASYNC_t *cqe);
  4962   emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)  in emlxs_sli4_process_async_event() argument
  4968   if (hba->link_event_tag == cqe->un.link.event_tag) {  in emlxs_sli4_process_async_event()
  4970   } else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {  in emlxs_sli4_process_async_event()
  4973   hba->link_event_tag = cqe->un.link.event_tag;  in emlxs_sli4_process_async_event()
  4975   switch (cqe->event_code) {  in emlxs_sli4_process_async_event()
  4979   switch (cqe->un.link.link_status) {  in emlxs_sli4_process_async_event()
  4984   cqe->valid, cqe->event_type, HBASTATS.LinkEvent);  in emlxs_sli4_process_async_event()
  4991   cqe->valid, cqe->event_type, HBASTATS.LinkEvent);  in emlxs_sli4_process_async_event()
   [all …]
|
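emlxs_sli4_process_async_event() deduplicates link events by tag: an equal tag marks a replayed event, a gap larger than one means events were missed, and the tag is then cached for the next comparison. The comparison logic on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Duplicate/skipped link-event detection by tag, as at lines 4968-4973:
     * equal tag => duplicate, a gap of more than one => missed event(s). */
    int main(void)
    {
        uint32_t link_event_tag = 4;    /* last tag we processed */
        uint32_t cqe_tag = 6;           /* tag in the incoming async CQE */

        if (link_event_tag == cqe_tag)
            printf("duplicate link event\n");
        else if (link_event_tag + 1 < cqe_tag)
            printf("missed %u event(s)\n", (unsigned)(cqe_tag - link_event_tag - 1));
        link_event_tag = cqe_tag;       /* remember the latest tag */
        printf("tag now %u\n", (unsigned)link_event_tag);
        return 0;
    }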
/titanic_41/usr/src/uts/common/io/ib/mgt/ibmf/ |
H A D | ibmf_handlers.c |
   313   ibt_wc_t cqe;  in ibmf_i_mad_completions() local
   330   status = ibt_poll_cq(cq_handle, &cqe, 1, NULL);  in ibmf_i_mad_completions()
   348   ibmf_i_process_completion(ibmf_cip, &cqe);  in ibmf_i_mad_completions()
   358   status = ibt_poll_cq(cq_handle, &cqe, 1, NULL);  in ibmf_i_mad_completions()
   376   ibmf_i_process_completion(ibmf_cip, &cqe);  in ibmf_i_mad_completions()
|
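ibmf_i_mad_completions() drains its CQ by calling ibt_poll_cq() for one work completion at a time and processing each until the poll stops returning success. A stubbed version of that loop shape; the stub merely stands in for the real IBTF call and its return codes:

    #include <stdio.h>

    /* Stub standing in for ibt_poll_cq(cq_handle, &cqe, 1, NULL):
     * returns 0 (success) while completions remain, nonzero when empty. */
    static int pending = 3;

    static int poll_cq_one(int *wc)
    {
        if (pending == 0)
            return 1;        /* roughly, "CQ empty" */
        *wc = pending--;
        return 0;
    }

    int main(void)
    {
        int wc;

        /* Same shape as ibmf_i_mad_completions(): poll one entry at a
         * time and process until the CQ drains. */
        while (poll_cq_one(&wc) == 0)
            printf("processing completion %d\n", wc);
        return 0;
    }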
/titanic_41/usr/src/uts/common/io/nvme/ |
H A D | nvme.c |
   969   nvme_cqe_t *cqe;  in nvme_retrieve_cmd() local
   976   cqe = &qp->nq_cq[qp->nq_cqhead];  in nvme_retrieve_cmd()
   979   if (cqe->cqe_sf.sf_p == qp->nq_phase) {  in nvme_retrieve_cmd()
   984   ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);  in nvme_retrieve_cmd()
   986   cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);  in nvme_retrieve_cmd()
   988   ASSERT(cmd->nc_sqid == cqe->cqe_sqid);  in nvme_retrieve_cmd()
   989   bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));  in nvme_retrieve_cmd()
   991   qp->nq_sqhead = cqe->cqe_sqhd;  in nvme_retrieve_cmd()
  1008   nvme_cqe_t *cqe = &cmd->nc_cqe;  in nvme_check_unknown_cmd_status() local
  1013   cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,  in nvme_check_unknown_cmd_status()
   [all …]
|
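nvme_retrieve_cmd() decides whether the head CQE is new by its phase tag: the controller inverts the tag on each pass around the queue, so the driver flips its tracked value whenever the head wraps. Note the polarity: nvme.c bails out when the tag equals its tracked nq_phase, i.e. it tracks the stale value, while the sketch below tracks the expected-new value, which is the more common textbook form of the same check:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QLEN 4

    /* Minimal NVMe-style CQE: the phase bit flips each time the controller
     * wraps the queue, so "cqe phase == expected phase" means "new entry". */
    struct cqe { uint16_t cid; uint8_t phase; };

    struct cq {
        struct cqe q[QLEN];
        unsigned   head;
        uint8_t    phase;    /* phase value we expect on a new entry */
    };

    static struct cqe *cq_retrieve(struct cq *cq)
    {
        struct cqe *cqe = &cq->q[cq->head];

        if (cqe->phase != cq->phase)
            return NULL;                 /* nothing new at the head */
        cq->head++;
        if (cq->head == QLEN) {          /* wrap: expected phase flips */
            cq->head = 0;
            cq->phase ^= 1;
        }
        return cqe;
    }

    int main(void)
    {
        struct cq cq;
        struct cqe *e;

        memset(&cq, 0, sizeof (cq));
        cq.phase = 1;
        cq.q[0] = (struct cqe){ 7, 1 };  /* one posted completion */
        while ((e = cq_retrieve(&cq)) != NULL)
            printf("completed cid %u\n", (unsigned)e->cid);
        return 0;
    }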
/titanic_41/usr/src/uts/common/io/ib/clients/of/sol_uverbs/ |
H A D | sol_uverbs_comp.c |
   208   cq_attr.cq_size = cmd.cqe;  in sol_uverbs_create_cq()
   220   if (!cmd.cqe) {  in sol_uverbs_create_cq()
   328   resp.cqe = real_size;  in sol_uverbs_create_cq()
   534   resize_status = ibt_resize_cq(ucq->cq, cmd.cqe, &resp.cqe);  in sol_uverbs_resize_cq()
|
/titanic_41/usr/src/uts/common/sys/ib/clients/of/rdma/ |
H A D | ib_user_verbs.h |
   310   uint32_t cqe;  member
   326   uint32_t cqe;  member
   333   uint32_t cqe;  member
   338   uint32_t cqe;  member
|