/illumos-gate/usr/src/lib/libtecla/common/

  chrqueue.c
       92  GlCharQueue *cq;  /* The object to be returned */  [in _new_GlCharQueue(), local]
       96  cq = malloc(sizeof(GlCharQueue));  [in _new_GlCharQueue()]
       97  if(!cq) {  [in _new_GlCharQueue()]
      106  cq->err = NULL;  [in _new_GlCharQueue()]
      107  cq->bufmem = NULL;  [in _new_GlCharQueue()]
      108  cq->buffers.head = NULL;  [in _new_GlCharQueue()]
      109  cq->buffers.tail = NULL;  [in _new_GlCharQueue()]
      110  cq->nflush = cq->ntotal = 0;  [in _new_GlCharQueue()]
      114  cq->err = _new_ErrMsg();  [in _new_GlCharQueue()]
      115  if(!cq->err)  [in _new_GlCharQueue()]
      [all …]

  chrqueue.h
       52  GlCharQueue *_del_GlCharQueue(GlCharQueue *cq);
       57  int _glq_append_chars(GlCharQueue *cq, const char *chars, int n,
       63  void _glq_empty_queue(GlCharQueue *cq);
       68  int _glq_char_count(GlCharQueue *cq);
       97  GlqFlushState _glq_flush_queue(GlCharQueue *cq, GlWriteFn *write_fn,
      104  const char *_glq_last_error(GlCharQueue *cq);

/illumos-gate/usr/src/uts/common/io/ib/adapters/tavor/

  tavor_cq.c
       49  static int tavor_cq_cqe_consume(tavor_state_t *state, tavor_cqhdl_t cq,
       51  static int tavor_cq_errcqe_consume(tavor_state_t *state, tavor_cqhdl_t cq,
       53  static void tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,
       55  static void tavor_cq_resize_helper(tavor_cqhdl_t cq, tavor_hw_cqe_t *new_cqbuf,
       70  tavor_cqhdl_t cq;  [in tavor_cq_alloc(), local]
      129  cq = (tavor_cqhdl_t)rsrc->tr_addr;  [in tavor_cq_alloc()]
      130  _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))  [in tavor_cq_alloc()]
      131  cq->cq_is_umap = cq_is_umap;  [in tavor_cq_alloc()]
      134  cq->cq_cqnum = cqc->tr_indx;  [in tavor_cq_alloc()]
      143  if (cq->cq_is_umap) {  [in tavor_cq_alloc()]
      [all …]

  tavor_wr.c
       74  tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe);
       75  static void tavor_wrid_reaplist_add(tavor_cqhdl_t cq, tavor_workq_hdr_t *wq);
       76  static tavor_workq_hdr_t *tavor_wrid_wqhdr_find(tavor_cqhdl_t cq, uint_t qpn,
       79  tavor_cqhdl_t cq, uint_t qpn, uint_t wq_type, uint_t create_wql);
       88  static void tavor_cq_wqhdr_add(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr);
       89  static void tavor_cq_wqhdr_remove(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr);
     2599  tavor_wrid_get_entry(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,  [in tavor_wrid_get_entry(), argument]
     2608  mutex_enter(&cq->cq_wrid_wqhdr_lock);  [in tavor_wrid_get_entry()]
     2614  opcode = TAVOR_CQE_OPCODE_GET(cq, cqe);  [in tavor_wrid_get_entry()]
     2622  send_or_recv = TAVOR_CQE_SENDRECV_GET(cq, cqe);  [in tavor_wrid_get_entry()]
      [all …]

  tavor_umap.c
       78  static ibt_status_t tavor_umap_cq_data_out(tavor_cqhdl_t cq,
      248  tavor_cqhdl_t cq;  [in tavor_umap_cqmem(), local]
      254  cq = (tavor_cqhdl_t)rsrcp->tr_addr;  [in tavor_umap_cqmem()]
      257  size = ptob(btopr(cq->cq_cqinfo.qa_size));  [in tavor_umap_cqmem()]
      262  &tavor_devmap_umem_cbops, cq->cq_cqinfo.qa_umemcookie, 0, size,  [in tavor_umap_cqmem()]
      371  tavor_cqhdl_t cq;  [in tavor_devmap_umem_map(), local]
      422  cq = tavor_cqhdl_from_cqnum(state, key);  [in tavor_devmap_umem_map()]
      429  mutex_enter(&cq->cq_lock);  [in tavor_devmap_umem_map()]
      430  if (cq->cq_umap_dhp == NULL) {  [in tavor_devmap_umem_map()]
      431  cq->cq_umap_dhp = dhp;  [in tavor_devmap_umem_map()]
      [all …]

/illumos-gate/usr/src/uts/common/io/ib/adapters/hermon/

  hermon_cq.c
       47  #define hermon_cq_update_ci_doorbell(cq) \  [argument]
       49  HERMON_UAR_DB_RECORD_WRITE(cq->cq_arm_ci_vdbr, \
       50  cq->cq_consindx & 0x00FFFFFF)
       52  static int hermon_cq_arm_doorbell(hermon_state_t *state, hermon_cqhdl_t cq,
       57  static void hermon_cq_cqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
       59  static void hermon_cq_errcqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
       75  hermon_cqhdl_t cq;  [in hermon_cq_alloc(), local]
      140  cq = (hermon_cqhdl_t)rsrc->hr_addr;  [in hermon_cq_alloc()]
      141  _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq))  [in hermon_cq_alloc()]
      142  cq->cq_is_umap = cq_is_umap;  [in hermon_cq_alloc()]
      [all …]

  hermon_umap.c
       86  static ibt_status_t hermon_umap_cq_data_out(hermon_cqhdl_t cq,
      292  hermon_cqhdl_t cq;  [in hermon_umap_cqmem(), local]
      298  cq = (hermon_cqhdl_t)rsrcp->hr_addr;  [in hermon_umap_cqmem()]
      301  size = ptob(btopr(cq->cq_resize_hdl ?  [in hermon_umap_cqmem()]
      302  cq->cq_resize_hdl->cq_cqinfo.qa_size : cq->cq_cqinfo.qa_size));  [in hermon_umap_cqmem()]
      307  &hermon_devmap_umem_cbops, cq->cq_resize_hdl ?  [in hermon_umap_cqmem()]
      308  cq->cq_resize_hdl->cq_cqinfo.qa_umemcookie :  [in hermon_umap_cqmem()]
      309  cq->cq_cqinfo.qa_umemcookie, 0, size,  [in hermon_umap_cqmem()]
      460  hermon_cqhdl_t cq;  [in hermon_devmap_umem_map(), local]
      511  cq = hermon_cqhdl_from_cqnum(state, key);  [in hermon_devmap_umem_map()]
      [all …]

/illumos-gate/usr/src/lib/udapl/udapl_tavor/tavor/

  dapl_hermon_hw.c
       44  #define HERMON_CQE_OWNER_IS_SW(cq, cqe) ((((uint8_t *)cqe)[31] >> 7) == \  [argument]
       45  ((cq->cq_consindx & cq->cq_size) >> cq->cq_log_cqsz))
       69  #define cq_wrap_around_mask (cq->cq_size - 1)
      118  ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell);  [in dapli_hermon_cq_doorbell()]
      126  ((tavor_hw_uar32_t *)ia_uar)->cq[0] =  [in dapli_hermon_cq_doorbell()]
      128  ((tavor_hw_uar32_t *)ia_uar)->cq[1] =  [in dapli_hermon_cq_doorbell()]
      133  &((tavor_hw_uar_t *)ia_uar)->cq);  [in dapli_hermon_cq_doorbell()]
      529  dapli_hermon_cq_peek(ib_cq_handle_t cq, int *num_cqe)  [in dapli_hermon_cq_peek(), argument]
      539  cons_indx = cq->cq_consindx & cq_wrap_around_mask;  [in dapli_hermon_cq_peek()]
      542  cqe = (uint32_t *)&cq->cq_addr[cons_indx];  [in dapli_hermon_cq_peek()]
      [all …]

  dapl_arbel_hw.c
      119  ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell);  [in dapli_arbel_cq_doorbell()]
      127  ((tavor_hw_uar32_t *)ia_uar)->cq[0] =  [in dapli_arbel_cq_doorbell()]
      129  ((tavor_hw_uar32_t *)ia_uar)->cq[1] =  [in dapli_arbel_cq_doorbell()]
      134  &((tavor_hw_uar_t *)ia_uar)->cq);  [in dapli_arbel_cq_doorbell()]
      552  dapli_arbel_cq_peek(ib_cq_handle_t cq, int *num_cqe)  [in dapli_arbel_cq_peek(), argument]
      563  cons_indx = cq->cq_consindx;  [in dapli_arbel_cq_peek()]
      569  wrap_around_mask = (cq->cq_size - 1);  [in dapli_arbel_cq_peek()]
      572  cqe = &cq->cq_addr[cons_indx];  [in dapli_arbel_cq_peek()]
      596  cqe = &cq->cq_addr[cons_indx];  [in dapli_arbel_cq_peek()]
      602  #define dapli_arbel_cq_update_ci(cq, dbp) \  [argument]
      [all …]

  dapl_tavor_hw.c
       77  ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64( \
       98  ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell);  [in dapli_tavor_cq_doorbell()]
      106  ((tavor_hw_uar32_t *)ia_uar)->cq[0] =  [in dapli_tavor_cq_doorbell()]
      108  ((tavor_hw_uar32_t *)ia_uar)->cq[1] =  [in dapli_tavor_cq_doorbell()]
      113  &((tavor_hw_uar_t *)ia_uar)->cq);  [in dapli_tavor_cq_doorbell()]
      784  dapli_tavor_cq_peek(ib_cq_handle_t cq, int *num_cqe)  [in dapli_tavor_cq_peek(), argument]
      795  cons_indx = cq->cq_consindx;  [in dapli_tavor_cq_peek()]
      801  wrap_around_mask = (cq->cq_size - 1);  [in dapli_tavor_cq_peek()]
      804  cqe = &cq->cq_addr[cons_indx];  [in dapli_tavor_cq_peek()]
      828  cqe = &cq->cq_addr[cons_indx];  [in dapli_tavor_cq_peek()]
      [all …]

  dapl_tavor_wr.c
       59  dapls_tavor_wrid_get_entry(ib_cq_handle_t cq, tavor_hw_cqe_t *cqe,  [in dapls_tavor_wrid_get_entry(), argument]
       68  dapl_os_lock(&cq->cq_wrid_wqhdr_lock);  [in dapls_tavor_wrid_get_entry()]
       72  wq = dapli_tavor_wrid_wqhdr_find(cq, qpnum, send_or_recv);  [in dapls_tavor_wrid_get_entry()]
      114  dapl_os_unlock(&cq->cq_wrid_wqhdr_lock);  [in dapls_tavor_wrid_get_entry()]
      303  dapls_tavor_wrid_cq_reap(ib_cq_handle_t cq)  [in dapls_tavor_wrid_cq_reap(), argument]
      312  dapl_os_lock(&cq->cq_wrid_wqhdr_lock);  [in dapls_tavor_wrid_cq_reap()]
      315  container = cq->cq_wrid_reap_head;  [in dapls_tavor_wrid_cq_reap()]
      326  dapli_tavor_cq_wqhdr_remove(cq, consume_wqhdr);  [in dapls_tavor_wrid_cq_reap()]
      331  cq->cq_wrid_reap_head = cq->cq_wrid_reap_tail = NULL;  [in dapls_tavor_wrid_cq_reap()]
      333  dapl_os_unlock(&cq->cq_wrid_wqhdr_lock);  [in dapls_tavor_wrid_cq_reap()]
      [all …]

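A note on the three *_cq_peek() routines above: each treats the CQ as a power-of-two ring, reduces the free-running consumer index with a mask of (cq_size - 1), and decides whether a CQE has been handed to software by comparing the CQE's ownership bit against the parity of the lap the index is on (HERMON_CQE_OWNER_IS_SW() or its per-chip equivalent). The following is a minimal, self-contained sketch of that scheme, not the library's code; the sketch_cq_t type, its field names, and the 32-byte CQE stride are assumptions made for illustration (the stride is implied by the [31] byte offset in the macro).

    #include <stdint.h>

    /* Hypothetical, simplified CQ handle used only for this sketch. */
    typedef struct {
        uint32_t  cq_consindx;   /* free-running consumer count */
        uint32_t  cq_size;       /* number of CQEs; must be a power of two */
        uint32_t  cq_log_cqsz;   /* log2(cq_size) */
        uint8_t   *cq_addr;      /* CQE array, assumed 32 bytes per entry */
    } sketch_cq_t;

    /*
     * Mirrors HERMON_CQE_OWNER_IS_SW(): hardware toggles the top bit of byte 31
     * of a CQE each time it laps the ring, so software owns the entry when that
     * bit equals the parity of the lap the given index is on.
     */
    static int
    sketch_cqe_owned_by_sw(const sketch_cq_t *cq, uint32_t indx)
    {
        const uint8_t *cqe = &cq->cq_addr[(indx & (cq->cq_size - 1)) * 32];
        uint8_t hw_bit = cqe[31] >> 7;
        uint8_t sw_lap = (indx & cq->cq_size) >> cq->cq_log_cqsz;

        return (hw_bit == sw_lap);
    }

    /* Count the CQEs that are ready without consuming them (a "peek"). */
    static int
    sketch_cq_peek(const sketch_cq_t *cq)
    {
        uint32_t indx = cq->cq_consindx;
        int n = 0;

        while (sketch_cqe_owned_by_sw(cq, indx)) {
            n++;
            indx++;    /* kept free-running; masked only on access */
        }
        return (n);
    }

Keeping the index free-running and masking it only on access is what lets the single bit just above the mask act as the lap parity.
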
/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/

  oce_mq.c
       47  struct oce_cq *cq;  [in oce_drain_mq_cq(), local]
       52  cq = mq->cq;  [in oce_drain_mq_cq()]
       55  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  [in oce_drain_mq_cq()]
       77  RING_GET(cq->ring, 1);  [in oce_drain_mq_cq()]
       78  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  [in oce_drain_mq_cq()]
       82  oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);  [in oce_drain_mq_cq()]
       89  oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);  [in oce_start_mq()]
       97  struct oce_cq *cq;  [in oce_clean_mq(), local]
      102  cq = mq->cq;  [in oce_clean_mq()]
      104  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);  [in oce_clean_mq()]
      [all …]

  oce_queue.c
      199  struct oce_cq *cq = NULL;  [in oce_cq_create(), local]
      205  cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);  [in oce_cq_create()]
      206  if (cq == NULL) {  [in oce_cq_create()]
      213  cq->ring = create_ring_buffer(dev, q_len,  [in oce_cq_create()]
      215  if (cq->ring == NULL) {  [in oce_cq_create()]
      218  (void *)cq->ring);  [in oce_cq_create()]
      219  kmem_free(cq, sizeof (struct oce_cq));  [in oce_cq_create()]
      249  fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;  [in oce_cq_create()]
      250  oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],  [in oce_cq_create()]
      251  cq->ring->dbuf->num_pages);  [in oce_cq_create()]
      [all …]

  oce_rx.c
      502  struct oce_cq *cq;  [in oce_drain_rq_cq(), local]
      509  cq = rq->cq;  [in oce_drain_rq_cq()]
      513  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);  [in oce_drain_rq_cq()]
      515  (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);  [in oce_drain_rq_cq()]
      554  RING_GET(cq->ring, 1);  [in oce_drain_rq_cq()]
      555  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,  [in oce_drain_rq_cq()]
      567  oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);  [in oce_drain_rq_cq()]
      615  struct oce_cq *cq;  [in oce_clean_rq(), local]
      621  cq = rq->cq;  [in oce_clean_rq()]
      622  cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);  [in oce_clean_rq()]
      [all …]

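The oce_drain_*_cq() routines above follow the usual drain-then-rearm shape: read the CQE at the consumer position, process it, advance the ring by one (RING_GET(cq->ring, 1)), and finally credit the hardware with the number of entries consumed via oce_arm_cq(..., num_cqe, B_TRUE). A generic sketch of that shape is shown below; the types and callbacks are hypothetical stand-ins, not the driver's RING_* macros or oce_arm_cq() itself.

    #include <stdint.h>

    /* Hypothetical CQE and ring types used only for this sketch. */
    typedef struct {
        uint32_t  valid;      /* nonzero once hardware has written the entry */
        uint32_t  payload;
    } sketch_cqe_t;

    typedef struct {
        sketch_cqe_t  *entries;
        uint32_t      nentries;
        uint32_t      cidx;    /* consumer index */
    } sketch_ring_t;

    /* Drain every valid CQE, then report how many were taken in one go. */
    static uint32_t
    sketch_drain_cq(sketch_ring_t *ring,
        void (*handle)(const sketch_cqe_t *),
        void (*arm_cq)(uint32_t ncredits, int rearm))
    {
        uint32_t ncqe = 0;
        sketch_cqe_t *cqe = &ring->entries[ring->cidx];

        while (cqe->valid) {
            handle(cqe);
            cqe->valid = 0;    /* invalidate the slot before moving on */
            ring->cidx = (ring->cidx + 1) % ring->nentries;
            ncqe++;
            cqe = &ring->entries[ring->cidx];
        }

        /* Credit all consumed entries and re-arm in a single doorbell write. */
        arm_cq(ncqe, 1);
        return (ncqe);
    }

Crediting the consumed count once at the end, rather than per entry, keeps the doorbell writes off the per-completion hot path.
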
/illumos-gate/usr/src/uts/common/io/mlxcx/

  mlxcx_ring.c
      340  mlxcx_completion_queue_t *cq;  [in mlxcx_cq_setup(), local]
      342  cq = kmem_zalloc(sizeof (mlxcx_completion_queue_t), KM_SLEEP);  [in mlxcx_cq_setup()]
      343  mutex_init(&cq->mlcq_mtx, NULL, MUTEX_DRIVER,  [in mlxcx_cq_setup()]
      345  mutex_init(&cq->mlcq_arm_mtx, NULL, MUTEX_DRIVER,  [in mlxcx_cq_setup()]
      347  mutex_init(&cq->mlcq_bufbmtx, NULL, MUTEX_DRIVER,  [in mlxcx_cq_setup()]
      349  list_create(&cq->mlcq_buffers, sizeof (mlxcx_buffer_t),  [in mlxcx_cq_setup()]
      351  list_create(&cq->mlcq_buffers_b, sizeof (mlxcx_buffer_t),  [in mlxcx_cq_setup()]
      354  cq->mlcq_mlx = mlxp;  [in mlxcx_cq_setup()]
      355  list_insert_tail(&mlxp->mlx_cqs, cq);  [in mlxcx_cq_setup()]
      357  mutex_enter(&cq->mlcq_mtx);  [in mlxcx_cq_setup()]
      [all …]

  mlxcx_gld.c
      510  mlxcx_completion_queue_t *cq;  [in mlxcx_mac_ring_tx(), local]
      563  cq = sq->mlwq_cq;  [in mlxcx_mac_ring_tx()]
      569  if (cq->mlcq_state & MLXCX_CQ_TEARDOWN) {  [in mlxcx_mac_ring_tx()]
      587  if (cq->mlcq_bufcnt >= cq->mlcq_bufhwm ||  [in mlxcx_mac_ring_tx()]
      588  (cq->mlcq_bufcnt + bcount) > cq->mlcq_nents) {  [in mlxcx_mac_ring_tx()]
      589  atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_BLOCKED_MAC);  [in mlxcx_mac_ring_tx()]
      601  atomic_or_uint(&cq->mlcq_state, MLXCX_CQ_BLOCKED_MAC);  [in mlxcx_mac_ring_tx()]
      810  mlxcx_completion_queue_t *cq = wq->mlwq_cq;  [in mlxcx_mac_ring_start(), local]
      814  ASSERT(cq != NULL);  [in mlxcx_mac_ring_start()]
      826  mutex_enter(&cq->mlcq_mtx);  [in mlxcx_mac_ring_start()]
      [all …]

/illumos-gate/usr/src/uts/common/io/ib/clients/of/sol_ofs/

  sol_kverbs.c
      910  struct ib_cq *cq = (struct ib_cq *)ibt_get_cq_private(ibt_cq);  [in ofs_cq_handler(), local]
      914  "arg: 0x%p", ibt_cq, cq, cq->comp_handler, arg);  [in ofs_cq_handler()]
      916  if (cq->comp_handler) {  [in ofs_cq_handler()]
      917  cq->comp_handler(cq, cq->cq_context);  [in ofs_cq_handler()]
      946  struct ib_cq *cq;  [in ib_create_cq(), local]
      949  if ((cq = kmem_alloc(sizeof (struct ib_cq), KM_NOSLEEP)) == NULL) {  [in ib_create_cq()]
      967  kmem_free(cq, sizeof (struct ib_cq));  [in ib_create_cq()]
      980  rtn = ibt_alloc_cq(device->hca_hdl, &cq_attr, &cq->ibt_cq, &real_size);  [in ib_create_cq()]
      984  cq->device = device;  [in ib_create_cq()]
      985  cq->comp_handler = comp_handler;  [in ib_create_cq()]
      [all …]

/illumos-gate/usr/src/uts/common/io/

  dedump.c
      161  struct copyreq *cq = (struct copyreq *)mp->b_rptr;  [in dedump_copyreq(), local]
      164  "%lu\n", hdr, cq->cq_cmd, (void *)cq->cq_cr, cq->cq_id, cq->cq_flag,  [in dedump_copyreq()]
      165  (void *)cq->cq_private, (void *)cq->cq_addr, cq->cq_size);  [in dedump_copyreq()]

/illumos-gate/usr/src/cmd/spell/

  huff.c
      144  *py = ((y-1)<<w) + cq + k;  [in encode()]
      196  cq = c*q;  [in huff()]
      197  cs = cq<<(L-w);  [in huff()]
      198  qcs = (((long)(q-1)<<w) + cq) << (L-QW-w);  [in huff()]
      199  v0 = c - cq;  [in huff()]

/illumos-gate/usr/src/uts/common/sys/ib/adapters/tavor/

  tavor_hw.h
     1441  #define TAVOR_CQE_QPNUM_GET(cq, cqe) \  [argument]
     1442  ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
     1445  #define TAVOR_CQE_DQPN_GET(cq, cqe) \  [argument]
     1446  ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
     1449  #define TAVOR_CQE_SL_GET(cq, cqe) \  [argument]
     1450  ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
     1453  #define TAVOR_CQE_GRH_GET(cq, cqe) \  [argument]
     1454  ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
     1457  #define TAVOR_CQE_PATHBITS_GET(cq, cqe) \  [argument]
     1458  ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
      [all …]

/illumos-gate/usr/src/uts/common/io/ena/

  ena_admin.c
      168  ena_admin_cq_t *cq = &aq->ea_cq;  [in ena_admin_read_resp(), local]
      174  uint16_t head_mod = cq->eac_head & modulo_mask;  [in ena_admin_read_resp()]
      175  uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;  [in ena_admin_read_resp()]
      203  ena_admin_cq_t *cq = &aq->ea_cq;  [in ena_admin_process_responses(), local]
      208  uint16_t head_mod = cq->eac_head & modulo_mask;  [in ena_admin_process_responses()]
      209  uint8_t phase = cq->eac_phase & ENAHW_RESP_PHASE_MASK;  [in ena_admin_process_responses()]
      211  ENA_DMA_SYNC(cq->eac_dma, DDI_DMA_SYNC_FORKERNEL);  [in ena_admin_process_responses()]
      212  hwresp = &cq->eac_entries[head_mod];  [in ena_admin_process_responses()]
      216  cq->eac_head++;  [in ena_admin_process_responses()]
      217  head_mod = cq->eac_head & modulo_mask;  [in ena_admin_process_responses()]
      [all …]

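ena_admin_process_responses() above relies on the common phase-bit convention for ring completion queues: the head counter is free-running and masked to a slot, and an entry counts as new only when its phase bit matches the phase software currently expects, which flips each time the head wraps. The sketch below illustrates that convention; the field names, the flags-byte layout, and the initial phase of 1 are assumptions for illustration, not taken from the ENA hardware definitions.

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical admin completion-queue state, modeled loosely on the fields above. */
    typedef struct {
        uint8_t   *entries;     /* CQ entries, entry_size bytes each */
        uint16_t  nentries;     /* power of two */
        uint16_t  head;         /* free-running head counter */
        uint8_t   phase;        /* expected phase bit; conventionally starts at 1 */
        size_t    entry_size;
    } sketch_admin_cq_t;

    #define SKETCH_PHASE_MASK  0x1  /* assume the low bit of the first byte holds the phase */

    /*
     * Process every response whose phase bit matches the expected phase.  When
     * the head wraps past the end of the ring, the expected phase flips, so
     * stale entries left over from the previous lap are never mistaken for
     * new ones.
     */
    static int
    sketch_admin_process(sketch_admin_cq_t *cq, void (*handle)(const uint8_t *entry))
    {
        uint16_t mask = cq->nentries - 1;
        int nprocessed = 0;

        for (;;) {
            uint16_t head_mod = cq->head & mask;
            const uint8_t *ent = &cq->entries[head_mod * cq->entry_size];

            if ((ent[0] & SKETCH_PHASE_MASK) != cq->phase)
                break;    /* hardware has not written this slot yet */

            handle(ent);
            cq->head++;
            if ((cq->head & mask) == 0)
                cq->phase ^= 1;    /* wrapped: expect the opposite phase next lap */
            nprocessed++;
        }
        return (nprocessed);
    }

The same convention appears in other ring-based completion queues (NVMe completion queues use a phase bit as well); what varies is which byte and bit of the entry carries it.
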
/illumos-gate/usr/src/uts/common/os/

  dumpsubr.c
     1631  dumpsys_close_cq(cqueue_t *cq, int live)  [in dumpsys_close_cq(), argument]
     1634  mutex_enter(&cq->mutex);  [in dumpsys_close_cq()]
     1635  atomic_dec_uint(&cq->open);  [in dumpsys_close_cq()]
     1636  cv_signal(&cq->cv);  [in dumpsys_close_cq()]
     1637  mutex_exit(&cq->mutex);  [in dumpsys_close_cq()]
     1639  atomic_dec_uint(&cq->open);  [in dumpsys_close_cq()]
     1667  dumpsys_lock(cqueue_t *cq, int live)  [in dumpsys_lock(), argument]
     1670  mutex_enter(&cq->mutex);  [in dumpsys_lock()]
     1672  dumpsys_spinlock(&cq->spinlock);  [in dumpsys_lock()]
     1676  dumpsys_unlock(cqueue_t *cq, int live, int signal)  [in dumpsys_unlock(), argument]
      [all …]

/illumos-gate/usr/src/uts/common/io/ib/clients/of/sol_uverbs/

  sol_uverbs_comp.c
      286  rc = ibt_alloc_cq(uctxt->hca->hdl, &cq_attr, &ucq->cq, &real_size);  [in sol_uverbs_create_cq()]
      296  ibt_set_cq_private(ucq->cq, ucq);  [in sol_uverbs_create_cq()]
      307  (void *) ucq->cq, &resp.drv_out, sizeof (resp.drv_out));  [in sol_uverbs_create_cq()]
      357  ibt_set_cq_handler(ucq->cq, sol_uverbs_comp_event_handler, ucq);  [in sol_uverbs_create_cq()]
      374  (void) ibt_free_cq(ucq->cq);  [in sol_uverbs_create_cq()]
      394  rc = ibt_free_cq(ucq->cq);  [in uverbs_ucq_free()]
      534  resize_status = ibt_resize_cq(ucq->cq, cmd.cqe, &resp.cqe);  [in sol_uverbs_resize_cq()]
      546  (void *) ucq->cq, &resp.drv_out, sizeof (resp.drv_out));  [in sol_uverbs_resize_cq()]
      623  rc = ibt_enable_cq_notify(ucq->cq, flag);  [in sol_uverbs_req_notify_cq()]
      697  rc = ibt_poll_cq(ucq->cq, completions, cmd.ne, &resp.count);  [in sol_uverbs_poll_cq()]
      [all …]

/illumos-gate/usr/src/uts/common/inet/

  mi.c
      206  struct copyreq *cq = (struct copyreq *)mp->b_rptr;  [in mi_copyin(), local]
      217  cq->cq_private = mp->b_cont;  [in mi_copyin()]
      218  cq->cq_size = len;  [in mi_copyin()]
      219  cq->cq_flag = 0;  [in mi_copyin()]
      220  bcopy(mp->b_cont->b_rptr, &cq->cq_addr, sizeof (cq->cq_addr));  [in mi_copyin()]
      296  struct copyreq *cq = (struct copyreq *)mp->b_rptr;  [in mi_copyin_n(), local]
      302  cq->cq_private = mp->b_cont;  [in mi_copyin_n()]
      303  cq->cq_size = len;  [in mi_copyin_n()]
      304  cq->cq_flag = 0;  [in mi_copyin_n()]
      305  bcopy(mp->b_cont->b_rptr, &cq->cq_addr, sizeof (cq->cq_addr));  [in mi_copyin_n()]
      [all …]

/illumos-gate/usr/src/uts/common/io/ib/clients/rds/

  rdsib_ib.c
      625  rds_poll_ctrl_completions(ibt_cq_hdl_t cq, rds_ep_t *ep)  [in rds_poll_ctrl_completions(), argument]
      637  ret = ibt_poll_cq(cq, &wc, 1, &npolled);  [in rds_poll_ctrl_completions()]
      641  "returned: %d", ep, cq, ret);  [in rds_poll_ctrl_completions()]
      644  "returned: IBT_CQ_EMPTY", ep, cq);  [in rds_poll_ctrl_completions()]
      664  ep, cq, wc.wc_id, wc.wc_status);  [in rds_poll_ctrl_completions()]
      869  rds_poll_data_completions(ibt_cq_hdl_t cq, rds_ep_t *ep)  [in rds_poll_data_completions(), argument]
      882  ret = ibt_poll_cq(cq, &wc, 1, &npolled);  [in rds_poll_data_completions()]
      886  "returned: %d", ep, cq, ret);  [in rds_poll_data_completions()]
      889  "returned: IBT_CQ_EMPTY", ep, cq);  [in rds_poll_data_completions()]
      912  ep, cq, wc.wc_id, wc.wc_status);  [in rds_poll_data_completions()]
      [all …]