
Searched for refs:cq (Results 1 – 25 of 85), sorted by relevance


/titanic_44/usr/src/lib/libtecla/common/
chrqueue.c
94 GlCharQueue *cq; /* The object to be returned */ in _new_GlCharQueue() local
98 cq = malloc(sizeof(GlCharQueue)); in _new_GlCharQueue()
99 if(!cq) { in _new_GlCharQueue()
108 cq->err = NULL; in _new_GlCharQueue()
109 cq->bufmem = NULL; in _new_GlCharQueue()
110 cq->buffers.head = NULL; in _new_GlCharQueue()
111 cq->buffers.tail = NULL; in _new_GlCharQueue()
112 cq->nflush = cq->ntotal = 0; in _new_GlCharQueue()
116 cq->err = _new_ErrMsg(); in _new_GlCharQueue()
117 if(!cq->err) in _new_GlCharQueue()
[all …]
chrqueue.h
54 GlCharQueue *_del_GlCharQueue(GlCharQueue *cq);
59 int _glq_append_chars(GlCharQueue *cq, const char *chars, int n,
65 void _glq_empty_queue(GlCharQueue *cq);
70 int _glq_char_count(GlCharQueue *cq);
99 GlqFlushState _glq_flush_queue(GlCharQueue *cq, GlWriteFn *write_fn,
106 const char *_glq_last_error(GlCharQueue *cq);
/titanic_44/usr/src/uts/common/io/ib/adapters/tavor/
tavor_cq.c
49 static int tavor_cq_cqe_consume(tavor_state_t *state, tavor_cqhdl_t cq,
51 static int tavor_cq_errcqe_consume(tavor_state_t *state, tavor_cqhdl_t cq,
53 static void tavor_cqe_sync(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe,
55 static void tavor_cq_resize_helper(tavor_cqhdl_t cq, tavor_hw_cqe_t *new_cqbuf,
70 tavor_cqhdl_t cq; in tavor_cq_alloc() local
138 cq = (tavor_cqhdl_t)rsrc->tr_addr; in tavor_cq_alloc()
139 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq)) in tavor_cq_alloc()
140 cq->cq_is_umap = cq_is_umap; in tavor_cq_alloc()
143 cq->cq_cqnum = cqc->tr_indx; in tavor_cq_alloc()
152 if (cq->cq_is_umap) { in tavor_cq_alloc()
[all …]
tavor_wr.c
74 tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe);
75 static void tavor_wrid_reaplist_add(tavor_cqhdl_t cq, tavor_workq_hdr_t *wq);
76 static tavor_workq_hdr_t *tavor_wrid_wqhdr_find(tavor_cqhdl_t cq, uint_t qpn,
79 tavor_cqhdl_t cq, uint_t qpn, uint_t wq_type, uint_t create_wql);
88 static void tavor_cq_wqhdr_add(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr);
89 static void tavor_cq_wqhdr_remove(tavor_cqhdl_t cq, tavor_workq_hdr_t *wqhdr);
2779 tavor_wrid_get_entry(tavor_cqhdl_t cq, tavor_hw_cqe_t *cqe, in tavor_wrid_get_entry() argument
2790 mutex_enter(&cq->cq_wrid_wqhdr_lock); in tavor_wrid_get_entry()
2796 opcode = TAVOR_CQE_OPCODE_GET(cq, cqe); in tavor_wrid_get_entry()
2804 send_or_recv = TAVOR_CQE_SENDRECV_GET(cq, cqe); in tavor_wrid_get_entry()
[all …]
tavor_umap.c
78 static ibt_status_t tavor_umap_cq_data_out(tavor_cqhdl_t cq,
281 tavor_cqhdl_t cq; in tavor_umap_cqmem() local
289 cq = (tavor_cqhdl_t)rsrcp->tr_addr; in tavor_umap_cqmem()
292 size = ptob(btopr(cq->cq_cqinfo.qa_size)); in tavor_umap_cqmem()
297 &tavor_devmap_umem_cbops, cq->cq_cqinfo.qa_umemcookie, 0, size, in tavor_umap_cqmem()
419 tavor_cqhdl_t cq; in tavor_devmap_umem_map() local
475 cq = tavor_cqhdl_from_cqnum(state, key); in tavor_devmap_umem_map()
482 mutex_enter(&cq->cq_lock); in tavor_devmap_umem_map()
483 if (cq->cq_umap_dhp == NULL) { in tavor_devmap_umem_map()
484 cq->cq_umap_dhp = dhp; in tavor_devmap_umem_map()
[all …]
/titanic_44/usr/src/uts/common/io/ib/adapters/hermon/
hermon_cq.c
47 #define hermon_cq_update_ci_doorbell(cq) \ argument
49 HERMON_UAR_DB_RECORD_WRITE(cq->cq_arm_ci_vdbr, \
50 cq->cq_consindx & 0x00FFFFFF)
52 static int hermon_cq_arm_doorbell(hermon_state_t *state, hermon_cqhdl_t cq,
57 static void hermon_cq_cqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
59 static void hermon_cq_errcqe_consume(hermon_state_t *state, hermon_cqhdl_t cq,
75 hermon_cqhdl_t cq; in hermon_cq_alloc() local
140 cq = (hermon_cqhdl_t)rsrc->hr_addr; in hermon_cq_alloc()
141 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cq)) in hermon_cq_alloc()
142 cq->cq_is_umap = cq_is_umap; in hermon_cq_alloc()
[all …]
hermon_umap.c
86 static ibt_status_t hermon_umap_cq_data_out(hermon_cqhdl_t cq,
292 hermon_cqhdl_t cq; in hermon_umap_cqmem() local
298 cq = (hermon_cqhdl_t)rsrcp->hr_addr; in hermon_umap_cqmem()
301 size = ptob(btopr(cq->cq_resize_hdl ? in hermon_umap_cqmem()
302 cq->cq_resize_hdl->cq_cqinfo.qa_size : cq->cq_cqinfo.qa_size)); in hermon_umap_cqmem()
307 &hermon_devmap_umem_cbops, cq->cq_resize_hdl ? in hermon_umap_cqmem()
308 cq->cq_resize_hdl->cq_cqinfo.qa_umemcookie : in hermon_umap_cqmem()
309 cq->cq_cqinfo.qa_umemcookie, 0, size, in hermon_umap_cqmem()
460 hermon_cqhdl_t cq; in hermon_devmap_umem_map() local
511 cq = hermon_cqhdl_from_cqnum(state, key); in hermon_devmap_umem_map()
[all …]
/titanic_44/usr/src/uts/common/io/fibre-channel/fca/oce/
oce_mq.c
47 struct oce_cq *cq; in oce_drain_mq_cq() local
52 cq = mq->cq; in oce_drain_mq_cq()
55 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_drain_mq_cq()
77 RING_GET(cq->ring, 1); in oce_drain_mq_cq()
78 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_drain_mq_cq()
82 oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE); in oce_drain_mq_cq()
89 oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE); in oce_start_mq()
97 struct oce_cq *cq; in oce_clean_mq() local
102 cq = mq->cq; in oce_clean_mq()
104 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); in oce_clean_mq()
[all …]
oce_queue.c
199 struct oce_cq *cq = NULL; in oce_cq_create() local
205 cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP); in oce_cq_create()
206 if (cq == NULL) { in oce_cq_create()
213 cq->ring = create_ring_buffer(dev, q_len, in oce_cq_create()
215 if (cq->ring == NULL) { in oce_cq_create()
218 (void *)cq->ring); in oce_cq_create()
219 kmem_free(cq, sizeof (struct oce_cq)); in oce_cq_create()
249 fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages; in oce_cq_create()
250 oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0], in oce_cq_create()
251 cq->ring->dbuf->num_pages); in oce_cq_create()
[all …]
oce_rx.c
502 struct oce_cq *cq; in oce_drain_rq_cq() local
509 cq = rq->cq; in oce_drain_rq_cq()
513 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); in oce_drain_rq_cq()
515 (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL); in oce_drain_rq_cq()
554 RING_GET(cq->ring, 1); in oce_drain_rq_cq()
555 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, in oce_drain_rq_cq()
567 oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE); in oce_drain_rq_cq()
615 struct oce_cq *cq; in oce_clean_rq() local
621 cq = rq->cq; in oce_clean_rq()
622 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); in oce_clean_rq()
[all …]
oce_intr.c
310 struct oce_cq *cq; in oce_isr() local
334 cq = dev->cq[cq_id]; in oce_isr()
337 (void) cq->cq_handler(cq->cb_arg); in oce_isr()
oce_tx.c
560 struct oce_cq *cq; in oce_process_tx_compl() local
565 cq = wq->cq; in oce_process_tx_compl()
567 (void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0, in oce_process_tx_compl()
571 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); in oce_process_tx_compl()
590 RING_GET(cq->ring, 1); in oce_process_tx_compl()
591 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, in oce_process_tx_compl()
597 oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm); in oce_process_tx_compl()
960 oce_drain_eq(wq->cq->eq); in oce_clean_wq()
/titanic_44/usr/src/lib/udapl/udapl_tavor/tavor/
dapl_hermon_hw.c
44 #define HERMON_CQE_OWNER_IS_SW(cq, cqe) ((((uint8_t *)cqe)[31] >> 7) == \ argument
45 ((cq->cq_consindx & cq->cq_size) >> cq->cq_log_cqsz))
69 #define cq_wrap_around_mask (cq->cq_size - 1)
118 ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell); in dapli_hermon_cq_doorbell()
126 ((tavor_hw_uar32_t *)ia_uar)->cq[0] = in dapli_hermon_cq_doorbell()
128 ((tavor_hw_uar32_t *)ia_uar)->cq[1] = in dapli_hermon_cq_doorbell()
133 &((tavor_hw_uar_t *)ia_uar)->cq); in dapli_hermon_cq_doorbell()
529 dapli_hermon_cq_peek(ib_cq_handle_t cq, int *num_cqe) in dapli_hermon_cq_peek() argument
539 cons_indx = cq->cq_consindx & cq_wrap_around_mask; in dapli_hermon_cq_peek()
542 cqe = (uint32_t *)&cq->cq_addr[cons_indx]; in dapli_hermon_cq_peek()
[all …]
dapl_arbel_hw.c
119 ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell); in dapli_arbel_cq_doorbell()
127 ((tavor_hw_uar32_t *)ia_uar)->cq[0] = in dapli_arbel_cq_doorbell()
129 ((tavor_hw_uar32_t *)ia_uar)->cq[1] = in dapli_arbel_cq_doorbell()
134 &((tavor_hw_uar_t *)ia_uar)->cq); in dapli_arbel_cq_doorbell()
552 dapli_arbel_cq_peek(ib_cq_handle_t cq, int *num_cqe) in dapli_arbel_cq_peek() argument
563 cons_indx = cq->cq_consindx; in dapli_arbel_cq_peek()
569 wrap_around_mask = (cq->cq_size - 1); in dapli_arbel_cq_peek()
572 cqe = &cq->cq_addr[cons_indx]; in dapli_arbel_cq_peek()
596 cqe = &cq->cq_addr[cons_indx]; in dapli_arbel_cq_peek()
602 #define dapli_arbel_cq_update_ci(cq, dbp) \ argument
[all …]
dapl_tavor_hw.c
77 ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64( \
98 ((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell); in dapli_tavor_cq_doorbell()
106 ((tavor_hw_uar32_t *)ia_uar)->cq[0] = in dapli_tavor_cq_doorbell()
108 ((tavor_hw_uar32_t *)ia_uar)->cq[1] = in dapli_tavor_cq_doorbell()
113 &((tavor_hw_uar_t *)ia_uar)->cq); in dapli_tavor_cq_doorbell()
784 dapli_tavor_cq_peek(ib_cq_handle_t cq, int *num_cqe) in dapli_tavor_cq_peek() argument
795 cons_indx = cq->cq_consindx; in dapli_tavor_cq_peek()
801 wrap_around_mask = (cq->cq_size - 1); in dapli_tavor_cq_peek()
804 cqe = &cq->cq_addr[cons_indx]; in dapli_tavor_cq_peek()
828 cqe = &cq->cq_addr[cons_indx]; in dapli_tavor_cq_peek()
[all …]
dapl_tavor_wr.c
59 dapls_tavor_wrid_get_entry(ib_cq_handle_t cq, tavor_hw_cqe_t *cqe, in dapls_tavor_wrid_get_entry() argument
68 dapl_os_lock(&cq->cq_wrid_wqhdr_lock); in dapls_tavor_wrid_get_entry()
72 wq = dapli_tavor_wrid_wqhdr_find(cq, qpnum, send_or_recv); in dapls_tavor_wrid_get_entry()
114 dapl_os_unlock(&cq->cq_wrid_wqhdr_lock); in dapls_tavor_wrid_get_entry()
303 dapls_tavor_wrid_cq_reap(ib_cq_handle_t cq) in dapls_tavor_wrid_cq_reap() argument
312 dapl_os_lock(&cq->cq_wrid_wqhdr_lock); in dapls_tavor_wrid_cq_reap()
315 container = cq->cq_wrid_reap_head; in dapls_tavor_wrid_cq_reap()
326 dapli_tavor_cq_wqhdr_remove(cq, consume_wqhdr); in dapls_tavor_wrid_cq_reap()
331 cq->cq_wrid_reap_head = cq->cq_wrid_reap_tail = NULL; in dapls_tavor_wrid_cq_reap()
333 dapl_os_unlock(&cq->cq_wrid_wqhdr_lock); in dapls_tavor_wrid_cq_reap()
[all …]
/titanic_44/usr/src/uts/common/io/ib/clients/of/sol_ofs/
sol_kverbs.c
909 struct ib_cq *cq = (struct ib_cq *)ibt_get_cq_private(ibt_cq); in ofs_cq_handler() local
913 "arg: 0x%p", ibt_cq, cq, cq->comp_handler, arg); in ofs_cq_handler()
915 if (cq->comp_handler) { in ofs_cq_handler()
916 cq->comp_handler(cq, cq->cq_context); in ofs_cq_handler()
945 struct ib_cq *cq; in ib_create_cq() local
948 if ((cq = kmem_alloc(sizeof (struct ib_cq), KM_NOSLEEP)) == NULL) { in ib_create_cq()
978 rtn = ibt_alloc_cq(device->hca_hdl, &cq_attr, &cq->ibt_cq, &real_size); in ib_create_cq()
982 cq->device = device; in ib_create_cq()
983 cq->comp_handler = comp_handler; in ib_create_cq()
984 cq->event_handler = event_handler; in ib_create_cq()
[all …]
/titanic_44/usr/src/uts/common/io/
dedump.c
163 struct copyreq *cq = (struct copyreq *)mp->b_rptr; in dedump_copyreq() local
166 "%lu\n", hdr, cq->cq_cmd, (void *)cq->cq_cr, cq->cq_id, cq->cq_flag, in dedump_copyreq()
167 (void *)cq->cq_private, (void *)cq->cq_addr, cq->cq_size); in dedump_copyreq()
/titanic_44/usr/src/cmd/spell/
huff.c
147 *py = ((y-1)<<w) + cq + k; in encode()
199 cq = c*q; in huff()
200 cs = cq<<(L-w); in huff()
201 qcs = (((long)(q-1)<<w) + cq) << (L-QW-w); in huff()
202 v0 = c - cq; in huff()
/titanic_44/usr/src/uts/common/sys/ib/adapters/tavor/
tavor_hw.h
1441 #define TAVOR_CQE_QPNUM_GET(cq, cqe) \ argument
1442 ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
1445 #define TAVOR_CQE_DQPN_GET(cq, cqe) \ argument
1446 ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
1449 #define TAVOR_CQE_SL_GET(cq, cqe) \ argument
1450 ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
1453 #define TAVOR_CQE_GRH_GET(cq, cqe) \ argument
1454 ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
1457 #define TAVOR_CQE_PATHBITS_GET(cq, cqe) \ argument
1458 ((ddi_get32((cq)->cq_cqinfo.qa_acchdl, \
[all …]
/titanic_44/usr/src/uts/common/os/
dumpsubr.c
1631 dumpsys_close_cq(cqueue_t *cq, int live) in dumpsys_close_cq() argument
1634 mutex_enter(&cq->mutex); in dumpsys_close_cq()
1635 atomic_dec_uint(&cq->open); in dumpsys_close_cq()
1636 cv_signal(&cq->cv); in dumpsys_close_cq()
1637 mutex_exit(&cq->mutex); in dumpsys_close_cq()
1639 atomic_dec_uint(&cq->open); in dumpsys_close_cq()
1667 dumpsys_lock(cqueue_t *cq, int live) in dumpsys_lock() argument
1670 mutex_enter(&cq->mutex); in dumpsys_lock()
1672 dumpsys_spinlock(&cq->spinlock); in dumpsys_lock()
1676 dumpsys_unlock(cqueue_t *cq, int live, int signal) in dumpsys_unlock() argument
[all …]
/titanic_44/usr/src/uts/common/io/ib/clients/of/sol_uverbs/
sol_uverbs_comp.c
286 rc = ibt_alloc_cq(uctxt->hca->hdl, &cq_attr, &ucq->cq, &real_size); in sol_uverbs_create_cq()
296 ibt_set_cq_private(ucq->cq, ucq); in sol_uverbs_create_cq()
307 (void *) ucq->cq, &resp.drv_out, sizeof (resp.drv_out)); in sol_uverbs_create_cq()
357 ibt_set_cq_handler(ucq->cq, sol_uverbs_comp_event_handler, ucq); in sol_uverbs_create_cq()
374 (void) ibt_free_cq(ucq->cq); in sol_uverbs_create_cq()
394 rc = ibt_free_cq(ucq->cq); in uverbs_ucq_free()
534 resize_status = ibt_resize_cq(ucq->cq, cmd.cqe, &resp.cqe); in sol_uverbs_resize_cq()
546 (void *) ucq->cq, &resp.drv_out, sizeof (resp.drv_out)); in sol_uverbs_resize_cq()
623 rc = ibt_enable_cq_notify(ucq->cq, flag); in sol_uverbs_req_notify_cq()
697 rc = ibt_poll_cq(ucq->cq, completions, cmd.ne, &resp.count); in sol_uverbs_poll_cq()
[all …]
/titanic_44/usr/src/uts/common/inet/
mi.c
206 struct copyreq *cq = (struct copyreq *)mp->b_rptr; in mi_copyin() local
217 cq->cq_private = mp->b_cont; in mi_copyin()
218 cq->cq_size = len; in mi_copyin()
219 cq->cq_flag = 0; in mi_copyin()
220 bcopy(mp->b_cont->b_rptr, &cq->cq_addr, sizeof (cq->cq_addr)); in mi_copyin()
296 struct copyreq *cq = (struct copyreq *)mp->b_rptr; in mi_copyin_n() local
302 cq->cq_private = mp->b_cont; in mi_copyin_n()
303 cq->cq_size = len; in mi_copyin_n()
304 cq->cq_flag = 0; in mi_copyin_n()
305 bcopy(mp->b_cont->b_rptr, &cq->cq_addr, sizeof (cq->cq_addr)); in mi_copyin_n()
[all …]
/titanic_44/usr/src/uts/common/io/ib/clients/rds/
rdsib_ib.c
625 rds_poll_ctrl_completions(ibt_cq_hdl_t cq, rds_ep_t *ep) in rds_poll_ctrl_completions() argument
637 ret = ibt_poll_cq(cq, &wc, 1, &npolled); in rds_poll_ctrl_completions()
641 "returned: %d", ep, cq, ret); in rds_poll_ctrl_completions()
644 "returned: IBT_CQ_EMPTY", ep, cq); in rds_poll_ctrl_completions()
664 ep, cq, wc.wc_id, wc.wc_status); in rds_poll_ctrl_completions()
869 rds_poll_data_completions(ibt_cq_hdl_t cq, rds_ep_t *ep) in rds_poll_data_completions() argument
882 ret = ibt_poll_cq(cq, &wc, 1, &npolled); in rds_poll_data_completions()
886 "returned: %d", ep, cq, ret); in rds_poll_data_completions()
889 "returned: IBT_CQ_EMPTY", ep, cq); in rds_poll_data_completions()
912 ep, cq, wc.wc_id, wc.wc_status); in rds_poll_data_completions()
[all …]
/titanic_44/usr/src/lib/libnisdb/
db_mindex.cc
746 db_query * cq = extract_index_values_from_record(recloc); in remove_aux() local
747 if (cq == NULL) { in remove_aux()
752 if (cq->size() != indices.indices_len) { /* something is wrong */ in remove_aux()
753 delete cq; // clean up in remove_aux()
784 queryRes = removeLDAP(cq, o); in remove_aux()
795 db_qcomp * comps = cq->queryloc(); in remove_aux()
813 delete cq; in remove_aux()
959 db_query *cq = extract_index_values_from_object(obj); in add() local
960 if (cq == NULL) { in add()
969 if (cq ->size() != indices.indices_len) { /* something wrong */ in add()
[all …]
