/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/

  oce_mq.c
      46  struct oce_mq *mq;    in oce_drain_mq_cq() (local)
      51  mq = (struct oce_mq *)arg;    in oce_drain_mq_cq()
      52  cq = mq->cq;    in oce_drain_mq_cq()
      53  dev = mq->parent;    in oce_drain_mq_cq()
      54  mutex_enter(&mq->lock);    in oce_drain_mq_cq()
      81  mutex_exit(&mq->lock);    in oce_drain_mq_cq()
      87  oce_start_mq(struct oce_mq *mq)    in oce_start_mq() (argument)
      89  oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);    in oce_start_mq()
      95  oce_clean_mq(struct oce_mq *mq)    in oce_clean_mq() (argument)
     102  cq = mq->cq;    in oce_clean_mq()
     [all …]

  oce_queue.c
     332  struct oce_mq *mq = NULL;    in oce_mq_create() (local)
     346  mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);    in oce_mq_create()
     348  if (mq == NULL) {    in oce_mq_create()
     357  mq->ring = create_ring_buffer(dev, q_len,    in oce_mq_create()
     359  if (mq->ring == NULL) {    in oce_mq_create()
     362  (void *)mq->ring);    in oce_mq_create()
     371  fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;    in oce_mq_create()
     372  oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,    in oce_mq_create()
     373  mq->ring->dbuf->num_pages);    in oce_mq_create()
     394  mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);    in oce_mq_create()
     [all …]

  oce_gld.c
     108  (void) oce_start_mq(dev->mq);    in oce_start()
     147  mutex_enter(&dev->mq->lock);    in oce_stop()
     152  mutex_exit(&dev->mq->lock);    in oce_stop()
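
The oce_mq_create() hits illustrate the usual kernel allocation discipline: a zeroed control structure is obtained with kmem_zalloc(KM_NOSLEEP), every allocation is checked, and earlier allocations are unwound on failure. Below is a minimal sketch of that shape, assuming simplified, hypothetical structures (my_mq, my_ring) rather than the driver's real struct oce_mq and ring-buffer helpers.

    #include <sys/types.h>
    #include <sys/kmem.h>

    /* Hypothetical, simplified stand-ins for the oce driver's types. */
    struct my_ring {
            void    *ring_mem;
            size_t  ring_size;
    };

    struct my_mq {
            struct my_ring  *ring;
            uint16_t        mq_id;
    };

    static struct my_mq *
    mq_create_sketch(size_t q_len, size_t item_size)
    {
            struct my_mq *mq;

            /* KM_NOSLEEP allocations can fail, so every return value is checked. */
            mq = kmem_zalloc(sizeof (struct my_mq), KM_NOSLEEP);
            if (mq == NULL)
                    return (NULL);

            mq->ring = kmem_zalloc(sizeof (struct my_ring), KM_NOSLEEP);
            if (mq->ring == NULL) {
                    /* Unwind the partial allocation before reporting failure. */
                    kmem_free(mq, sizeof (struct my_mq));
                    return (NULL);
            }

            mq->ring->ring_size = q_len * item_size;
            mq->ring->ring_mem = kmem_zalloc(mq->ring->ring_size, KM_NOSLEEP);
            if (mq->ring->ring_mem == NULL) {
                    kmem_free(mq->ring, sizeof (struct my_ring));
                    kmem_free(mq, sizeof (struct my_mq));
                    return (NULL);
            }

            return (mq);
    }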

/illumos-gate/usr/src/uts/sun4v/io/

  dr_mem.c
     725  memquery_t mq;    in dr_mem_query() (local)
     730  (void) kphysm_del_span_query(btop(mbp->addr), btop(mbp->size), &mq);    in dr_mem_query()
     732  if (!mq.phys_pages)    in dr_mem_query()
     736  mqp->mq.phys_pages = ptob(mq.phys_pages);    in dr_mem_query()
     737  mqp->mq.managed = ptob(mq.managed);    in dr_mem_query()
     738  mqp->mq.nonrelocatable = ptob(mq.nonrelocatable);    in dr_mem_query()
     739  mqp->mq.first_nonrelocatable = ptob(mq.first_nonrelocatable);    in dr_mem_query()
     740  mqp->mq.last_nonrelocatable = ptob(mq.last_nonrelocatable);    in dr_mem_query()
     744  if (mqp->mq.nonrelocatable)    in dr_mem_query()
     745  mqp->mq.last_nonrelocatable += PAGESIZE - 1;    in dr_mem_query()
     [all …]
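
In dr_mem_query() above, the page counts returned by kphysm_del_span_query() are converted to byte quantities with ptob(), and, when anything is nonrelocatable, the last nonrelocatable address is rounded out to the final byte of its page. The standalone sketch below reproduces that conversion with a local page size and ptob() macro; MY_PAGESIZE and the sample values are illustrative, not the platform constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the kernel's PAGESIZE and ptob(); values are illustrative. */
    #define MY_PAGESIZE     8192UL
    #define my_ptob(pages)  ((uint64_t)(pages) * MY_PAGESIZE)

    int
    main(void)
    {
            uint64_t nonreloc_pages = 3;            /* e.g. from a span query */
            uint64_t last_nonreloc_pfn = 0x2002;    /* page frame number of last pinned page */

            uint64_t nonreloc_bytes = my_ptob(nonreloc_pages);
            uint64_t last_nonreloc_addr = my_ptob(last_nonreloc_pfn);

            /*
             * As in dr_mem_query(): if anything is nonrelocatable, report the
             * last byte of the last nonrelocatable page, not its first byte.
             */
            if (nonreloc_bytes != 0)
                    last_nonreloc_addr += MY_PAGESIZE - 1;

            printf("nonrelocatable: %llu bytes, last addr 0x%llx\n",
                (unsigned long long)nonreloc_bytes,
                (unsigned long long)last_nonreloc_addr);
            return (0);
    }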

/illumos-gate/usr/src/uts/sun4u/opl/io/

  dr_mem.c
     661  memquery_t mq;    in dr_del_mlist_query() (local)
     664  _b64top(ml->ml_address), _b64top(ml->ml_size), &mq);    in dr_del_mlist_query()
     668  mp->phys_pages += mq.phys_pages;    in dr_del_mlist_query()
     669  mp->managed += mq.managed;    in dr_del_mlist_query()
     670  mp->nonrelocatable += mq.nonrelocatable;    in dr_del_mlist_query()
     672  if (mq.nonrelocatable != 0) {    in dr_del_mlist_query()
     673  if (mq.first_nonrelocatable < mp->first_nonrelocatable)    in dr_del_mlist_query()
     675  mq.first_nonrelocatable;    in dr_del_mlist_query()
     676  if (mq.last_nonrelocatable > mp->last_nonrelocatable)    in dr_del_mlist_query()
     678  mq.last_nonrelocatable;    in dr_del_mlist_query()
     [all …]

/illumos-gate/usr/src/uts/sun4u/ngdr/io/

  dr_mem.c
     675  memquery_t mq;    in dr_del_span_query() (local)
     678  _b64top(ml->ml_address), _b64top(ml->ml_size), &mq);    in dr_del_span_query()
     682  mp->phys_pages += mq.phys_pages;    in dr_del_span_query()
     683  mp->managed += mq.managed;    in dr_del_span_query()
     684  mp->nonrelocatable += mq.nonrelocatable;    in dr_del_span_query()
     686  if (mq.nonrelocatable != 0) {    in dr_del_span_query()
     687  if (mq.first_nonrelocatable < mp->first_nonrelocatable)    in dr_del_span_query()
     689  mq.first_nonrelocatable;    in dr_del_span_query()
     690  if (mq.last_nonrelocatable > mp->last_nonrelocatable)    in dr_del_span_query()
     692  mq.last_nonrelocatable;    in dr_del_span_query()
     [all …]
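
dr_del_mlist_query() (opl) and dr_del_span_query() (ngdr) both fold per-span memquery results into one summary: page counts are summed, and the first/last nonrelocatable addresses are widened by min/max comparison, but only when the span actually contains nonrelocatable pages. A compact, self-contained sketch of that merge step follows; struct memq is a local stand-in for the memquery_t fields shown in the listing, and the caller is assumed to have seeded first_nonrelocatable high (e.g. UINT64_MAX) and last_nonrelocatable at 0 before the loop.

    #include <stdint.h>

    /* Local stand-in for the fields of memquery_t used in the listing. */
    struct memq {
            uint64_t phys_pages;
            uint64_t managed;
            uint64_t nonrelocatable;
            uint64_t first_nonrelocatable;
            uint64_t last_nonrelocatable;
    };

    /* Fold one span's query result (mq) into the running summary (mp). */
    static void
    memq_merge(struct memq *mp, const struct memq *mq)
    {
            mp->phys_pages += mq->phys_pages;
            mp->managed += mq->managed;
            mp->nonrelocatable += mq->nonrelocatable;

            /* Only widen the nonrelocatable bounds if this span pins anything. */
            if (mq->nonrelocatable != 0) {
                    if (mq->first_nonrelocatable < mp->first_nonrelocatable)
                            mp->first_nonrelocatable = mq->first_nonrelocatable;
                    if (mq->last_nonrelocatable > mp->last_nonrelocatable)
                            mp->last_nonrelocatable = mq->last_nonrelocatable;
            }
    }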

/illumos-gate/usr/src/uts/i86pc/io/dr/

  dr_mem_acpi.c
     281  memquery_t mq;    in dr_mem_status() (local)
     371  rv = ml ? dr_del_mlist_query(ml, &mq) : -1;    in dr_mem_status()
     375  msp->ms_managed_pages = mq.managed;    in dr_mem_status()
     376  msp->ms_noreloc_pages = mq.nonrelocatable;    in dr_mem_status()
     378  mq.first_nonrelocatable;    in dr_mem_status()
     380  mq.last_nonrelocatable;    in dr_mem_status()
     382  if (mq.nonrelocatable &&    in dr_mem_status()

/illumos-gate/usr/src/uts/common/io/bnx/570x/driver/common/lmdev/

  bnx_hw_reset.c
     657  REG_RD(pdev, mq.mq_config, &val);    in lm_chip_reset()
     658  REG_WR(pdev, mq.mq_config, val | MQ_CONFIG_HALT_DIS);    in lm_chip_reset()
    2091  REG_WR(pdev, mq.mq_map_l4_0, 0x8001c1b9);    in lm_reset_setup()
    2095  REG_WR(pdev, mq.mq_map_l4_0, 0x80010db9);    in lm_reset_setup()
    2096  REG_WR(pdev, mq.mq_map_l4_4, 0x82810eb2);    in lm_reset_setup()
    2097  REG_WR(pdev, mq.mq_map_l4_5, 0x8f0113b4);    in lm_reset_setup()
    2133  REG_RD(pdev, mq.mq_config, &val);    in lm_reset_setup()
    2169  REG_WR(pdev, mq.mq_config, val);    in lm_reset_setup()
    2174  REG_WR(pdev, mq.mq_knl_byp_wind_start, val);    in lm_reset_setup()
    2175  REG_WR(pdev, mq.mq_knl_wind_end, val);    in lm_reset_setup()
     [all …]
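
The lm_chip_reset() lines show a read-modify-write of the MQ configuration register: read the current value, OR in the halt-disable bit, write it back. The sketch below shows the same idiom against hypothetical reg_read32()/reg_write32() accessors and a made-up bit position; it is not the driver's REG_RD/REG_WR machinery.

    #include <stdint.h>

    #define CFG_HALT_DIS    (1U << 3)   /* hypothetical bit, standing in for MQ_CONFIG_HALT_DIS */

    /* Hypothetical device-register accessors. */
    extern uint32_t reg_read32(void *dev, uint32_t offset);
    extern void reg_write32(void *dev, uint32_t offset, uint32_t val);

    /* Set a configuration bit without disturbing the rest of the register. */
    static void
    cfg_set_halt_dis(void *dev, uint32_t cfg_offset)
    {
            uint32_t val = reg_read32(dev, cfg_offset);

            reg_write32(dev, cfg_offset, val | CFG_HALT_DIS);
    }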

/illumos-gate/usr/src/cmd/hal/hald/solaris/

  devinfo_storage.c
     819  GQueue *mq;    in devinfo_storage_minors() (local)
     834  if ((mq = g_queue_new()) == NULL) {    in devinfo_storage_minors()
     838  g_queue_free (mq);    in devinfo_storage_minors()
     885  g_queue_push_head (mq, maindev);    in devinfo_storage_minors()
     888  g_queue_push_tail (mq, m);    in devinfo_storage_minors()
     897  while (!g_queue_is_empty (mq)) {    in devinfo_storage_minors()
     898  devinfo_storage_free_minor (g_queue_pop_head (mq));    in devinfo_storage_minors()
     924  g_queue_push_tail (mq, m);    in devinfo_storage_minors()
     933  while (!g_queue_is_empty (mq)) {    in devinfo_storage_minors()
     934  m = g_queue_pop_head (mq);    in devinfo_storage_minors()
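
devinfo_storage_minors() uses a GLib GQueue as a simple work list: minor nodes are pushed (the main device at the head, the rest at the tail) during the scan, then the queue is drained and freed in a second pass. A minimal standalone GLib program with the same push/drain shape follows; the string payloads are placeholders for the driver's minor-node structures.

    /* Build with: gcc example.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    int
    main(void)
    {
            GQueue *mq = g_queue_new();
            if (mq == NULL)
                    return (1);

            /* First pass: collect items (the "main" one at the head). */
            g_queue_push_head(mq, (gpointer)"maindev");
            g_queue_push_tail(mq, (gpointer)"minor-a");
            g_queue_push_tail(mq, (gpointer)"minor-b");

            /* Second pass: drain in order and process each entry. */
            while (!g_queue_is_empty(mq)) {
                    const char *m = g_queue_pop_head(mq);
                    printf("processing %s\n", m);
            }

            g_queue_free(mq);
            return (0);
    }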

/illumos-gate/usr/src/uts/sun4v/sys/

  dr_mem.h
      80  memquery_t mq;    /* query results */    (member)

/illumos-gate/usr/src/cmd/dc/

  dc.c
    1342  struct blk *mp, *mq, *mr;    in mult() (local)
    1357  mq = q;    in mult()
    1360  mq = copy(q, length(q));    in mult()
    1361  chsign(mq);    in mult()
    1365  mr = salloc(length(mp) + length(mq));    in mult()
    1367  rewind(mq);    in mult()
    1368  while (sfeof(mq) == 0) {    in mult()
    1369  cq = sgetc(mq);    in mult()
    1392  if (mq != q)    in mult()
    1393  release(mq);    in mult()

/illumos-gate/usr/src/uts/sun4u/io/

  sbd_mem.c
     842  memquery_t mq;    in sbd_pre_release_mem() (local)
     905  mp->sbm_basepfn, mp->sbm_npages, &mq);    in sbd_pre_release_mem()
     927  if (mq.nonrelocatable != 0) {    in sbd_pre_release_mem()
     974  if ((mq.nonrelocatable != 0) ||    in sbd_pre_release_mem()
    1737  memquery_t mq;    in sbd_select_mem_target() (local)
    1870  t_mp->sbm_basepfn, t_mp->sbm_npages, &mq);    in sbd_select_mem_target()
    1878  if (mq.nonrelocatable != 0) {    in sbd_select_mem_target()
    1883  mq.first_nonrelocatable,    in sbd_select_mem_target()
    1884  mq.last_nonrelocatable);    in sbd_select_mem_target()

  sbd.c
    4037  memquery_t mq;    in sbd_mem_status() (local)
    4151  mp->sbm_npages, &mq);    in sbd_mem_status()
    4153  msp->ms_managed_pages = mq.managed;    in sbd_mem_status()
    4154  msp->ms_noreloc_pages = mq.nonrelocatable;    in sbd_mem_status()
    4155  msp->ms_noreloc_first = mq.first_nonrelocatable;    in sbd_mem_status()
    4156  msp->ms_noreloc_last = mq.last_nonrelocatable;    in sbd_mem_status()
    4158  if (mq.nonrelocatable) {    in sbd_mem_status()

/illumos-gate/usr/src/uts/common/sys/fibre-channel/fca/oce/

  oce_io.h
     354  void oce_clean_mq(struct oce_mq *mq);
     355  int oce_start_mq(struct oce_mq *mq);

  oce_impl.h
     216  struct oce_mq *mq;    /* MQ ring */    (member)

/illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/

  emlxs_sli4.c
    3516  EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,    in emlxs_sli4_issue_mq()
    3549  hba->sli.sli4.mq.addr.virt) -    in emlxs_sli4_issue_mq()
    3553  EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,    in emlxs_sli4_issue_mq()
    3565  "MQ RING: Qid %04x", hba->sli.sli4.mq.qid);    in emlxs_sli4_issue_mq()
    3568  emlxs_sli4_write_mqdb(hba, hba->sli.sli4.mq.qid, 1);    in emlxs_sli4_issue_mq()
    3938  iptr = hba->sli.sli4.mq.addr.virt;    in emlxs_sli4_issue_mbox_cmd()
    3939  iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);    in emlxs_sli4_issue_mbox_cmd()
    3940  hba->sli.sli4.mq.host_index++;    in emlxs_sli4_issue_mbox_cmd()
    3941  if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {    in emlxs_sli4_issue_mbox_cmd()
    3942  hba->sli.sli4.mq.host_index = 0;    in emlxs_sli4_issue_mbox_cmd()
    [all …]

  emlxs_mbox.c
     955  addr = hba->sli.sli4.mq.addr.phys;    in emlxs_mb_mq_create()
     960  qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid;    in emlxs_mb_mq_create()
    1007  addr = hba->sli.sli4.mq.addr.phys;    in emlxs_mb_mq_create_ext()
    1016  qp->params.request.context.CQId = hba->sli.sli4.mq.cqid;    in emlxs_mb_mq_create_ext()
    1036  addr = hba->sli.sli4.mq.addr.phys;    in emlxs_mb_mq_create_ext()
    1046  qp1->params.request.CQId = hba->sli.sli4.mq.cqid;    in emlxs_mb_mq_create_ext()
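
The emlxs_sli4_issue_mbox_cmd() hits show how the next mailbox slot in the MQ ring is chosen: host_index selects a fixed-size entry in the mapped ring memory and wraps back to zero when it reaches max_index. A generic sketch of that indexing follows; ENTRY_WORDS is a hypothetical stand-in for MAILBOX_CMD_SLI4_WSIZE.

    #include <stdint.h>
    #include <stddef.h>

    #define ENTRY_WORDS     64U     /* hypothetical entry size in 32-bit words */

    struct mq_ring {
            uint32_t        *base;          /* mapped ring memory */
            uint32_t        host_index;     /* next slot to use */
            uint32_t        max_index;      /* number of slots in the ring */
    };

    /* Return the next slot and advance host_index, wrapping at max_index. */
    static uint32_t *
    mq_next_slot(struct mq_ring *mq)
    {
            uint32_t *slot = mq->base + (size_t)mq->host_index * ENTRY_WORDS;

            mq->host_index++;
            if (mq->host_index >= mq->max_index)
                    mq->host_index = 0;

            return (slot);
    }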

/illumos-gate/usr/src/contrib/ast/src/cmd/ksh93/sh/

  array.c
     450  Namval_t *nq, *mq;    in array_clone() (local)
     513  mq = 0;    in array_clone()
     515  mq = nv_search(name,ap->table,NV_ADD);    in array_clone()
     518  mq->nvalue.cp = 0;    in array_clone()
     520  ar->val[ar->cur].np = mq;    in array_clone()
     521  nv_clone(nq,mq,flags);    in array_clone()
     529  mq->nvalue = nq->nvalue;    in array_clone()

  nvtree.c
    1019  Namval_t *nq, *mq;    in walk_tree() (local)
    1030  mq = nv_open(stakptr(0),shp->prev_root,NV_VARNAME|NV_NOASSIGN|NV_NOFAIL);    in walk_tree()
    1032  if(nq && mq)    in walk_tree()
    1034  nv_clone(nq,mq,flags|NV_RAW);    in walk_tree()

  nvtype.c
     457  Namval_t *mq = (Namval_t*)((*ap->fun)(nq,NIL(char*),NV_ACURRENT));    in clone_type() (local)
     458  nv_clone(mp,mq,NV_MOVE);    in clone_type()

/illumos-gate/usr/src/uts/common/sys/fibre-channel/fca/emlxs/

  emlxs_extern.h
     533  extern void emlxs_cmpl_mbox(emlxs_hba_t *hba, MAILBOXQ *mq);

  emlxs_fc.h
    1683  MQ_DESC_t mq;    (member)

/illumos-gate/usr/src/uts/common/io/bnx/570x/common/include/

  5706_reg.h
   13580  mailbox_queue_reg_t mq;    (member)