Lines Matching refs:vdp

108 #define	USE_WRITE_BARRIER(vdp)						\
109 ((vdp)->xdf_feature_barrier && !(vdp)->xdf_flush_supported)
110 #define USE_FLUSH_DISKCACHE(vdp) \
111 ((vdp)->xdf_feature_barrier && (vdp)->xdf_flush_supported)
112 #define IS_WRITE_BARRIER(vdp, bp) \
113 (!IS_READ(bp) && USE_WRITE_BARRIER(vdp) && \
114 ((bp)->b_un.b_addr == (vdp)->xdf_cache_flush_block))
116 (!IS_READ(bp) && USE_FLUSH_DISKCACHE(vdp) && ((bp)->b_bcount == 0))
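
Together these predicates decide how a buf is mapped onto a blkif operation: USE_WRITE_BARRIER() holds only when the backend advertises the barrier feature without flush support, USE_FLUSH_DISKCACHE() when it advertises both, IS_WRITE_BARRIER() matches a write of the dedicated cache-flush block, and the continuation at 116 (whose #define line is not among the matches) matches a zero-length non-read buf. A minimal sketch of that mapping, assuming the standard BLKIF_OP_* ring opcodes; xb_op_for_buf() is an illustrative helper, not a function in the driver:

    /*
     * Illustrative only: classify an outgoing buf using the predicates
     * above.  BLKIF_OP_* are the usual blkif interface opcodes.
     */
    static int
    xb_op_for_buf(xdf_t *vdp, buf_t *bp)
    {
            /* zero-length non-read buf while the backend supports flush */
            if (!IS_READ(bp) && USE_FLUSH_DISKCACHE(vdp) && (bp->b_bcount == 0))
                    return (BLKIF_OP_FLUSH_DISKCACHE);
            /* a write of the saved flush block: emulate the flush with a barrier */
            if (IS_WRITE_BARRIER(vdp, bp))
                    return (BLKIF_OP_WRITE_BARRIER);
            return (IS_READ(bp) ? BLKIF_OP_READ : BLKIF_OP_WRITE);
    }
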
183 xdf_t *vdp = arg;
185 mutex_enter(&vdp->xdf_dev_lk);
186 vdp->xdf_timeout_id = 0;
187 mutex_exit(&vdp->xdf_dev_lk);
190 xdf_io_start(vdp);
202 xdf_t *vdp = (xdf_t *)arg;
203 ASSERT(vdp != NULL);
206 vdp->xdf_addr));
208 ddi_trigger_softintr(vdp->xdf_softintr_id);
213 gs_get(xdf_t *vdp, int isread)
221 if (vdp->xdf_gnt_callback.next == NULL) {
222 SETDMACBON(vdp);
224 &vdp->xdf_gnt_callback,
226 (void *)vdp,
235 if (vdp->xdf_timeout_id == 0)
237 vdp->xdf_timeout_id =
238 timeout(xdf_timeout_handler, vdp, hz);
243 gs->gs_oeid = vdp->xdf_peer;
282 vreq_get(xdf_t *vdp, buf_t *bp)
290 if (vdp->xdf_timeout_id == 0)
292 vdp->xdf_timeout_id =
293 timeout(xdf_timeout_handler, vdp, hz);
305 list_insert_head(&vdp->xdf_vreq_act, (void *)vreq);
311 vreq_free(xdf_t *vdp, v_req_t *vreq)
315 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
318 list_remove(&vdp->xdf_vreq_act, vreq);
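
The allocation paths above (gs_get() at 213 and vreq_get() at 282) share a retry convention: when a KM_NOSLEEP allocation fails, a one-shot timeout is armed (only if none is already pending) so that xdf_timeout_handler() can restart I/O processing about a second later. A minimal sketch of that pattern; vreq_try_alloc() and the cache name v_req_cache are assumptions for illustration, not taken from the listing:

    /*
     * Sketch of the NOSLEEP-allocate-or-rearm pattern used by gs_get()
     * and vreq_get() above.
     */
    static v_req_t *
    vreq_try_alloc(xdf_t *vdp)
    {
            v_req_t *vreq;

            if ((vreq = kmem_cache_alloc(v_req_cache, KM_NOSLEEP)) == NULL) {
                    if (vdp->xdf_timeout_id == 0)
                            /* retry I/O processing after hz ticks (one second) */
                            vdp->xdf_timeout_id =
                                timeout(xdf_timeout_handler, vdp, hz);
                    return (NULL);
            }
            return (vreq);
    }
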
358 check_fbwrite(xdf_t *vdp, buf_t *bp, daddr_t blkno)
363 if (IS_WRITE_BARRIER(vdp, bp))
376 vdp->xdf_cache_flush_block, DEV_BSIZE);
386 vreq_setup(xdf_t *vdp, v_req_t *vreq)
407 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
409 "get ge_slotfailed\n", vdp->xdf_addr));
421 if (IS_WRITE_BARRIER(vdp, bp))
426 if (!IS_READ(bp) && USE_WRITE_BARRIER(vdp))
427 check_fbwrite(vdp, bp, vreq->v_blkno);
435 rc = ddi_dma_alloc_handle(vdp->xdf_dip, &xb_dma_attr,
436 xdf_dmacallback, (caddr_t)vdp, &dh);
438 SETDMACBON(vdp);
440 vdp->xdf_addr));
460 rc = ddi_dma_alloc_handle(vdp->xdf_dip, &dmaattr,
461 xdf_dmacallback, (caddr_t)vdp, &mdh);
463 SETDMACBON(vdp);
466 vdp->xdf_addr));
483 DDI_DMA_STREAMING, xdf_dmacallback, (caddr_t)vdp,
486 SETDMACBON(vdp);
489 vdp->xdf_addr));
510 dma_flags, xdf_dmacallback, (caddr_t)vdp,
515 xdf_dmacallback, (caddr_t)vdp, &dc, &ndcs);
526 SETDMACBON(vdp);
528 vdp->xdf_addr));
545 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
547 vdp->xdf_addr));
567 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
569 vdp->xdf_addr));
589 xdf_cmlb_attach(xdf_t *vdp)
591 dev_info_t *dip = vdp->xdf_dip;
594 XD_IS_CD(vdp) ? DTYPE_RODIRECT : DTYPE_DIRECT,
595 XD_IS_RM(vdp),
597 XD_IS_CD(vdp) ? DDI_NT_CD_XVMD : DDI_NT_BLOCK_XVMD,
599 (XD_IS_CD(vdp) ? 0 : CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT) |
602 XD_IS_CD(vdp) ? 0 : CMLB_FAKE_LABEL_ONE_PARTITION,
604 vdp->xdf_vd_lbl, NULL));
617 xdf_kstat_enter(xdf_t *vdp, buf_t *bp)
621 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
623 if (vdp->xdf_xdev_iostat == NULL)
626 kstat_runq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
628 kstat_waitq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
633 xdf_kstat_exit(xdf_t *vdp, buf_t *bp)
637 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
639 if (vdp->xdf_xdev_iostat == NULL)
642 kstat_runq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
644 kstat_waitq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
649 xdf_kstat_waitq_to_runq(xdf_t *vdp, buf_t *bp)
653 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
657 if (vdp->xdf_xdev_iostat == NULL)
659 kstat_waitq_to_runq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
663 xdf_kstat_runq_to_waitq(xdf_t *vdp, buf_t *bp)
667 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
671 if (vdp->xdf_xdev_iostat == NULL)
673 kstat_runq_back_to_waitq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
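
The four helpers above keep the iostat kstat's wait/run queues in step with where a buf actually is: accepted but not yet on the ring (wait queue), on the ring (run queue), moved back to the wait queue if the ring is torn down, and removed on completion. Every transition is made under xdf_dev_lk, which is also installed as the kstat's ks_lock further down (701). A compressed, illustrative lifecycle; xdf_kstat_lifecycle() is hypothetical, and in the driver these calls are spread across xdf_io_start(), xdf_io_fini() and xdf_ring_destroy():

    static void
    xdf_kstat_lifecycle(xdf_t *vdp, buf_t *bp)
    {
            mutex_enter(&vdp->xdf_dev_lk);
            xdf_kstat_enter(vdp, bp);           /* buf accepted: enters wait (or run) queue */
            xdf_kstat_waitq_to_runq(vdp, bp);   /* its request was placed on the ring */
            xdf_kstat_exit(vdp, bp);            /* I/O finished: leaves run (or wait) queue */
            mutex_exit(&vdp->xdf_dev_lk);
    }
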
679 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
689 mutex_enter(&vdp->xdf_iostat_lk);
690 mutex_enter(&vdp->xdf_dev_lk);
693 if (vdp->xdf_xdev_iostat != NULL) {
694 mutex_exit(&vdp->xdf_dev_lk);
695 mutex_exit(&vdp->xdf_iostat_lk);
700 vdp->xdf_xdev_iostat = kstat;
701 vdp->xdf_xdev_iostat->ks_lock = &vdp->xdf_dev_lk;
702 kstat_install(vdp->xdf_xdev_iostat);
719 bp = vdp->xdf_f_act;
721 xdf_kstat_enter(vdp, bp);
724 if (vdp->xdf_ready_tq_bp != NULL)
725 xdf_kstat_enter(vdp, vdp->xdf_ready_tq_bp);
727 mutex_exit(&vdp->xdf_dev_lk);
728 mutex_exit(&vdp->xdf_iostat_lk);
735 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
748 mutex_enter(&vdp->xdf_iostat_lk);
749 mutex_enter(&vdp->xdf_dev_lk);
751 if (vdp->xdf_xdev_iostat == NULL) {
752 mutex_exit(&vdp->xdf_dev_lk);
753 mutex_exit(&vdp->xdf_iostat_lk);
769 bp = vdp->xdf_f_act;
771 xdf_kstat_exit(vdp, bp);
774 if (vdp->xdf_ready_tq_bp != NULL)
775 xdf_kstat_exit(vdp, vdp->xdf_ready_tq_bp);
777 kstat = vdp->xdf_xdev_iostat;
778 vdp->xdf_xdev_iostat = NULL;
779 mutex_exit(&vdp->xdf_dev_lk);
781 mutex_exit(&vdp->xdf_iostat_lk);
795 xdf_bp_push(xdf_t *vdp, buf_t *bp)
797 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
800 xdf_kstat_enter(vdp, bp);
802 if (curthread == vdp->xdf_ready_tq_thread) {
804 ASSERT(vdp->xdf_ready_tq_bp == NULL);
805 vdp->xdf_ready_tq_bp = bp;
810 ASSERT(bp != vdp->xdf_ready_tq_bp);
812 if (vdp->xdf_f_act == NULL) {
814 ASSERT(vdp->xdf_l_act == NULL);
815 ASSERT(vdp->xdf_i_act == NULL);
816 vdp->xdf_f_act = vdp->xdf_l_act = vdp->xdf_i_act = bp;
821 vdp->xdf_l_act->av_forw = bp;
822 vdp->xdf_l_act = bp;
823 if (vdp->xdf_i_act == NULL)
824 vdp->xdf_i_act = bp;
828 xdf_bp_pop(xdf_t *vdp, buf_t *bp)
832 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
835 if (vdp->xdf_ready_tq_bp == bp) {
838 vdp->xdf_ready_tq_bp = NULL;
843 ASSERT((bp->av_forw != NULL) || (bp == vdp->xdf_l_act));
844 ASSERT((bp->av_forw == NULL) || (bp != vdp->xdf_l_act));
845 ASSERT(VREQ_DONE(BP_VREQ(vdp->xdf_f_act)));
846 ASSERT(vdp->xdf_f_act != vdp->xdf_i_act);
848 if (bp == vdp->xdf_f_act) {
850 vdp->xdf_f_act = bp->av_forw;
851 if (bp == vdp->xdf_l_act)
852 vdp->xdf_l_act = NULL;
855 bp_iter = vdp->xdf_f_act;
859 ASSERT(bp_iter != vdp->xdf_i_act);
862 if (bp == vdp->xdf_l_act)
863 vdp->xdf_l_act = bp_iter;
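
xdf_bp_push() and xdf_bp_pop() above maintain a single av_forw-linked list of active bufs with three cursors: xdf_f_act (head), xdf_l_act (tail, the only entry with a NULL av_forw, per the asserts at 843-844), and xdf_i_act (first buf not yet issued to the ring), plus a separate xdf_ready_tq_bp slot for I/O generated by the ready taskq thread. A small helper built on those invariants, hypothetical and not part of the driver:

    /*
     * Hypothetical helper: count bufs that are queued but not yet issued
     * to the ring, walking from xdf_i_act to the NULL-terminated tail.
     */
    static int
    xdf_bp_unissued(xdf_t *vdp)
    {
            buf_t *bp;
            int cnt = 0;

            ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
            for (bp = vdp->xdf_i_act; bp != NULL; bp = bp->av_forw)
                    cnt++;
            return (cnt);
    }
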
869 xdf_bp_next(xdf_t *vdp)
874 if (vdp->xdf_state == XD_CONNECTED) {
879 if ((bp = vdp->xdf_ready_tq_bp) == NULL)
887 if (vdp->xdf_state != XD_READY)
890 ASSERT(vdp->xdf_ready_tq_bp == NULL);
892 if ((bp = vdp->xdf_i_act) == NULL)
898 vdp->xdf_i_act = bp->av_forw;
903 xdf_io_fini(xdf_t *vdp, uint64_t id, int bioerr)
909 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
921 xdf_bp_pop(vdp, bp);
924 xdf_kstat_exit(vdp, bp);
926 vreq_free(vdp, vreq);
942 xdf_intr_locked(xdf_t *vdp)
952 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
954 if ((xbr = vdp->xdf_xb_ring) == NULL)
957 acchdl = vdp->xdf_xb_ring_hdl;
971 vdp->xdf_addr,
978 xdf_io_fini(vdp, id, bioerr);
990 xdf_t *vdp = (xdf_t *)arg;
993 mutex_enter(&vdp->xdf_dev_lk);
994 rv = xdf_intr_locked(vdp);
995 mutex_exit(&vdp->xdf_dev_lk);
998 xdf_io_start(vdp);
1004 xdf_ring_push(xdf_t *vdp)
1006 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1008 if (vdp->xdf_xb_ring == NULL)
1011 if (xvdi_ring_push_request(vdp->xdf_xb_ring)) {
1014 vdp->xdf_addr));
1017 if (xvdi_get_evtchn(vdp->xdf_dip) != INVALID_EVTCHN)
1018 xvdi_notify_oe(vdp->xdf_dip);
1022 xdf_ring_drain_locked(xdf_t *vdp)
1026 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1032 if (vdp->xdf_xb_ring == NULL)
1035 if (xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
1036 (void) xdf_intr_locked(vdp);
1037 if (!xvdi_ring_has_incomp_request(vdp->xdf_xb_ring))
1039 xdf_ring_push(vdp);
1042 mutex_exit(&vdp->xdf_dev_lk);
1047 mutex_enter(&vdp->xdf_dev_lk);
1049 cmn_err(CE_WARN, "xdf@%s: xdf_ring_drain: timeout", vdp->xdf_addr);
1052 if (vdp->xdf_xb_ring != NULL) {
1053 if (xvdi_ring_has_incomp_request(vdp->xdf_xb_ring) ||
1054 xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
1059 vdp->xdf_addr, rv);
1064 xdf_ring_drain(xdf_t *vdp)
1067 mutex_enter(&vdp->xdf_dev_lk);
1068 rv = xdf_ring_drain_locked(vdp);
1069 mutex_exit(&vdp->xdf_dev_lk);
1077 xdf_ring_destroy(xdf_t *vdp)
1083 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1084 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1086 if ((vdp->xdf_state != XD_INIT) &&
1087 (vdp->xdf_state != XD_CONNECTED) &&
1088 (vdp->xdf_state != XD_READY)) {
1089 ASSERT(vdp->xdf_xb_ring == NULL);
1090 ASSERT(vdp->xdf_xb_ring_hdl == NULL);
1091 ASSERT(vdp->xdf_peer == INVALID_DOMID);
1092 ASSERT(vdp->xdf_evtchn == INVALID_EVTCHN);
1093 ASSERT(list_is_empty(&vdp->xdf_vreq_act));
1102 ec_unbind_evtchn(vdp->xdf_evtchn);
1104 (void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
1113 (void) xdf_ring_drain_locked(vdp);
1116 xvdi_free_evtchn(vdp->xdf_dip);
1117 vdp->xdf_evtchn = INVALID_EVTCHN;
1119 while ((vreq = list_head(&vdp->xdf_vreq_act)) != NULL) {
1129 xdf_kstat_runq_to_waitq(vdp, bp);
1135 vreq_free(vdp, vreq);
1141 vdp->xdf_i_act = vdp->xdf_f_act;
1144 xvdi_free_ring(vdp->xdf_xb_ring);
1145 vdp->xdf_xb_ring = NULL;
1146 vdp->xdf_xb_ring_hdl = NULL;
1147 vdp->xdf_peer = INVALID_DOMID;
1161 xdf_eject_pending(xdf_t *vdp)
1163 dev_info_t *dip = vdp->xdf_dip;
1166 if (!vdp->xdf_media_req_supported)
1185 xdf_media_req(xdf_t *vdp, char *req, boolean_t media_required)
1187 dev_info_t *dip = vdp->xdf_dip;
1196 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1197 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1203 if (!XD_IS_CD(vdp) || !vdp->xdf_media_req_supported)
1207 if (xdf_eject_pending(vdp))
1211 if (media_required && (vdp->xdf_xdev_nblocks == 0))
1215 if (vdp->xdf_state != XD_READY)
1228 xdf_process_rreq(xdf_t *vdp, struct buf *bp, blkif_request_t *rreq)
1235 dev_info_t *dip = vdp->xdf_dip;
1240 ddi_acc_handle_t acchdl = vdp->xdf_xb_ring_hdl;
1245 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1266 if (!vdp->xdf_wce)
1307 vdp->xdf_addr, seg, vreq->v_dmac.dmac_size, blk_off));
1310 vdp->xdf_addr, seg, fsect, lsect, gr, dma_addr));
1327 vdp->xdf_addr, rreq->id));
1331 xdf_io_start(xdf_t *vdp)
1338 mutex_enter(&vdp->xdf_dev_lk);
1346 if (vdp->xdf_suspending)
1348 if ((bp = xdf_bp_next(vdp)) == NULL)
1353 ((vreq = vreq_get(vdp, bp)) == NULL))
1357 if (vreq_setup(vdp, vreq) != DDI_SUCCESS)
1361 if ((rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
1367 xdf_process_rreq(vdp, bp, rreq);
1374 xdf_kstat_waitq_to_runq(vdp, bp);
1379 xdf_ring_push(vdp);
1381 mutex_exit(&vdp->xdf_dev_lk);
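
Read together, the xdf_io_start() fragments above form the issue loop: pull the next buf, attach and set up a v_req, reserve a ring slot, fill in the blkif request, and account the buf as running; when any resource is unavailable the loop stops and is retried later via the DMA callback or the timeout. A condensed sketch under that reading, with the suspend and error paths omitted; xdf_io_start_sketch() is illustrative only (the driver also reuses an already-attached vreq before calling vreq_get()):

    static void
    xdf_io_start_sketch(xdf_t *vdp)
    {
            buf_t *bp;
            v_req_t *vreq;
            blkif_request_t *rreq;

            mutex_enter(&vdp->xdf_dev_lk);
            while ((bp = xdf_bp_next(vdp)) != NULL) {
                    if ((vreq = vreq_get(vdp, bp)) == NULL ||
                        vreq_setup(vdp, vreq) != DDI_SUCCESS ||
                        (rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
                            break;                   /* retried when resources free up */
                    xdf_process_rreq(vdp, bp, rreq); /* fill in the blkif request */
                    xdf_kstat_waitq_to_runq(vdp, bp);
            }
            xdf_ring_push(vdp);                      /* publish requests, notify backend */
            mutex_exit(&vdp->xdf_dev_lk);
    }
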
1387 xdf_isopen(xdf_t *vdp, int partition)
1402 if (vdp->xdf_vd_open[i] & parbit)
1415 xdf_busy(xdf_t *vdp)
1417 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1419 if ((vdp->xdf_xb_ring != NULL) &&
1420 xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring)) {
1421 ASSERT(vdp->xdf_state != XD_CLOSED);
1425 if (!list_is_empty(&vdp->xdf_vreq_act) || (vdp->xdf_f_act != NULL)) {
1426 ASSERT(vdp->xdf_state != XD_CLOSED);
1430 if (xdf_isopen(vdp, -1)) {
1431 ASSERT(vdp->xdf_state != XD_CLOSED);
1435 if (vdp->xdf_connect_req > 0) {
1436 ASSERT(vdp->xdf_state != XD_CLOSED);
1444 xdf_set_state(xdf_t *vdp, xdf_state_t new_state)
1446 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1447 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1449 vdp->xdf_addr, vdp->xdf_state, new_state));
1450 vdp->xdf_state = new_state;
1451 cv_broadcast(&vdp->xdf_dev_cv);
1455 xdf_disconnect(xdf_t *vdp, xdf_state_t new_state, boolean_t quiet)
1457 dev_info_t *dip = vdp->xdf_dip;
1460 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1461 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1465 if (vdp->xdf_state == new_state)
1468 mutex_enter(&vdp->xdf_dev_lk);
1469 busy = xdf_busy(vdp);
1472 if (vdp->xdf_state == XD_CLOSED) {
1474 xdf_set_state(vdp, new_state);
1475 mutex_exit(&vdp->xdf_dev_lk);
1481 if (!quiet && busy && (vdp->xdf_state == XD_READY) &&
1482 (vdp->xdf_xdev_nblocks != 0)) {
1484 vdp->xdf_addr);
1488 xdf_ring_destroy(vdp);
1491 xdf_set_state(vdp, (busy) ? XD_UNKNOWN : new_state);
1492 mutex_exit(&vdp->xdf_dev_lk);
1495 if (vdp->xdf_state == XD_CLOSED)
1507 xdf_setstate_init(xdf_t *vdp)
1509 dev_info_t *dip = vdp->xdf_dip;
1515 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1516 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1517 ASSERT((vdp->xdf_state == XD_UNKNOWN) ||
1518 (vdp->xdf_state == XD_CLOSED));
1521 ("xdf@%s: starting connection process\n", vdp->xdf_addr));
1527 if (xdf_eject_pending(vdp))
1533 if ((vdp->xdf_peer = xvdi_get_oeid(dip)) == INVALID_DOMID)
1550 vdp->xdf_evtchn = xvdi_get_evtchn(dip);
1552 ec_bind_evtchn_to_handler(vdp->xdf_evtchn, IPL_VBD, xdf_intr, vdp);
1554 if (ddi_add_intr(dip, 0, NULL, NULL, xdf_intr, (caddr_t)vdp) !=
1557 "failed to add intr handler", vdp->xdf_addr);
1563 sizeof (union blkif_sring_entry), &gref, &vdp->xdf_xb_ring) !=
1566 vdp->xdf_addr);
1569 vdp->xdf_xb_ring_hdl = vdp->xdf_xb_ring->xr_acc_hdl; /* ugly!! */
1577 vdp->xdf_addr);
1592 XBP_EVENT_CHAN, "%u", vdp->xdf_evtchn)) != 0) ||
1609 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1610 mutex_enter(&vdp->xdf_dev_lk);
1611 xdf_set_state(vdp, XD_INIT);
1612 mutex_exit(&vdp->xdf_dev_lk);
1617 xvdi_free_ring(vdp->xdf_xb_ring);
1620 ec_unbind_evtchn(vdp->xdf_evtchn);
1622 (void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
1626 vdp->xdf_evtchn = INVALID_EVTCHN;
1628 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1630 vdp->xdf_addr);
1635 xdf_get_flush_block(xdf_t *vdp)
1640 vdp->xdf_flush_mem = kmem_alloc(vdp->xdf_xdev_secsize * 2, KM_SLEEP);
1641 vdp->xdf_cache_flush_block =
1642 (char *)P2ROUNDUP((uintptr_t)(vdp->xdf_flush_mem),
1643 (int)vdp->xdf_xdev_secsize);
1645 if (xdf_lb_rdwr(vdp->xdf_dip, TG_READ, vdp->xdf_cache_flush_block,
1646 xdf_flush_block, vdp->xdf_xdev_secsize, NULL) != 0)
1654 xdf_t *vdp = (xdf_t *)arg;
1656 vdp->xdf_ready_tq_thread = curthread;
1665 mutex_enter(&vdp->xdf_dev_lk);
1666 if (vdp->xdf_cmbl_reattach) {
1667 vdp->xdf_cmbl_reattach = B_FALSE;
1669 mutex_exit(&vdp->xdf_dev_lk);
1670 if (xdf_cmlb_attach(vdp) != 0) {
1671 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1674 mutex_enter(&vdp->xdf_dev_lk);
1678 if (vdp->xdf_state != XD_CONNECTED) {
1679 mutex_exit(&vdp->xdf_dev_lk);
1682 mutex_exit(&vdp->xdf_dev_lk);
1688 vdp->xdf_flush_supported = B_FALSE;
1689 if (vdp->xdf_feature_barrier) {
1694 vdp->xdf_flush_supported = B_TRUE;
1695 if (xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE, NULL, 0, 0, 0) == 0) {
1696 vdp->xdf_flush_supported = B_TRUE;
1698 vdp->xdf_flush_supported = B_FALSE;
1710 if (xdf_get_flush_block(vdp) != DDI_SUCCESS) {
1711 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1717 mutex_enter(&vdp->xdf_cb_lk);
1718 mutex_enter(&vdp->xdf_dev_lk);
1719 if (vdp->xdf_state == XD_CONNECTED)
1720 xdf_set_state(vdp, XD_READY);
1721 mutex_exit(&vdp->xdf_dev_lk);
1724 xdf_io_start(vdp);
1726 mutex_exit(&vdp->xdf_cb_lk);
1738 xdf_t *vdp;
1741 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
1743 ncyl = vdp->xdf_xdev_nblocks / (XDF_NHEADS * XDF_NSECTS);
1750 geomp->g_secsize = vdp->xdf_xdev_secsize;
1751 geomp->g_capacity = vdp->xdf_xdev_nblocks;
1763 xdf_setstate_connected(xdf_t *vdp)
1765 dev_info_t *dip = vdp->xdf_dip;
1772 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1773 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1774 ASSERT(vdp->xdf_state == XD_INIT);
1785 if (!(vdp->xdf_feature_barrier = xenbus_exists(oename, XBP_FB)))
1787 vdp->xdf_addr);
1802 "cannot read backend info", vdp->xdf_addr);
1807 vdp->xdf_addr);
1816 vdp->xdf_xdev_nblocks = nblocks;
1817 vdp->xdf_xdev_secsize = secsize;
1819 if (vdp->xdf_xdev_nblocks > DK_MAX_BLOCKS) {
1822 " 32-bit kernel", vdp->xdf_addr, vdp->xdf_xdev_nblocks);
1833 if (vdp->xdf_pgeom_fixed &&
1834 (vdp->xdf_pgeom.g_capacity > vdp->xdf_xdev_nblocks)) {
1837 vdp->xdf_addr);
1841 vdp->xdf_media_req_supported = xenbus_exists(oename, XBP_MEDIA_REQ_SUP);
1844 mutex_enter(&vdp->xdf_dev_lk);
1845 xdf_set_state(vdp, XD_CONNECTED);
1849 if ((vdp->xdf_dinfo != dinfo) ||
1850 (!vdp->xdf_pgeom_fixed &&
1851 (memcmp(&vdp->xdf_pgeom, &pgeom, sizeof (pgeom)) != 0))) {
1852 vdp->xdf_cmbl_reattach = B_TRUE;
1854 vdp->xdf_dinfo = dinfo;
1855 if (!vdp->xdf_pgeom_fixed)
1856 vdp->xdf_pgeom = pgeom;
1859 if (XD_IS_CD(vdp) || XD_IS_RM(vdp)) {
1860 if (vdp->xdf_xdev_nblocks == 0) {
1861 vdp->xdf_mstate = DKIO_EJECTED;
1862 cv_broadcast(&vdp->xdf_mstate_cv);
1864 vdp->xdf_mstate = DKIO_INSERTED;
1865 cv_broadcast(&vdp->xdf_mstate_cv);
1868 if (vdp->xdf_mstate != DKIO_NONE) {
1869 vdp->xdf_mstate = DKIO_NONE;
1870 cv_broadcast(&vdp->xdf_mstate_cv);
1874 mutex_exit(&vdp->xdf_dev_lk);
1876 cmn_err(CE_CONT, "?xdf@%s: %"PRIu64" blocks", vdp->xdf_addr,
1877 (uint64_t)vdp->xdf_xdev_nblocks);
1880 xdf_io_start(vdp);
1890 (void) ddi_taskq_dispatch(vdp->xdf_ready_tq, xdf_setstate_ready, vdp,
1902 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
1905 vdp->xdf_addr, new_state));
1907 mutex_enter(&vdp->xdf_cb_lk);
1910 ASSERT(vdp->xdf_oe_change_thread == NULL);
1911 DEBUG_EVAL(vdp->xdf_oe_change_thread = curthread);
1914 if (vdp->xdf_suspending || (vdp->xdf_state == XD_SUSPEND)) {
1915 DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
1916 mutex_exit(&vdp->xdf_cb_lk);
1925 if (vdp->xdf_state == XD_INIT)
1928 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1929 if (xdf_setstate_init(vdp) != DDI_SUCCESS)
1931 ASSERT(vdp->xdf_state == XD_INIT);
1935 if ((vdp->xdf_state == XD_CONNECTED) ||
1936 (vdp->xdf_state == XD_READY))
1939 if (vdp->xdf_state != XD_INIT) {
1940 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1941 if (xdf_setstate_init(vdp) != DDI_SUCCESS)
1943 ASSERT(vdp->xdf_state == XD_INIT);
1946 if (xdf_setstate_connected(vdp) != DDI_SUCCESS) {
1947 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1950 ASSERT(vdp->xdf_state == XD_CONNECTED);
1954 if (xdf_isopen(vdp, -1)) {
1957 vdp->xdf_addr);
1962 xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
1967 cv_broadcast(&vdp->xdf_dev_cv);
1968 DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
1969 mutex_exit(&vdp->xdf_cb_lk);
1973 xdf_connect_locked(xdf_t *vdp, boolean_t wait)
1977 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1978 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1981 if (vdp->xdf_state == XD_CLOSED)
1984 vdp->xdf_connect_req++;
1985 while (vdp->xdf_state != XD_READY) {
1986 mutex_exit(&vdp->xdf_dev_lk);
1989 if (vdp->xdf_connect_thread == NULL)
1990 vdp->xdf_connect_thread = curthread;
1992 if (vdp->xdf_connect_thread == curthread) {
2000 (void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
2003 if (vdp->xdf_state == XD_UNKNOWN)
2004 (void) xdf_setstate_init(vdp);
2005 if (vdp->xdf_state == XD_INIT)
2006 (void) xdf_setstate_connected(vdp);
2009 mutex_enter(&vdp->xdf_dev_lk);
2010 if (!wait || (vdp->xdf_state == XD_READY))
2013 mutex_exit((&vdp->xdf_cb_lk));
2014 if (vdp->xdf_connect_thread != curthread) {
2015 rv = cv_wait_sig(&vdp->xdf_dev_cv, &vdp->xdf_dev_lk);
2018 rv = cv_reltimedwait_sig(&vdp->xdf_dev_cv,
2019 &vdp->xdf_dev_lk, drv_usectohz(100*1000),
2024 mutex_exit((&vdp->xdf_dev_lk));
2025 mutex_enter((&vdp->xdf_cb_lk));
2026 mutex_enter((&vdp->xdf_dev_lk));
2032 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2033 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
2035 if (vdp->xdf_connect_thread == curthread) {
2040 cv_signal(&vdp->xdf_dev_cv);
2041 vdp->xdf_connect_thread = NULL;
2045 mutex_exit((&vdp->xdf_dev_lk));
2046 (void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2047 mutex_enter((&vdp->xdf_dev_lk));
2049 vdp->xdf_connect_req--;
2050 return (vdp->xdf_state);
2056 xdf_t *vdp = (xdf_t *)arg;
2058 ASSERT(vdp != NULL);
2060 mutex_enter(&vdp->xdf_dev_lk);
2061 ASSERT(ISDMACBON(vdp));
2062 SETDMACBOFF(vdp);
2063 mutex_exit(&vdp->xdf_dev_lk);
2065 xdf_io_start(vdp);
2177 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2181 mutex_enter(&vdp->xdf_cb_lk);
2189 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2198 mutex_exit(&vdp->xdf_cb_lk);
2211 if (cv_wait_sig(&vdp->xdf_hp_status_cv, &vdp->xdf_cb_lk) == 0) {
2213 mutex_exit(&vdp->xdf_cb_lk);
2219 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2233 if (XD_IS_CD(vdp) && !xenbus_exists(oename, XBP_MEDIA_REQ_SUP)) {
2234 mutex_exit(&vdp->xdf_cb_lk);
2238 mutex_enter(&vdp->xdf_dev_lk);
2239 rv = xdf_connect_locked(vdp, B_TRUE);
2240 mutex_exit(&vdp->xdf_dev_lk);
2241 mutex_exit(&vdp->xdf_cb_lk);
2249 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2252 mutex_enter(&vdp->xdf_dev_lk);
2255 mutex_exit(&vdp->xdf_dev_lk);
2264 if ((vdp->xdf_xdev_nblocks != 0) &&
2265 (geomp->g_capacity > vdp->xdf_xdev_nblocks)) {
2266 mutex_exit(&vdp->xdf_dev_lk);
2270 bzero(&vdp->xdf_pgeom, sizeof (vdp->xdf_pgeom));
2271 vdp->xdf_pgeom.g_ncyl = geomp->g_ncyl;
2272 vdp->xdf_pgeom.g_acyl = geomp->g_acyl;
2273 vdp->xdf_pgeom.g_nhead = geomp->g_nhead;
2274 vdp->xdf_pgeom.g_nsect = geomp->g_nsect;
2275 vdp->xdf_pgeom.g_secsize = geomp->g_secsize;
2276 vdp->xdf_pgeom.g_capacity = geomp->g_capacity;
2277 vdp->xdf_pgeom.g_intrlv = geomp->g_intrlv;
2278 vdp->xdf_pgeom.g_rpm = geomp->g_rpm;
2280 vdp->xdf_pgeom_fixed = B_TRUE;
2281 mutex_exit(&vdp->xdf_dev_lk);
2284 cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
2292 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2295 mutex_enter(&vdp->xdf_cb_lk);
2296 rv = XD_IS_CD(vdp);
2297 mutex_exit(&vdp->xdf_cb_lk);
2304 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2307 mutex_enter(&vdp->xdf_cb_lk);
2308 rv = XD_IS_RM(vdp);
2309 mutex_exit(&vdp->xdf_cb_lk);
2316 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2319 mutex_enter(&vdp->xdf_cb_lk);
2320 rv = vdp->xdf_media_req_supported;
2321 mutex_exit(&vdp->xdf_cb_lk);
2330 xdf_t *vdp;
2331 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
2333 if (vdp == NULL)
2336 mutex_enter(&vdp->xdf_dev_lk);
2337 *capp = vdp->xdf_pgeom.g_capacity;
2338 DPRINTF(LBL_DBG, ("xdf@%s:capacity %llu\n", vdp->xdf_addr, *capp));
2339 mutex_exit(&vdp->xdf_dev_lk);
2346 xdf_t *vdp;
2348 if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
2350 *geomp = vdp->xdf_pgeom;
2367 xdf_t *vdp;
2369 if (!(vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))))
2372 if (XD_IS_RO(vdp))
2385 xdf_t *vdp;
2389 if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
2400 mutex_enter(&vdp->xdf_cb_lk);
2401 *(uint32_t *)arg = vdp->xdf_xdev_secsize;
2402 mutex_exit(&vdp->xdf_cb_lk);
2416 xdf_t *vdp;
2420 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
2423 ASSERT(curthread != vdp->xdf_oe_change_thread);
2425 if ((start + ((reqlen / (vdp->xdf_xdev_secsize / DEV_BSIZE))
2426 >> DEV_BSHIFT)) > vdp->xdf_pgeom.g_capacity)
2437 bp->b_blkno = start * (vdp->xdf_xdev_secsize / DEV_BSIZE);
2440 mutex_enter(&vdp->xdf_dev_lk);
2441 xdf_bp_push(vdp, bp);
2442 mutex_exit(&vdp->xdf_dev_lk);
2443 xdf_io_start(vdp);
2444 if (curthread == vdp->xdf_ready_tq_thread)
2445 (void) xdf_ring_drain(vdp);
2457 xdf_ioctl_mlock(xdf_t *vdp)
2460 mutex_enter(&vdp->xdf_cb_lk);
2461 rv = xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2462 mutex_exit(&vdp->xdf_cb_lk);
2470 xdf_ioctl_munlock(xdf_t *vdp)
2473 mutex_enter(&vdp->xdf_cb_lk);
2474 rv = xdf_media_req(vdp, XBV_MEDIA_REQ_NONE, B_TRUE);
2475 mutex_exit(&vdp->xdf_cb_lk);
2484 xdf_ioctl_eject(xdf_t *vdp)
2488 mutex_enter(&vdp->xdf_cb_lk);
2489 if ((rv = xdf_media_req(vdp, XBV_MEDIA_REQ_EJECT, B_FALSE)) != 0) {
2490 mutex_exit(&vdp->xdf_cb_lk);
2500 (void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
2501 mutex_enter(&vdp->xdf_dev_lk);
2502 if (xdf_connect_locked(vdp, B_TRUE) != XD_READY) {
2503 mutex_exit(&vdp->xdf_dev_lk);
2504 mutex_exit(&vdp->xdf_cb_lk);
2507 mutex_exit(&vdp->xdf_dev_lk);
2508 mutex_exit(&vdp->xdf_cb_lk);
2519 xdf_dkstate(xdf_t *vdp, enum dkio_state mstate)
2523 mutex_enter(&vdp->xdf_cb_lk);
2524 prev_state = vdp->xdf_mstate;
2526 if (vdp->xdf_mstate == mstate) {
2527 while (vdp->xdf_mstate == prev_state) {
2528 if (cv_wait_sig(&vdp->xdf_mstate_cv,
2529 &vdp->xdf_cb_lk) == 0) {
2530 mutex_exit(&vdp->xdf_cb_lk);
2537 (vdp->xdf_mstate == DKIO_INSERTED)) {
2538 (void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2539 mutex_exit(&vdp->xdf_cb_lk);
2543 mutex_exit(&vdp->xdf_cb_lk);
2554 xdf_t *vdp;
2557 if (((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL) ||
2558 (!xdf_isopen(vdp, part)))
2562 vdp->xdf_addr, cmd, cmd));
2585 return (cmlb_ioctl(vdp->xdf_vd_lbl, dev, cmd, arg, mode, credp,
2590 return (xdf_ioctl_eject(vdp));
2592 return (xdf_ioctl_mlock(vdp));
2594 return (xdf_ioctl_munlock(vdp));
2597 if (!XD_IS_CD(vdp))
2606 media_info.dki_lbsize = vdp->xdf_xdev_secsize;
2607 media_info.dki_capacity = vdp->xdf_pgeom.g_capacity;
2608 if (XD_IS_CD(vdp))
2622 if (XD_IS_CD(vdp))
2631 info.dki_unit = ddi_get_instance(vdp->xdf_dip);
2651 if ((rv = xdf_dkstate(vdp, mstate)) != 0)
2653 mstate = vdp->xdf_mstate;
2660 int i = BOOLEAN2VOID(XD_IS_RM(vdp));
2666 int i = BOOLEAN2VOID(XD_IS_RM(vdp));
2675 vdp->xdf_wce = VOID2BOOLEAN(i);
2681 if (vdp->xdf_flush_supported) {
2682 rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
2684 } else if (vdp->xdf_feature_barrier &&
2686 rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
2687 vdp->xdf_cache_flush_block, xdf_flush_block,
2688 vdp->xdf_xdev_secsize, (void *)dev);
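
The ioctl fragments just above are evidently the write-cache-flush path, and they close the loop with the macros at the top of the listing: when the backend supports an explicit flush, a zero-length TG_WRITE through xdf_lb_rdwr() becomes a flush request; otherwise, if the barrier feature is present, the saved first sector (xdf_cache_flush_block) is rewritten so that IS_WRITE_BARRIER() matches the resulting buf and it is issued as a barrier write. A condensed sketch; the zero-length argument list and the elided part of the else-if condition are reconstructed for illustration:

    if (vdp->xdf_flush_supported) {
            /* zero-length write: turned into a flush-diskcache request */
            rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE, NULL, 0, 0, (void *)dev);
    } else if (vdp->xdf_feature_barrier /* && further check elided above */) {
            /* rewrite the saved sector; IS_WRITE_BARRIER() will match this buf */
            rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
                vdp->xdf_cache_flush_block, xdf_flush_block,
                vdp->xdf_xdev_secsize, (void *)dev);
    }
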
2707 xdf_t *vdp;
2716 vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor));
2718 mutex_enter(&vdp->xdf_dev_lk);
2719 if (!xdf_isopen(vdp, part)) {
2720 mutex_exit(&vdp->xdf_dev_lk);
2726 ASSERT(curthread != vdp->xdf_oe_change_thread);
2729 if (!IS_READ(bp) && XD_IS_RO(vdp)) {
2730 mutex_exit(&vdp->xdf_dev_lk);
2738 p_blkct = vdp->xdf_xdev_nblocks;
2742 mutex_exit(&vdp->xdf_dev_lk);
2743 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
2748 mutex_enter(&vdp->xdf_dev_lk);
2755 blkno = bp->b_blkno / (vdp->xdf_xdev_secsize / XB_BSIZE);
2760 vdp->xdf_addr, (longlong_t)blkno, (uint64_t)p_blkct));
2761 mutex_exit(&vdp->xdf_dev_lk);
2768 mutex_exit(&vdp->xdf_dev_lk);
2780 if (vdp->xdf_xdev_secsize != 0 &&
2781 vdp->xdf_xdev_secsize != XB_BSIZE) {
2782 nblks = bp->b_bcount / vdp->xdf_xdev_secsize;
2788 if (vdp->xdf_xdev_secsize != 0 &&
2789 vdp->xdf_xdev_secsize != XB_BSIZE) {
2792 vdp->xdf_xdev_secsize;
2802 vdp->xdf_addr, (longlong_t)blkno, (ulong_t)bp->b_bcount));
2808 xdf_bp_push(vdp, bp);
2809 mutex_exit(&vdp->xdf_dev_lk);
2810 xdf_io_start(vdp);
2812 (void) xdf_ring_drain(vdp);
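
The block-address arithmetic above converts the buf's 512-byte-block address into backend sectors when the backend sector size is larger than XB_BSIZE. A worked example of the expressions at 2755 and 2782, with the numeric values chosen purely for illustration and assuming XB_BSIZE == DEV_BSIZE == 512:

    /*
     * With a 4096-byte backend sector size:
     *
     *   bp->b_blkno  = 24   (512-byte blocks)
     *   bp->b_bcount = 8192 (bytes)
     *
     *   blkno = 24 / (4096 / 512) = 3    backend sectors
     *   nblks = 8192 / 4096       = 2    backend sectors of data
     *
     * A b_blkno or b_bcount that does not map onto whole backend sectors
     * cannot be expressed as a ring request and is not passed through.
     */
    blkno = bp->b_blkno / (vdp->xdf_xdev_secsize / XB_BSIZE);
    nblks = bp->b_bcount / vdp->xdf_xdev_secsize;
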
2820 xdf_t *vdp;
2826 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2830 vdp->xdf_addr, (int64_t)uiop->uio_offset));
2833 if (!xdf_isopen(vdp, part))
2836 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2840 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2853 xdf_t *vdp;
2859 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2863 vdp->xdf_addr, (int64_t)uiop->uio_offset));
2866 if (!xdf_isopen(vdp, part))
2869 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2873 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2886 xdf_t *vdp;
2893 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2897 if (!xdf_isopen(vdp, part))
2900 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2904 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2917 xdf_t *vdp;
2924 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2928 if (!xdf_isopen(vdp, part))
2931 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2935 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2948 xdf_t *vdp;
2955 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2959 vdp->xdf_addr, (void *)addr, blkno, nblk));
2962 ASSERT(curthread != vdp->xdf_oe_change_thread);
2965 if (!xdf_isopen(vdp, part))
2968 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt, &p_blkst,
2973 (p_blkcnt * (vdp->xdf_xdev_secsize / XB_BSIZE))) {
2975 vdp->xdf_addr, (daddr_t)((blkno + nblk) /
2976 (vdp->xdf_xdev_secsize / XB_BSIZE)), (uint64_t)p_blkcnt);
2988 mutex_enter(&vdp->xdf_dev_lk);
2989 xdf_bp_push(vdp, dbp);
2990 mutex_exit(&vdp->xdf_dev_lk);
2991 xdf_io_start(vdp);
2992 err = xdf_ring_drain(vdp);
3002 xdf_t *vdp;
3007 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
3010 mutex_enter(&vdp->xdf_dev_lk);
3012 if (!xdf_isopen(vdp, part)) {
3013 mutex_exit(&vdp->xdf_dev_lk);
3018 ASSERT((vdp->xdf_vd_open[otyp] & parbit) != 0);
3020 ASSERT(vdp->xdf_vd_lyropen[part] > 0);
3021 if (--vdp->xdf_vd_lyropen[part] == 0)
3022 vdp->xdf_vd_open[otyp] &= ~parbit;
3024 vdp->xdf_vd_open[otyp] &= ~parbit;
3026 vdp->xdf_vd_exclopen &= ~parbit;
3028 mutex_exit(&vdp->xdf_dev_lk);
3036 xdf_t *vdp;
3044 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
3049 DPRINTF(DDI_DBG, ("xdf@%s: opening\n", vdp->xdf_addr));
3052 mutex_enter(&vdp->xdf_cb_lk);
3053 mutex_enter(&vdp->xdf_dev_lk);
3054 if (!nodelay && (xdf_connect_locked(vdp, B_TRUE) != XD_READY)) {
3055 mutex_exit(&vdp->xdf_dev_lk);
3056 mutex_exit(&vdp->xdf_cb_lk);
3059 mutex_exit(&vdp->xdf_cb_lk);
3061 if ((flag & FWRITE) && XD_IS_RO(vdp)) {
3062 mutex_exit(&vdp->xdf_dev_lk);
3068 if ((vdp->xdf_vd_exclopen & parbit) ||
3069 ((flag & FEXCL) && xdf_isopen(vdp, part))) {
3070 mutex_exit(&vdp->xdf_dev_lk);
3075 firstopen = !xdf_isopen(vdp, -1);
3078 vdp->xdf_vd_lyropen[part]++;
3080 vdp->xdf_vd_open[otyp] |= parbit;
3083 vdp->xdf_vd_exclopen |= parbit;
3085 mutex_exit(&vdp->xdf_dev_lk);
3089 cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
3100 if ((cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
3113 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
3114 cv_broadcast(&vdp->xdf_hp_status_cv);
3121 xdf_t *vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
3134 if (vdp == NULL)
3138 return (cmlb_prop_op(vdp->xdf_vd_lbl,
3172 xdf_t *vdp;
3175 if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
3179 xen_printf("xdf@%s: xdf_resume\n", vdp->xdf_addr);
3181 mutex_enter(&vdp->xdf_cb_lk);
3184 mutex_exit(&vdp->xdf_cb_lk);
3191 mutex_exit(&vdp->xdf_cb_lk);
3195 mutex_enter(&vdp->xdf_dev_lk);
3196 ASSERT(vdp->xdf_state != XD_READY);
3197 xdf_set_state(vdp, XD_UNKNOWN);
3198 mutex_exit(&vdp->xdf_dev_lk);
3200 if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
3201 mutex_exit(&vdp->xdf_cb_lk);
3205 mutex_exit(&vdp->xdf_cb_lk);
3208 xen_printf("xdf@%s: xdf_resume: done\n", vdp->xdf_addr);
3212 xen_printf("xdf@%s: xdf_resume: fail\n", vdp->xdf_addr);
3222 xdf_t *vdp;
3276 vdp = ddi_get_soft_state(xdf_ssp, instance);
3277 ddi_set_driver_private(dip, vdp);
3278 vdp->xdf_dip = dip;
3279 vdp->xdf_addr = ddi_get_name_addr(dip);
3280 vdp->xdf_suspending = B_FALSE;
3281 vdp->xdf_media_req_supported = B_FALSE;
3282 vdp->xdf_peer = INVALID_DOMID;
3283 vdp->xdf_evtchn = INVALID_EVTCHN;
3284 list_create(&vdp->xdf_vreq_act, sizeof (v_req_t),
3286 cv_init(&vdp->xdf_dev_cv, NULL, CV_DEFAULT, NULL);
3287 cv_init(&vdp->xdf_hp_status_cv, NULL, CV_DEFAULT, NULL);
3288 cv_init(&vdp->xdf_mstate_cv, NULL, CV_DEFAULT, NULL);
3289 mutex_init(&vdp->xdf_dev_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3290 mutex_init(&vdp->xdf_cb_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3291 mutex_init(&vdp->xdf_iostat_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3292 vdp->xdf_cmbl_reattach = B_TRUE;
3294 vdp->xdf_dinfo |= VDISK_CDROM;
3295 vdp->xdf_mstate = DKIO_EJECTED;
3297 vdp->xdf_mstate = DKIO_NONE;
3300 if ((vdp->xdf_ready_tq = ddi_taskq_create(dip, "xdf_ready_tq",
3308 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &vdp->xdf_softintr_id,
3309 &softibc, NULL, xdf_iorestart, (caddr_t)vdp) != DDI_SUCCESS) {
3322 xdf_synthetic_pgeom(dip, &vdp->xdf_pgeom);
3323 vdp->xdf_pgeom_fixed = B_FALSE;
3329 cmlb_alloc_handle(&vdp->xdf_vd_lbl);
3330 if (xdf_cmlb_attach(vdp) != 0) {
3340 vdp->xdf_wce = B_TRUE;
3342 mutex_enter(&vdp->xdf_cb_lk);
3346 mutex_exit(&vdp->xdf_cb_lk);
3350 if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
3353 mutex_exit(&vdp->xdf_cb_lk);
3356 mutex_exit(&vdp->xdf_cb_lk);
3379 DPRINTF(DDI_DBG, ("xdf@%s: attached\n", vdp->xdf_addr));
3383 (void) xvdi_switch_state(vdp->xdf_dip, XBT_NULL, XenbusStateClosed);
3386 if (vdp->xdf_vd_lbl != NULL) {
3387 cmlb_detach(vdp->xdf_vd_lbl, NULL);
3388 cmlb_free_handle(&vdp->xdf_vd_lbl);
3389 vdp->xdf_vd_lbl = NULL;
3391 if (vdp->xdf_softintr_id != NULL)
3392 ddi_remove_softintr(vdp->xdf_softintr_id);
3394 if (vdp->xdf_ready_tq != NULL)
3395 ddi_taskq_destroy(vdp->xdf_ready_tq);
3396 mutex_destroy(&vdp->xdf_cb_lk);
3397 mutex_destroy(&vdp->xdf_dev_lk);
3398 cv_destroy(&vdp->xdf_dev_cv);
3399 cv_destroy(&vdp->xdf_hp_status_cv);
3411 xdf_t *vdp;
3413 if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
3417 xen_printf("xdf@%s: xdf_suspend\n", vdp->xdf_addr);
3421 mutex_enter(&vdp->xdf_cb_lk);
3422 mutex_enter(&vdp->xdf_dev_lk);
3424 vdp->xdf_suspending = B_TRUE;
3425 xdf_ring_destroy(vdp);
3426 xdf_set_state(vdp, XD_SUSPEND);
3427 vdp->xdf_suspending = B_FALSE;
3429 mutex_exit(&vdp->xdf_dev_lk);
3430 mutex_exit(&vdp->xdf_cb_lk);
3433 xen_printf("xdf@%s: xdf_suspend: done\n", vdp->xdf_addr);
3441 xdf_t *vdp;
3461 vdp = ddi_get_soft_state(xdf_ssp, instance);
3463 if (vdp == NULL)
3466 mutex_enter(&vdp->xdf_cb_lk);
3467 xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
3468 if (vdp->xdf_state != XD_CLOSED) {
3469 mutex_exit(&vdp->xdf_cb_lk);
3472 mutex_exit(&vdp->xdf_cb_lk);
3474 ASSERT(!ISDMACBON(vdp));
3480 if (vdp->xdf_timeout_id != 0)
3481 (void) untimeout(vdp->xdf_timeout_id);
3484 ddi_taskq_destroy(vdp->xdf_ready_tq);
3486 cmlb_detach(vdp->xdf_vd_lbl, NULL);
3487 cmlb_free_handle(&vdp->xdf_vd_lbl);
3494 list_destroy(&vdp->xdf_vreq_act);
3497 ddi_remove_softintr(vdp->xdf_softintr_id);
3500 cv_destroy(&vdp->xdf_dev_cv);
3501 mutex_destroy(&vdp->xdf_cb_lk);
3502 mutex_destroy(&vdp->xdf_dev_lk);
3503 if (vdp->xdf_cache_flush_block != NULL)
3504 kmem_free(vdp->xdf_flush_mem, 2 * vdp->xdf_xdev_secsize);