/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * xdf.c - Xen Virtual Block Device Driver
 * TODO:
 *	- support alternate block size (currently only DEV_BSIZE supported)
 *	- revalidate geometry for removable devices
 *
 * This driver exports Solaris disk device nodes, accepts IO requests from
 * those nodes, and services those requests by talking to a backend device
 * in another domain.
 *
 * Communication with the backend device is done via a ringbuffer (which is
 * managed via xvdi interfaces) and dma memory (which is managed via ddi
 * interfaces).
 *
 * Communication with the backend device is dependent upon establishing a
 * connection to the backend device.  This connection process involves
 * reading device configuration information from xenbus and publishing
 * some frontend runtime configuration parameters via the xenbus (for
 * consumption by the backend).  Once we've published runtime configuration
 * information via the xenbus, the backend device can enter the connected
 * state and we'll enter the XD_CONNECTED state.  But before we can allow
 * random IO to begin, we need to do IO to the backend device to determine
 * the device label and if flush operations are supported.  Once this is
 * done we enter the XD_READY state and can process any IO operations.
 *
 * We receive notifications of xenbus state changes for the backend device
 * (aka, the "other end") via the xdf_oe_change() callback.  This callback
 * is single threaded, meaning that we can't receive new notifications of
 * other end state changes while we're processing an outstanding
 * notification of an other end state change.  Therefore we can't do any
 * blocking operations from the xdf_oe_change() callback.  This is why we
 * have a separate taskq (xdf_ready_tq) which exists to do the necessary
 * IO to get us from the XD_CONNECTED to the XD_READY state.  All IO
 * generated by the xdf_ready_tq thread (xdf_ready_tq_thread) will go
 * through xdf_lb_rdwr(), which is a synchronous IO interface.  IOs
 * generated by the xdf_ready_tq_thread thread have priority over all
 * other IO requests.
 *
 * We also communicate with the backend device via the xenbus "media-req"
 * (XBP_MEDIA_REQ) property.  For more information on this see the
 * comments in blkif.h.
 */
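
/*
 * A rough sketch of the connection state progression described above
 * (based on the states referenced in this file; not an exhaustive list
 * of xdf states):
 *
 *	XD_INIT --> XD_CONNECTED --> XD_READY
 *
 * where the XD_CONNECTED to XD_READY transition is driven by the
 * xdf_ready_tq taskq reading the device label and probing for flush
 * support.
 */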

#include <io/xdf.h>

#include <sys/conf.h>
#include <sys/dkio.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/mach_mmu.h>
#ifdef XPV_HVM_DRIVER
#include <sys/xpv_support.h>
#include <sys/sunndi.h>
#else /* !XPV_HVM_DRIVER */
#include <sys/evtchn_impl.h>
#endif /* !XPV_HVM_DRIVER */
#include <public/io/xenbus.h>
#include <xen/sys/xenbus_impl.h>
#include <sys/scsi/generic/inquiry.h>
#include <xen/io/blkif_impl.h>
#include <sys/fdio.h>
#include <sys/cdio.h>

/*
 * DEBUG_EVAL can be used to include debug-only statements without
 * having to wrap them in '#ifdef DEBUG' blocks.
 */
#ifdef DEBUG
#define	DEBUG_EVAL(x)	(x)
#else /* !DEBUG */
#define	DEBUG_EVAL(x)
#endif /* !DEBUG */
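
/*
 * For example, a debug-only statement can be written as
 *	DEBUG_EVAL(cmn_err(CE_NOTE, "xdf: debug checkpoint"));
 * which compiles to nothing in non-DEBUG builds.  (Illustrative usage
 * only; this exact statement does not appear in the driver.)
 */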

#define	XDF_DRAIN_MSEC_DELAY		(50*1000)	/* 00.05 sec */
#define	XDF_DRAIN_RETRY_COUNT		200		/* 10.00 sec */

#define	INVALID_DOMID	((domid_t)-1)
#define	FLUSH_DISKCACHE	0x1
#define	WRITE_BARRIER	0x2
#define	DEFAULT_FLUSH_BLOCK	156 /* block to write to cause a cache flush */
#define	USE_WRITE_BARRIER(vdp)						\
	((vdp)->xdf_feature_barrier && !(vdp)->xdf_flush_supported)
#define	USE_FLUSH_DISKCACHE(vdp)					\
	((vdp)->xdf_feature_barrier && (vdp)->xdf_flush_supported)
#define	IS_WRITE_BARRIER(vdp, bp)					\
	(!IS_READ(bp) && USE_WRITE_BARRIER(vdp) &&			\
	((bp)->b_un.b_addr == (vdp)->xdf_cache_flush_block))
#define	IS_FLUSH_DISKCACHE(bp)						\
	(!IS_READ(bp) && USE_FLUSH_DISKCACHE(vdp) && ((bp)->b_bcount == 0))

#define	VREQ_DONE(vreq)							\
	VOID2BOOLEAN(((vreq)->v_status == VREQ_DMAWIN_DONE) &&		\
	    (((vreq)->v_flush_diskcache == FLUSH_DISKCACHE) ||		\
	    (((vreq)->v_dmaw + 1) == (vreq)->v_ndmaws)))

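/*
 * The v_req_t for an active buf is stashed in the buf's av_back field;
 * BP_VREQ() and BP_VREQ_SET() encapsulate that aliasing.
 */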
#define	BP_VREQ(bp)		((v_req_t *)((bp)->av_back))
#define	BP_VREQ_SET(bp, vreq)	(((bp)->av_back = (buf_t *)(vreq)))

extern int		do_polled_io;

/* run-time tunables that we don't want the compiler to optimize away */
volatile int		xdf_debug = 0;
volatile boolean_t	xdf_barrier_flush_disable = B_FALSE;

/* per module globals */
major_t			xdf_major;
static void		*xdf_ssp;
static kmem_cache_t	*xdf_vreq_cache;
static kmem_cache_t	*xdf_gs_cache;
static int		xdf_maxphys = XB_MAXPHYS;
static diskaddr_t	xdf_flush_block = DEFAULT_FLUSH_BLOCK;
static int		xdf_fbrewrites;	/* flush block re-write count */

/* misc public functions (used by xdf_shell.c) */
int xdf_lb_rdwr(dev_info_t *, uchar_t, void *, diskaddr_t, size_t, void *);
int xdf_lb_getinfo(dev_info_t *, int, void *, void *);

/* misc private functions */
static void xdf_io_start(xdf_t *);

/* callbacks from common label */
static cmlb_tg_ops_t xdf_lb_ops = {
	TG_DK_OPS_VERSION_1,
	xdf_lb_rdwr,
	xdf_lb_getinfo
};

/*
 * I/O buffer DMA attributes
 * Make sure: one DMA window contains BLKIF_MAX_SEGMENTS_PER_REQUEST at most
 */
static ddi_dma_attr_t xb_dma_attr = {
	DMA_ATTR_V0,
	(uint64_t)0,			/* lowest address */
	(uint64_t)0xffffffffffffffff,	/* highest usable address */
	(uint64_t)0xffffff,		/* DMA counter limit max */
	(uint64_t)XB_BSIZE,		/* alignment in bytes */
	XB_BSIZE - 1,			/* bitmap of burst sizes */
	XB_BSIZE,			/* min transfer */
	(uint64_t)XB_MAX_XFER,		/* maximum transfer */
	(uint64_t)PAGEOFFSET,		/* 1 page segment length */
	BLKIF_MAX_SEGMENTS_PER_REQUEST,	/* maximum number of segments */
	XB_BSIZE,			/* granularity */
	0,				/* flags (reserved) */
};

static ddi_device_acc_attr_t xc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static void
xdf_timeout_handler(void *arg)
{
	xdf_t *vdp = arg;

	mutex_enter(&vdp->xdf_dev_lk);
	vdp->xdf_timeout_id = 0;
	mutex_exit(&vdp->xdf_dev_lk);

	/* new timeout thread could be re-scheduled */
	xdf_io_start(vdp);
}

/*
 * callback func invoked when DMA/GTE resources are available
 *
 * Note: we only register one callback function to the grant table
 * subsystem since we only have one 'struct gnttab_free_callback' in xdf_t.
 */
static int
xdf_dmacallback(caddr_t arg)
{
	xdf_t *vdp = (xdf_t *)arg;
	ASSERT(vdp != NULL);

	DPRINTF(DMA_DBG, ("xdf@%s: DMA callback started\n",
	    vdp->xdf_addr));

	ddi_trigger_softintr(vdp->xdf_softintr_id);
	return (DDI_DMA_CALLBACK_DONE);
}

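/*
 * Allocate a grant entry slot (ge_slot_t) along with the grant table
 * references it will need.  If either allocation fails, arrange to be
 * retried later (via the grant table free callback or a timeout) and
 * return NULL.
 */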
static ge_slot_t *
gs_get(xdf_t *vdp, int isread)
{
	grant_ref_t gh;
	ge_slot_t *gs;

	/* first, try to alloc the GTEs needed for this slot */
	if (gnttab_alloc_grant_references(
	    BLKIF_MAX_SEGMENTS_PER_REQUEST, &gh) == -1) {
		if (vdp->xdf_gnt_callback.next == NULL) {
			SETDMACBON(vdp);
			gnttab_request_free_callback(
			    &vdp->xdf_gnt_callback,
			    (void (*)(void *))xdf_dmacallback,
			    (void *)vdp,
			    BLKIF_MAX_SEGMENTS_PER_REQUEST);
		}
		return (NULL);
	}

	gs = kmem_cache_alloc(xdf_gs_cache, KM_NOSLEEP);
	if (gs == NULL) {
		gnttab_free_grant_references(gh);
		if (vdp->xdf_timeout_id == 0)
			/* restart I/O after one second */
			vdp->xdf_timeout_id =
			    timeout(xdf_timeout_handler, vdp, hz);
		return (NULL);
	}

	/* init gs_slot */
	gs->gs_oeid = vdp->xdf_peer;
	gs->gs_isread = isread;
	gs->gs_ghead = gh;
	gs->gs_ngrefs = 0;

	return (gs);
}

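/*
 * Free a grant entry slot: end foreign access for every claimed grant,
 * release the remaining grant references, and unlink the slot from its
 * owning vreq.
 */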
static void
gs_free(ge_slot_t *gs)
{
	int		i;

	/* release all grant table entry resources used in this slot */
	for (i = 0; i < gs->gs_ngrefs; i++)
		gnttab_end_foreign_access(gs->gs_ge[i], !gs->gs_isread, 0);
	gnttab_free_grant_references(gs->gs_ghead);
	list_remove(&gs->gs_vreq->v_gs, gs);
	kmem_cache_free(xdf_gs_cache, gs);
}

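/*
 * Claim a grant reference from the slot and grant the backend domain
 * access to the page at mfn.  The grant is read-only for writes (the
 * backend only reads our data) and writable for reads (the backend
 * must fill our page).
 */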
static grant_ref_t
gs_grant(ge_slot_t *gs, mfn_t mfn)
{
	grant_ref_t gr = gnttab_claim_grant_reference(&gs->gs_ghead);

	ASSERT(gr != -1);
	ASSERT(gs->gs_ngrefs < BLKIF_MAX_SEGMENTS_PER_REQUEST);
	gs->gs_ge[gs->gs_ngrefs++] = gr;
	gnttab_grant_foreign_access_ref(gr, gs->gs_oeid, mfn, !gs->gs_isread);

	return (gr);
}

/*
 * Alloc a vreq for this bp
 * bp->av_back contains the pointer to the vreq upon return
 */
static v_req_t *
vreq_get(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = NULL;

	ASSERT(BP_VREQ(bp) == NULL);

	vreq = kmem_cache_alloc(xdf_vreq_cache, KM_NOSLEEP);
	if (vreq == NULL) {
		if (vdp->xdf_timeout_id == 0)
			/* restart I/O after one second */
			vdp->xdf_timeout_id =
			    timeout(xdf_timeout_handler, vdp, hz);
		return (NULL);
	}
	bzero(vreq, sizeof (v_req_t));
	list_create(&vreq->v_gs, sizeof (ge_slot_t),
	    offsetof(ge_slot_t, gs_vreq_link));
	vreq->v_buf = bp;
	vreq->v_status = VREQ_INIT;
	vreq->v_runq = B_FALSE;
	BP_VREQ_SET(bp, vreq);
	/* init of other fields in vreq is up to the caller */

	list_insert_head(&vdp->xdf_vreq_act, (void *)vreq);

	return (vreq);
}

static void
vreq_free(xdf_t *vdp, v_req_t *vreq)
{
	buf_t	*bp = vreq->v_buf;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(BP_VREQ(bp) == vreq);

	list_remove(&vdp->xdf_vreq_act, vreq);

	if (vreq->v_flush_diskcache == FLUSH_DISKCACHE)
		goto done;

	switch (vreq->v_status) {
	case VREQ_DMAWIN_DONE:
	case VREQ_GS_ALLOCED:
	case VREQ_DMABUF_BOUND:
		(void) ddi_dma_unbind_handle(vreq->v_dmahdl);
		/*FALLTHRU*/
	case VREQ_DMAMEM_ALLOCED:
		if (!ALIGNED_XFER(bp)) {
			ASSERT(vreq->v_abuf != NULL);
			if (!IS_ERROR(bp) && IS_READ(bp))
				bcopy(vreq->v_abuf, bp->b_un.b_addr,
				    bp->b_bcount);
			ddi_dma_mem_free(&vreq->v_align);
		}
		/*FALLTHRU*/
	case VREQ_MEMDMAHDL_ALLOCED:
		if (!ALIGNED_XFER(bp))
			ddi_dma_free_handle(&vreq->v_memdmahdl);
		/*FALLTHRU*/
	case VREQ_DMAHDL_ALLOCED:
		ddi_dma_free_handle(&vreq->v_dmahdl);
		break;
	default:
		break;
	}
done:
	ASSERT(!vreq->v_runq);
	list_destroy(&vreq->v_gs);
	kmem_cache_free(xdf_vreq_cache, vreq);
}

/*
 * Snarf new data if our flush block was re-written
 */
static void
check_fbwrite(xdf_t *vdp, buf_t *bp, daddr_t blkno)
{
	int nblks;
	boolean_t mapin;

	if (IS_WRITE_BARRIER(vdp, bp))
		return; /* write was a flush write */

	mapin = B_FALSE;
	nblks = bp->b_bcount >> DEV_BSHIFT;
	if (xdf_flush_block >= blkno && xdf_flush_block < (blkno + nblks)) {
		xdf_fbrewrites++;
		if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
			mapin = B_TRUE;
			bp_mapin(bp);
		}
		bcopy(bp->b_un.b_addr +
		    ((xdf_flush_block - blkno) << DEV_BSHIFT),
		    vdp->xdf_cache_flush_block, DEV_BSIZE);
		if (mapin)
			bp_mapout(bp);
	}
}

/*
 * Initialize the DMA and grant table resources for the buf
 */
static int
vreq_setup(xdf_t *vdp, v_req_t *vreq)
{
	int rc;
	ddi_dma_attr_t dmaattr;
	uint_t ndcs, ndws;
	ddi_dma_handle_t dh;
	ddi_dma_handle_t mdh;
	ddi_dma_cookie_t dc;
	ddi_acc_handle_t abh;
	caddr_t	aba;
	ge_slot_t *gs;
	size_t bufsz;
	off_t off;
	size_t sz;
	buf_t *bp = vreq->v_buf;
	int dma_flags = (IS_READ(bp) ? DDI_DMA_READ : DDI_DMA_WRITE) |
	    DDI_DMA_STREAMING | DDI_DMA_PARTIAL;

	switch (vreq->v_status) {
	case VREQ_INIT:
		if (IS_FLUSH_DISKCACHE(bp)) {
			if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "get ge_slot failed\n", vdp->xdf_addr));
				return (DDI_FAILURE);
			}
			vreq->v_blkno = 0;
			vreq->v_nslots = 1;
			vreq->v_flush_diskcache = FLUSH_DISKCACHE;
			vreq->v_status = VREQ_GS_ALLOCED;
			gs->gs_vreq = vreq;
			list_insert_head(&vreq->v_gs, gs);
			return (DDI_SUCCESS);
		}

		if (IS_WRITE_BARRIER(vdp, bp))
			vreq->v_flush_diskcache = WRITE_BARRIER;
		vreq->v_blkno = bp->b_blkno +
		    (diskaddr_t)(uintptr_t)bp->b_private;
		/* See if we wrote new data to our flush block */
		if (!IS_READ(bp) && USE_WRITE_BARRIER(vdp))
			check_fbwrite(vdp, bp, vreq->v_blkno);
		vreq->v_status = VREQ_INIT_DONE;
		/*FALLTHRU*/

	case VREQ_INIT_DONE:
		/*
		 * alloc DMA handle
		 */
		rc = ddi_dma_alloc_handle(vdp->xdf_dip, &xb_dma_attr,
		    xdf_dmacallback, (caddr_t)vdp, &dh);
		if (rc != DDI_SUCCESS) {
			SETDMACBON(vdp);
			DPRINTF(DMA_DBG, ("xdf@%s: DMA handle alloc failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmahdl = dh;
		vreq->v_status = VREQ_DMAHDL_ALLOCED;
		/*FALLTHRU*/

	case VREQ_DMAHDL_ALLOCED:
		/*
		 * alloc dma handle for 512-byte aligned buf
		 */
		if (!ALIGNED_XFER(bp)) {
			/*
			 * XXPV: we need to temporarily enlarge the seg
			 * boundary and s/g length to work around CR6381968
			 */
			dmaattr = xb_dma_attr;
			dmaattr.dma_attr_seg = (uint64_t)-1;
			dmaattr.dma_attr_sgllen = INT_MAX;
			rc = ddi_dma_alloc_handle(vdp->xdf_dip, &dmaattr,
			    xdf_dmacallback, (caddr_t)vdp, &mdh);
			if (rc != DDI_SUCCESS) {
				SETDMACBON(vdp);
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "unaligned buf DMAhandle alloc failed\n",
				    vdp->xdf_addr));
				return (DDI_FAILURE);
			}
			vreq->v_memdmahdl = mdh;
			vreq->v_status = VREQ_MEMDMAHDL_ALLOCED;
		}
		/*FALLTHRU*/

	case VREQ_MEMDMAHDL_ALLOCED:
		/*
		 * alloc 512-byte aligned buf
		 */
		if (!ALIGNED_XFER(bp)) {
			if (bp->b_flags & (B_PAGEIO | B_PHYS))
				bp_mapin(bp);
			rc = ddi_dma_mem_alloc(vreq->v_memdmahdl,
			    roundup(bp->b_bcount, XB_BSIZE), &xc_acc_attr,
			    DDI_DMA_STREAMING, xdf_dmacallback, (caddr_t)vdp,
			    &aba, &bufsz, &abh);
			if (rc != DDI_SUCCESS) {
				SETDMACBON(vdp);
				DPRINTF(DMA_DBG, ("xdf@%s: "
				    "DMA mem allocation failed\n",
				    vdp->xdf_addr));
				return (DDI_FAILURE);
			}

			vreq->v_abuf = aba;
			vreq->v_align = abh;
			vreq->v_status = VREQ_DMAMEM_ALLOCED;

			ASSERT(bufsz >= bp->b_bcount);
			if (!IS_READ(bp))
				bcopy(bp->b_un.b_addr, vreq->v_abuf,
				    bp->b_bcount);
		}
		/*FALLTHRU*/

	case VREQ_DMAMEM_ALLOCED:
		/*
		 * dma bind
		 */
		if (ALIGNED_XFER(bp)) {
			rc = ddi_dma_buf_bind_handle(vreq->v_dmahdl, bp,
			    dma_flags, xdf_dmacallback, (caddr_t)vdp,
			    &dc, &ndcs);
		} else {
			rc = ddi_dma_addr_bind_handle(vreq->v_dmahdl,
			    NULL, vreq->v_abuf, bp->b_bcount, dma_flags,
			    xdf_dmacallback, (caddr_t)vdp, &dc, &ndcs);
		}
		if (rc == DDI_DMA_MAPPED || rc == DDI_DMA_PARTIAL_MAP) {
			/* get num of dma windows */
			if (rc == DDI_DMA_PARTIAL_MAP) {
				rc = ddi_dma_numwin(vreq->v_dmahdl, &ndws);
				ASSERT(rc == DDI_SUCCESS);
			} else {
				ndws = 1;
			}
		} else {
			SETDMACBON(vdp);
			DPRINTF(DMA_DBG, ("xdf@%s: DMA bind failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmac = dc;
		vreq->v_dmaw = 0;
		vreq->v_ndmacs = ndcs;
		vreq->v_ndmaws = ndws;
		vreq->v_nslots = ndws;
		vreq->v_status = VREQ_DMABUF_BOUND;
		/*FALLTHRU*/

	case VREQ_DMABUF_BOUND:
		/*
		 * get ge_slot, callback is set upon failure from gs_get(),
		 * if not set previously
		 */
		if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
			DPRINTF(DMA_DBG, ("xdf@%s: get ge_slot failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_status = VREQ_GS_ALLOCED;
		gs->gs_vreq = vreq;
		list_insert_head(&vreq->v_gs, gs);
		break;

	case VREQ_GS_ALLOCED:
		/* nothing needs to be done */
		break;

	case VREQ_DMAWIN_DONE:
		/*
		 * move to the next dma window
		 */
		ASSERT((vreq->v_dmaw + 1) < vreq->v_ndmaws);

		/* get a ge_slot for this DMA window */
		if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
			DPRINTF(DMA_DBG, ("xdf@%s: get ge_slot failed\n",
			    vdp->xdf_addr));
			return (DDI_FAILURE);
		}

		vreq->v_dmaw++;
		VERIFY(ddi_dma_getwin(vreq->v_dmahdl, vreq->v_dmaw, &off, &sz,
		    &vreq->v_dmac, &vreq->v_ndmacs) == DDI_SUCCESS);
		vreq->v_status = VREQ_GS_ALLOCED;
		gs->gs_vreq = vreq;
		list_insert_head(&vreq->v_gs, gs);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

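/*
 * Register this device with the common disk label (cmlb) module, using
 * CD-ROM or direct-access attributes as appropriate.
 */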
static int
xdf_cmlb_attach(xdf_t *vdp)
{
	dev_info_t	*dip = vdp->xdf_dip;

	return (cmlb_attach(dip, &xdf_lb_ops,
	    XD_IS_CD(vdp) ? DTYPE_RODIRECT : DTYPE_DIRECT,
	    XD_IS_RM(vdp),
	    B_TRUE,
	    XD_IS_CD(vdp) ? DDI_NT_CD_XVMD : DDI_NT_BLOCK_XVMD,
#if defined(XPV_HVM_DRIVER)
	    (XD_IS_CD(vdp) ? 0 : CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT) |
	    CMLB_INTERNAL_MINOR_NODES,
#else /* !XPV_HVM_DRIVER */
	    XD_IS_CD(vdp) ? 0 : CMLB_FAKE_LABEL_ONE_PARTITION,
#endif /* !XPV_HVM_DRIVER */
	    vdp->xdf_vd_lbl, NULL));
}

static void
xdf_io_err(buf_t *bp, int err, size_t resid)
{
	bioerror(bp, err);
	if (resid == 0)
		bp->b_resid = bp->b_bcount;
	biodone(bp);
}

static void
xdf_kstat_enter(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_xdev_iostat == NULL)
		return;
	if ((vreq != NULL) && vreq->v_runq) {
		kstat_runq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	} else {
		kstat_waitq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	}
}

static void
xdf_kstat_exit(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if (vdp->xdf_xdev_iostat == NULL)
		return;
	if ((vreq != NULL) && vreq->v_runq) {
		kstat_runq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	} else {
		kstat_waitq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
	}
}

static void
xdf_kstat_waitq_to_runq(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(!vreq->v_runq);

	vreq->v_runq = B_TRUE;
	if (vdp->xdf_xdev_iostat == NULL)
		return;
	kstat_waitq_to_runq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
}

static void
xdf_kstat_runq_to_waitq(xdf_t *vdp, buf_t *bp)
{
	v_req_t *vreq = BP_VREQ(bp);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(vreq->v_runq);

	vreq->v_runq = B_FALSE;
	if (vdp->xdf_xdev_iostat == NULL)
		return;
	kstat_runq_back_to_waitq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
}

int
xdf_kstat_create(dev_info_t *dip, char *ks_module, int instance)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	kstat_t		*kstat;
	buf_t		*bp;

	if ((kstat = kstat_create(
	    ks_module, instance, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) == NULL)
		return (-1);

	/* See comment about locking in xdf_kstat_delete(). */
	mutex_enter(&vdp->xdf_iostat_lk);
	mutex_enter(&vdp->xdf_dev_lk);

	/* only one kstat can exist at a time */
	if (vdp->xdf_xdev_iostat != NULL) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_iostat_lk);
		kstat_delete(kstat);
		return (-1);
	}

	vdp->xdf_xdev_iostat = kstat;
	vdp->xdf_xdev_iostat->ks_lock = &vdp->xdf_dev_lk;
	kstat_install(vdp->xdf_xdev_iostat);

	/*
	 * Now that we've created a kstat, we need to update the waitq and
	 * runq counts for the kstat to reflect our current state.
	 *
	 * For a buf_t structure to be on the runq, it must have a ring
	 * buffer slot associated with it.  To get a ring buffer slot the
	 * buf must first have a v_req_t and a ge_slot_t associated with it.
	 * Then when it is granted a ring buffer slot, v_runq will be set to
	 * true.
	 *
	 * For a buf_t structure to be on the waitq, it must not be on the
	 * runq.  So to find all the buf_t's that should be on waitq, we
	 * walk the active buf list and add any buf_t's which aren't on the
	 * runq to the waitq.
	 */
	bp = vdp->xdf_f_act;
	while (bp != NULL) {
		xdf_kstat_enter(vdp, bp);
		bp = bp->av_forw;
	}
	if (vdp->xdf_ready_tq_bp != NULL)
		xdf_kstat_enter(vdp, vdp->xdf_ready_tq_bp);

	mutex_exit(&vdp->xdf_dev_lk);
	mutex_exit(&vdp->xdf_iostat_lk);
	return (0);
}
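
/*
 * A caller (e.g. a shell driver layered on xdf, see xdf_shell.c) would
 * typically create the I/O kstat with something like:
 *
 *	if (xdf_kstat_create(dip, "xdf", ddi_get_instance(dip)) != 0)
 *		return (DDI_FAILURE);
 *
 * This is an illustrative sketch only; the module name and error
 * handling used by actual callers may differ.
 */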

void
xdf_kstat_delete(dev_info_t *dip)
{
	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
	kstat_t		*kstat;
	buf_t		*bp;

	/*
	 * The locking order here is xdf_iostat_lk and then xdf_dev_lk.
	 * xdf_dev_lk is used to protect the xdf_xdev_iostat pointer
	 * and the contents of our kstat.  xdf_iostat_lk is used
	 * to protect the allocation and freeing of the actual kstat.
	 * xdf_dev_lk can't be used for this purpose because kstat
	 * readers use it to access the contents of the kstat and
	 * hence it can't be held when calling kstat_delete().
	 */
	mutex_enter(&vdp->xdf_iostat_lk);
	mutex_enter(&vdp->xdf_dev_lk);

	if (vdp->xdf_xdev_iostat == NULL) {
		mutex_exit(&vdp->xdf_dev_lk);
		mutex_exit(&vdp->xdf_iostat_lk);
		return;
	}

	/*
	 * We're about to destroy the kstat structures, so it isn't really
	 * necessary to update the runq and waitq counts.  But, since this
	 * isn't a hot code path we can afford to be a little pedantic and
	 * go ahead and decrement the runq and waitq kstat counters to zero
	 * before freeing them.  This helps us ensure that we've gotten all
	 * our accounting correct.
	 *
	 * For an explanation of how we determine which buffers go on the
	 * runq vs which go on the waitq, see the comments in
	 * xdf_kstat_create().
	 */
	bp = vdp->xdf_f_act;
	while (bp != NULL) {
		xdf_kstat_exit(vdp, bp);
		bp = bp->av_forw;
	}
	if (vdp->xdf_ready_tq_bp != NULL)
		xdf_kstat_exit(vdp, vdp->xdf_ready_tq_bp);

	kstat = vdp->xdf_xdev_iostat;
	vdp->xdf_xdev_iostat = NULL;
	mutex_exit(&vdp->xdf_dev_lk);
	kstat_delete(kstat);
	mutex_exit(&vdp->xdf_iostat_lk);
}

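/*
 * The active IO queue is a singly linked list of bufs chained through
 * av_forw: xdf_f_act is the head, xdf_l_act is the tail, and xdf_i_act
 * is the first buf which has not yet been fully processed.
 */
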
/*
 * Add an IO request onto the active queue.
 *
 * We have to detect IOs generated by xdf_ready_tq_thread.  These IOs
 * are used to establish a connection to the backend, so they receive
 * priority over all other IOs.  Since xdf_ready_tq_thread only does
 * synchronous IO, there can only be one xdf_ready_tq_thread request at any
 * given time and we record the buf associated with that request in
 * xdf_ready_tq_bp.
 */
static void
xdf_bp_push(xdf_t *vdp, buf_t *bp)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(bp->av_forw == NULL);

	xdf_kstat_enter(vdp, bp);

	if (curthread == vdp->xdf_ready_tq_thread) {
		/* new IO requests from the ready thread */
		ASSERT(vdp->xdf_ready_tq_bp == NULL);
		vdp->xdf_ready_tq_bp = bp;
		return;
	}

	/* this is a normal IO request */
	ASSERT(bp != vdp->xdf_ready_tq_bp);

	if (vdp->xdf_f_act == NULL) {
		/* this is the only IO on the active queue */
		ASSERT(vdp->xdf_l_act == NULL);
		ASSERT(vdp->xdf_i_act == NULL);
		vdp->xdf_f_act = vdp->xdf_l_act = vdp->xdf_i_act = bp;
		return;
	}

	/* add this IO to the tail of the active queue */
	vdp->xdf_l_act->av_forw = bp;
	vdp->xdf_l_act = bp;
	if (vdp->xdf_i_act == NULL)
		vdp->xdf_i_act = bp;
}

static void
xdf_bp_pop(xdf_t *vdp, buf_t *bp)
{
	buf_t	*bp_iter;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(VREQ_DONE(BP_VREQ(bp)));

	if (vdp->xdf_ready_tq_bp == bp) {
		/* we're done with a ready thread IO request */
		ASSERT(bp->av_forw == NULL);
		vdp->xdf_ready_tq_bp = NULL;
		return;
	}

	/* we're done with a normal IO request */
	ASSERT((bp->av_forw != NULL) || (bp == vdp->xdf_l_act));
	ASSERT((bp->av_forw == NULL) || (bp != vdp->xdf_l_act));
	ASSERT(VREQ_DONE(BP_VREQ(vdp->xdf_f_act)));
	ASSERT(vdp->xdf_f_act != vdp->xdf_i_act);

	if (bp == vdp->xdf_f_act) {
		/* This IO was at the head of our active queue. */
		vdp->xdf_f_act = bp->av_forw;
		if (bp == vdp->xdf_l_act)
			vdp->xdf_l_act = NULL;
	} else {
		/* This IO finished before some other pending IOs. */
		bp_iter = vdp->xdf_f_act;
		while (bp != bp_iter->av_forw) {
			bp_iter = bp_iter->av_forw;
			ASSERT(VREQ_DONE(BP_VREQ(bp_iter)));
			ASSERT(bp_iter != vdp->xdf_i_act);
		}
		bp_iter->av_forw = bp->av_forw;
		if (bp == vdp->xdf_l_act)
			vdp->xdf_l_act = bp_iter;
	}
	bp->av_forw = NULL;
}

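/*
 * Return the next buf whose IO should be processed, or NULL if no IO
 * can be serviced in the current device state.
 */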
static buf_t *
xdf_bp_next(xdf_t *vdp)
{
	v_req_t	*vreq;
	buf_t	*bp;

	if (vdp->xdf_state == XD_CONNECTED) {
		/*
		 * If we're in the XD_CONNECTED state, we only service IOs
		 * from the xdf_ready_tq_thread thread.
		 */
		if ((bp = vdp->xdf_ready_tq_bp) == NULL)
			return (NULL);
		if (((vreq = BP_VREQ(bp)) == NULL) || (!VREQ_DONE(vreq)))
			return (bp);
		return (NULL);
	}

	/* if we're not in the XD_CONNECTED or XD_READY state we can't do IO */
	if (vdp->xdf_state != XD_READY)
		return (NULL);

	ASSERT(vdp->xdf_ready_tq_bp == NULL);
	for (;;) {
		if ((bp = vdp->xdf_i_act) == NULL)
			return (NULL);
		if (((vreq = BP_VREQ(bp)) == NULL) || (!VREQ_DONE(vreq)))
			return (bp);

		/* advance the active buf index pointer */
		vdp->xdf_i_act = bp->av_forw;
	}
}

static void
xdf_io_fini(xdf_t *vdp, uint64_t id, int bioerr)
{
	ge_slot_t	*gs = (ge_slot_t *)(uintptr_t)id;
	v_req_t		*vreq = gs->gs_vreq;
	buf_t		*bp = vreq->v_buf;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(BP_VREQ(bp) == vreq);

	gs_free(gs);

	if (bioerr != 0)
		bioerror(bp, bioerr);
	ASSERT(vreq->v_nslots > 0);
	if (--vreq->v_nslots > 0)
		return;

	/* remove this IO from our active queue */
	xdf_bp_pop(vdp, bp);

	ASSERT(vreq->v_runq);
	xdf_kstat_exit(vdp, bp);
	vreq->v_runq = B_FALSE;
	vreq_free(vdp, vreq);

	if (IS_ERROR(bp)) {
		xdf_io_err(bp, geterror(bp), 0);
	} else if (bp->b_resid != 0) {
		/* Partial transfers are an error */
		xdf_io_err(bp, EIO, bp->b_resid);
	} else {
		biodone(bp);
	}
}

/*
 * xdf interrupt handler
 */
static uint_t
xdf_intr_locked(xdf_t *vdp)
{
	xendev_ring_t *xbr;
	blkif_response_t *resp;
	int bioerr;
	uint64_t id;
	uint8_t op;
	uint16_t status;
	ddi_acc_handle_t acchdl;

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if ((xbr = vdp->xdf_xb_ring) == NULL)
		return (DDI_INTR_UNCLAIMED);

	acchdl = vdp->xdf_xb_ring_hdl;

	/*
	 * complete all requests which have a response
	 */
	while ((resp = xvdi_ring_get_response(xbr)) != NULL) {
		id = ddi_get64(acchdl, &resp->id);
		op = ddi_get8(acchdl, &resp->operation);
		status = ddi_get16(acchdl, (uint16_t *)&resp->status);
		DPRINTF(INTR_DBG, ("resp: op %d id %"PRIu64" status %d\n",
		    op, id, status));

		if (status != BLKIF_RSP_OKAY) {
			DPRINTF(IO_DBG, ("xdf@%s: I/O error while %s",
			    vdp->xdf_addr,
			    (op == BLKIF_OP_READ) ? "reading" : "writing"));
			bioerr = EIO;
		} else {
			bioerr = 0;
		}

		xdf_io_fini(vdp, id, bioerr);
	}
	return (DDI_INTR_CLAIMED);
}

9827f0b8309SEdward Pilatowicz 
9832de7185cSEdward Pilatowicz /*
9842de7185cSEdward Pilatowicz  * xdf_intr runs at PIL 5, so no one else can grab xdf_dev_lk and
9852de7185cSEdward Pilatowicz  * block at a lower pil.
9862de7185cSEdward Pilatowicz  */
9877f0b8309SEdward Pilatowicz static uint_t
xdf_intr(caddr_t arg)9887f0b8309SEdward Pilatowicz xdf_intr(caddr_t arg)
9897f0b8309SEdward Pilatowicz {
9907f0b8309SEdward Pilatowicz 	xdf_t *vdp = (xdf_t *)arg;
9917f0b8309SEdward Pilatowicz 	int rv;
9927f0b8309SEdward Pilatowicz 
99306bbe1e0Sedp 	mutex_enter(&vdp->xdf_dev_lk);
9947f0b8309SEdward Pilatowicz 	rv = xdf_intr_locked(vdp);
99506bbe1e0Sedp 	mutex_exit(&vdp->xdf_dev_lk);
9967f0b8309SEdward Pilatowicz 
9977f0b8309SEdward Pilatowicz 	if (!do_polled_io)
9987f0b8309SEdward Pilatowicz 		xdf_io_start(vdp);
9997f0b8309SEdward Pilatowicz 
10007f0b8309SEdward Pilatowicz 	return (rv);
100106bbe1e0Sedp }
100206bbe1e0Sedp 
10037f0b8309SEdward Pilatowicz static void
xdf_ring_push(xdf_t * vdp)10047f0b8309SEdward Pilatowicz xdf_ring_push(xdf_t *vdp)
10057f0b8309SEdward Pilatowicz {
10067f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
10077f0b8309SEdward Pilatowicz 
10087f0b8309SEdward Pilatowicz 	if (vdp->xdf_xb_ring == NULL)
10097f0b8309SEdward Pilatowicz 		return;
10107f0b8309SEdward Pilatowicz 
10117f0b8309SEdward Pilatowicz 	if (xvdi_ring_push_request(vdp->xdf_xb_ring)) {
10127f0b8309SEdward Pilatowicz 		DPRINTF(IO_DBG, (
10137f0b8309SEdward Pilatowicz 		    "xdf@%s: xdf_ring_push: sent request(s) to backend\n",
10147f0b8309SEdward Pilatowicz 		    vdp->xdf_addr));
101506bbe1e0Sedp 	}
101606bbe1e0Sedp 
10177f0b8309SEdward Pilatowicz 	if (xvdi_get_evtchn(vdp->xdf_dip) != INVALID_EVTCHN)
10187f0b8309SEdward Pilatowicz 		xvdi_notify_oe(vdp->xdf_dip);
10197f0b8309SEdward Pilatowicz }
10207f0b8309SEdward Pilatowicz 
10217f0b8309SEdward Pilatowicz static int
xdf_ring_drain_locked(xdf_t * vdp)10227f0b8309SEdward Pilatowicz xdf_ring_drain_locked(xdf_t *vdp)
10237f0b8309SEdward Pilatowicz {
10247f0b8309SEdward Pilatowicz 	int		pollc, rv = 0;
10257f0b8309SEdward Pilatowicz 
10267f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
10277f0b8309SEdward Pilatowicz 
10287f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
10297f0b8309SEdward Pilatowicz 		xen_printf("xdf_ring_drain: start\n");
10307f0b8309SEdward Pilatowicz 
10317f0b8309SEdward Pilatowicz 	for (pollc = 0; pollc < XDF_DRAIN_RETRY_COUNT; pollc++) {
10327f0b8309SEdward Pilatowicz 		if (vdp->xdf_xb_ring == NULL)
10337f0b8309SEdward Pilatowicz 			goto out;
10347f0b8309SEdward Pilatowicz 
10357f0b8309SEdward Pilatowicz 		if (xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
10367f0b8309SEdward Pilatowicz 			(void) xdf_intr_locked(vdp);
10377f0b8309SEdward Pilatowicz 		if (!xvdi_ring_has_incomp_request(vdp->xdf_xb_ring))
10387f0b8309SEdward Pilatowicz 			goto out;
10397f0b8309SEdward Pilatowicz 		xdf_ring_push(vdp);
10407f0b8309SEdward Pilatowicz 
10417f0b8309SEdward Pilatowicz 		/* file-backed devices can be slow */
104206bbe1e0Sedp 		mutex_exit(&vdp->xdf_dev_lk);
10437f0b8309SEdward Pilatowicz #ifdef XPV_HVM_DRIVER
10447f0b8309SEdward Pilatowicz 		(void) HYPERVISOR_yield();
10457f0b8309SEdward Pilatowicz #endif /* XPV_HVM_DRIVER */
10467f0b8309SEdward Pilatowicz 		delay(drv_usectohz(XDF_DRAIN_MSEC_DELAY));
10477f0b8309SEdward Pilatowicz 		mutex_enter(&vdp->xdf_dev_lk);
10487f0b8309SEdward Pilatowicz 	}
10497f0b8309SEdward Pilatowicz 	cmn_err(CE_WARN, "xdf@%s: xdf_ring_drain: timeout", vdp->xdf_addr);
10507f0b8309SEdward Pilatowicz 
10517f0b8309SEdward Pilatowicz out:
10527f0b8309SEdward Pilatowicz 	if (vdp->xdf_xb_ring != NULL) {
10537f0b8309SEdward Pilatowicz 		if (xvdi_ring_has_incomp_request(vdp->xdf_xb_ring) ||
10547f0b8309SEdward Pilatowicz 		    xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
10557f0b8309SEdward Pilatowicz 			rv = EIO;
10567f0b8309SEdward Pilatowicz 	}
10577f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
10587f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_ring_drain: end, err=%d\n",
10597f0b8309SEdward Pilatowicz 		    vdp->xdf_addr, rv);
10607f0b8309SEdward Pilatowicz 	return (rv);
10617f0b8309SEdward Pilatowicz }
10627f0b8309SEdward Pilatowicz 
10637f0b8309SEdward Pilatowicz static int
xdf_ring_drain(xdf_t * vdp)10647f0b8309SEdward Pilatowicz xdf_ring_drain(xdf_t *vdp)
10657f0b8309SEdward Pilatowicz {
10667f0b8309SEdward Pilatowicz 	int rv;
10677f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
10687f0b8309SEdward Pilatowicz 	rv = xdf_ring_drain_locked(vdp);
10697f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
10707f0b8309SEdward Pilatowicz 	return (rv);
10717f0b8309SEdward Pilatowicz }
10727f0b8309SEdward Pilatowicz 
10737f0b8309SEdward Pilatowicz /*
10747f0b8309SEdward Pilatowicz  * Destroy all v_req_t, grant table entries, and our ring buffer.
10757f0b8309SEdward Pilatowicz  */
10767f0b8309SEdward Pilatowicz static void
xdf_ring_destroy(xdf_t * vdp)10777f0b8309SEdward Pilatowicz xdf_ring_destroy(xdf_t *vdp)
10787f0b8309SEdward Pilatowicz {
10797f0b8309SEdward Pilatowicz 	v_req_t		*vreq;
10807f0b8309SEdward Pilatowicz 	buf_t		*bp;
10817f0b8309SEdward Pilatowicz 	ge_slot_t	*gs;
10827f0b8309SEdward Pilatowicz 
10837f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
10847f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
10857f0b8309SEdward Pilatowicz 
10867f0b8309SEdward Pilatowicz 	if ((vdp->xdf_state != XD_INIT) &&
10877f0b8309SEdward Pilatowicz 	    (vdp->xdf_state != XD_CONNECTED) &&
10887f0b8309SEdward Pilatowicz 	    (vdp->xdf_state != XD_READY)) {
10897f0b8309SEdward Pilatowicz 		ASSERT(vdp->xdf_xb_ring == NULL);
10907f0b8309SEdward Pilatowicz 		ASSERT(vdp->xdf_xb_ring_hdl == NULL);
10917f0b8309SEdward Pilatowicz 		ASSERT(vdp->xdf_peer == INVALID_DOMID);
10927f0b8309SEdward Pilatowicz 		ASSERT(vdp->xdf_evtchn == INVALID_EVTCHN);
10937f0b8309SEdward Pilatowicz 		ASSERT(list_is_empty(&vdp->xdf_vreq_act));
10947f0b8309SEdward Pilatowicz 		return;
10957f0b8309SEdward Pilatowicz 	}
10967f0b8309SEdward Pilatowicz 
10977f0b8309SEdward Pilatowicz 	/*
	 * We don't want to receive async notifications from the backend
	 * when it finishes processing ring entries.
	 */
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(vdp->xdf_evtchn);
#else /* !XPV_HVM_DRIVER */
	(void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
#endif /* !XPV_HVM_DRIVER */

	/*
	 * Drain any requests in the ring.  We need to do this before we
	 * can free grant table entries, because if active ring entries
	 * point to grants, then the backend could be trying to access
	 * those grants.
	 */
	(void) xdf_ring_drain_locked(vdp);

	/* We're done talking to the backend so free up our event channel */
	xvdi_free_evtchn(vdp->xdf_dip);
	vdp->xdf_evtchn = INVALID_EVTCHN;

	while ((vreq = list_head(&vdp->xdf_vreq_act)) != NULL) {
		bp = vreq->v_buf;
		ASSERT(BP_VREQ(bp) == vreq);

		/* Free up any grant table entries associated with this IO */
		while ((gs = list_head(&vreq->v_gs)) != NULL)
			gs_free(gs);

		/* If this IO was on the runq, move it back to the waitq. */
		if (vreq->v_runq)
			xdf_kstat_runq_to_waitq(vdp, bp);

		/*
		 * Reset any buf IO state since we're going to re-issue the
		 * IO when we reconnect.
		 */
		vreq_free(vdp, vreq);
		BP_VREQ_SET(bp, NULL);
		bioerror(bp, 0);
	}

	/* reset the active queue index pointer */
	vdp->xdf_i_act = vdp->xdf_f_act;

	/* Destroy the ring */
	xvdi_free_ring(vdp->xdf_xb_ring);
	vdp->xdf_xb_ring = NULL;
	vdp->xdf_xb_ring_hdl = NULL;
	vdp->xdf_peer = INVALID_DOMID;
}

void
xdfmin(struct buf *bp)
{
	if (bp->b_bcount > xdf_maxphys)
		bp->b_bcount = xdf_maxphys;
}
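
/*
 * Illustrative sketch (not code from this driver): xdfmin() follows the
 * minphys(9F) convention, so a raw-device read path would hand it to
 * physio(9F) to clamp each transfer to xdf_maxphys, e.g.:
 *
 *	return (physio(xdf_strategy, NULL, dev, B_READ, xdfmin, uiop));
 *
 * The surrounding read(9E) entry point and its uiop argument are
 * assumptions here for the sake of the example.
 */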

/*
 * Check if we have a pending "eject" media request.
 */
static int
xdf_eject_pending(xdf_t *vdp)
{
	dev_info_t	*dip = vdp->xdf_dip;
	char		*xsname, *str;

	if (!vdp->xdf_media_req_supported)
		return (B_FALSE);

	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
	    (xenbus_read_str(xsname, XBP_MEDIA_REQ, &str) != 0))
		return (B_FALSE);

	if (strcmp(str, XBV_MEDIA_REQ_EJECT) != 0) {
		strfree(str);
		return (B_FALSE);
	}
	strfree(str);
	return (B_TRUE);
}

/*
 * Generate a media request.
 */
static int
xdf_media_req(xdf_t *vdp, char *req, boolean_t media_required)
{
	dev_info_t	*dip = vdp->xdf_dip;
	char		*xsname;

	/*
	 * we can't be holding xdf_dev_lk because xenbus_printf() can
	 * block while waiting for a PIL 1 interrupt message.  this
	 * would cause a deadlock with xdf_intr() which needs to grab
	 * xdf_dev_lk as well and runs at PIL 5.
	 */
	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));

	if ((xsname = xvdi_get_xsname(dip)) == NULL)
		return (ENXIO);

	/* Check if we support media requests */
	if (!XD_IS_CD(vdp) || !vdp->xdf_media_req_supported)
		return (ENOTTY);

	/* If an eject is pending then don't allow any new requests */
	if (xdf_eject_pending(vdp))
		return (ENXIO);

	/* Make sure that there is media present */
	if (media_required && (vdp->xdf_xdev_nblocks == 0))
		return (ENXIO);

	/* We only allow operations when the device is ready and connected */
	if (vdp->xdf_state != XD_READY)
		return (EIO);

	if (xenbus_printf(XBT_NULL, xsname, XBP_MEDIA_REQ, "%s", req) != 0)
		return (EIO);

	return (0);
}
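
/*
 * Usage sketch (illustrative only): the request strings passed in are
 * the XBV_MEDIA_REQ_* values used elsewhere in this file, e.g.
 *
 *	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
 *	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_EJECT, B_TRUE);
 *	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_NONE, B_FALSE);
 *
 * The backend watches the XBP_MEDIA_REQ xenstore node written above and
 * acts on (and eventually clears) the request.
 */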

/*
 * populate a single blkif_request_t w/ a buf
 */
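/*
 * For reference, the fields actually populated below (every store goes
 * through ddi_put*() against the ring's access handle) are, roughly:
 *
 *	rreq->operation		BLKIF_OP_{READ,WRITE,WRITE_BARRIER,
 *				    FLUSH_DISKCACHE}
 *	rreq->handle		backend vdev number
 *	rreq->id		pointer to our ge_slot_t, echoed back in
 *				    the response
 *	rreq->sector_number	starting sector of the transfer
 *	rreq->nr_segments	number of seg[] entries (0 for a flush)
 *	rreq->seg[i]		{ gref, first_sect, last_sect } per page
 *
 * This is a sketch of how this function uses the request, not the full
 * blkif ring ABI.
 */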
static void
xdf_process_rreq(xdf_t *vdp, struct buf *bp, blkif_request_t *rreq)
{
	grant_ref_t	gr;
	uint8_t		fsect, lsect;
	size_t		bcnt;
	paddr_t		dma_addr;
	off_t		blk_off;
	dev_info_t	*dip = vdp->xdf_dip;
	blkif_vdev_t	vdev = xvdi_get_vdevnum(dip);
	v_req_t		*vreq = BP_VREQ(bp);
	uint64_t	blkno = vreq->v_blkno;
	uint_t		ndmacs = vreq->v_ndmacs;
	ddi_acc_handle_t acchdl = vdp->xdf_xb_ring_hdl;
	int		seg = 0;
	int		isread = IS_READ(bp);
	ge_slot_t	*gs = list_head(&vreq->v_gs);

	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	ASSERT(vreq->v_status == VREQ_GS_ALLOCED);

	if (isread)
		ddi_put8(acchdl, &rreq->operation, BLKIF_OP_READ);
	else {
		switch (vreq->v_flush_diskcache) {
		case FLUSH_DISKCACHE:
			ddi_put8(acchdl, &rreq->operation,
			    BLKIF_OP_FLUSH_DISKCACHE);
			ddi_put16(acchdl, &rreq->handle, vdev);
			ddi_put64(acchdl, &rreq->id,
			    (uint64_t)(uintptr_t)(gs));
			ddi_put8(acchdl, &rreq->nr_segments, 0);
			vreq->v_status = VREQ_DMAWIN_DONE;
			return;
		case WRITE_BARRIER:
			ddi_put8(acchdl, &rreq->operation,
			    BLKIF_OP_WRITE_BARRIER);
			break;
		default:
			if (!vdp->xdf_wce)
				ddi_put8(acchdl, &rreq->operation,
				    BLKIF_OP_WRITE_BARRIER);
			else
				ddi_put8(acchdl, &rreq->operation,
				    BLKIF_OP_WRITE);
			break;
		}
	}

	ddi_put16(acchdl, &rreq->handle, vdev);
	ddi_put64(acchdl, &rreq->sector_number, blkno);
	ddi_put64(acchdl, &rreq->id, (uint64_t)(uintptr_t)(gs));

	/*
	 * Loop until all segments are populated or there are no more
	 * DMA cookies left in the buf.
	 */
	for (;;) {
		/*
		 * Each segment of a blkif request can transfer up to
		 * one 4K page of data.
		 */
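		/*
		 * Worked example (assuming XB_BSIZE is 512, so XB_BSHIFT
		 * is 9, and 4K pages): a cookie with dmac_laddress
		 * 0x12345600 and dmac_size 0x400 yields blk_off 0x600,
		 * fsect 3, and lsect 3 + 2 - 1 = 4, i.e. sectors 3-4 of
		 * the granted page.
		 */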
		bcnt = vreq->v_dmac.dmac_size;
		dma_addr = vreq->v_dmac.dmac_laddress;
		blk_off = (uint_t)((paddr_t)XB_SEGOFFSET & dma_addr);
		fsect = blk_off >> XB_BSHIFT;
		lsect = fsect + (bcnt >> XB_BSHIFT) - 1;

		ASSERT(bcnt <= PAGESIZE);
		ASSERT((bcnt % XB_BSIZE) == 0);
		ASSERT((blk_off & XB_BMASK) == 0);
		ASSERT(fsect < XB_MAX_SEGLEN / XB_BSIZE &&
		    lsect < XB_MAX_SEGLEN / XB_BSIZE);

		gr = gs_grant(gs, PATOMA(dma_addr) >> PAGESHIFT);
		ddi_put32(acchdl, &rreq->seg[seg].gref, gr);
		ddi_put8(acchdl, &rreq->seg[seg].first_sect, fsect);
		ddi_put8(acchdl, &rreq->seg[seg].last_sect, lsect);

		DPRINTF(IO_DBG, (
		    "xdf@%s: seg%d: dmacS %lu blk_off %ld\n",
		    vdp->xdf_addr, seg, vreq->v_dmac.dmac_size, blk_off));
		DPRINTF(IO_DBG, (
		    "xdf@%s: seg%d: fs %d ls %d gr %d dma 0x%"PRIx64"\n",
		    vdp->xdf_addr, seg, fsect, lsect, gr, dma_addr));

		blkno += (bcnt >> XB_BSHIFT);
		seg++;
		ASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
		if (--ndmacs) {
			ddi_dma_nextcookie(vreq->v_dmahdl, &vreq->v_dmac);
			continue;
		}

		vreq->v_status = VREQ_DMAWIN_DONE;
		vreq->v_blkno = blkno;
		break;
	}
	ddi_put8(acchdl, &rreq->nr_segments, seg);
	DPRINTF(IO_DBG, (
	    "xdf@%s: xdf_process_rreq: request id=%"PRIx64" ready\n",
	    vdp->xdf_addr, rreq->id));
}

static void
xdf_io_start(xdf_t *vdp)
{
	struct buf	*bp;
	v_req_t		*vreq;
	blkif_request_t	*rreq;
	boolean_t	rreqready = B_FALSE;

	mutex_enter(&vdp->xdf_dev_lk);

	/*
	 * Populate the ring request(s).  Loop until there is no buf to
	 * transfer or no free slot available in the I/O ring.
	 */
	for (;;) {
		/* don't start any new IO if we're suspending */
		if (vdp->xdf_suspending)
			break;
		if ((bp = xdf_bp_next(vdp)) == NULL)
			break;

		/* if the buf doesn't already have a vreq, allocate one */
		if (((vreq = BP_VREQ(bp)) == NULL) &&
		    ((vreq = vreq_get(vdp, bp)) == NULL))
			break;

		/* alloc DMA/GTE resources */
		if (vreq_setup(vdp, vreq) != DDI_SUCCESS)
			break;

		/* get next blkif_request in the ring */
		if ((rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
			break;
		bzero(rreq, sizeof (blkif_request_t));
		rreqready = B_TRUE;

		/* populate blkif_request with this buf */
		xdf_process_rreq(vdp, bp, rreq);

		/*
		 * This buffer/vreq pair has been allocated ring buffer
		 * resources, so if it isn't already on our runq, add it.
		 */
		if (!vreq->v_runq)
			xdf_kstat_waitq_to_runq(vdp, bp);
	}

	/* Send the request(s) to the backend */
	if (rreqready)
		xdf_ring_push(vdp);

	mutex_exit(&vdp->xdf_dev_lk);
}


/* Check if a partition is open; -1 means check all partitions on the disk */
static boolean_t
xdf_isopen(xdf_t *vdp, int partition)
{
	int i;
	ulong_t parbit;
	boolean_t rval = B_FALSE;

	ASSERT((partition == -1) ||
	    ((partition >= 0) && (partition < XDF_PEXT)));

	if (partition == -1)
		parbit = (ulong_t)-1;
	else
		parbit = 1 << partition;

	for (i = 0; i < OTYPCNT; i++) {
		if (vdp->xdf_vd_open[i] & parbit)
			rval = B_TRUE;
	}

	return (rval);
}

/*
 * The connection should never be closed as long as someone is holding
 * us open, there is pending IO, or someone is waiting for a
 * connection.
 */
static boolean_t
xdf_busy(xdf_t *vdp)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	if ((vdp->xdf_xb_ring != NULL) &&
	    xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (!list_is_empty(&vdp->xdf_vreq_act) || (vdp->xdf_f_act != NULL)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (xdf_isopen(vdp, -1)) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	if (vdp->xdf_connect_req > 0) {
		ASSERT(vdp->xdf_state != XD_CLOSED);
		return (B_TRUE);
	}

	return (B_FALSE);
}

static void
xdf_set_state(xdf_t *vdp, xdf_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
	DPRINTF(DDI_DBG, ("xdf@%s: state change %d -> %d\n",
	    vdp->xdf_addr, vdp->xdf_state, new_state));
	vdp->xdf_state = new_state;
	cv_broadcast(&vdp->xdf_dev_cv);
}

static void
xdf_disconnect(xdf_t *vdp, xdf_state_t new_state, boolean_t quiet)
{
	dev_info_t	*dip = vdp->xdf_dip;
	boolean_t	busy;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT((new_state == XD_UNKNOWN) || (new_state == XD_CLOSED));

	/* Check if we're already there. */
	if (vdp->xdf_state == new_state)
		return;

	mutex_enter(&vdp->xdf_dev_lk);
	busy = xdf_busy(vdp);

	/* If we're already closed then there's nothing to do. */
	if (vdp->xdf_state == XD_CLOSED) {
		ASSERT(!busy);
		xdf_set_state(vdp, new_state);
		mutex_exit(&vdp->xdf_dev_lk);
		return;
	}

#ifdef DEBUG
	/* UhOh.  Warn the user that something bad has happened. */
	if (!quiet && busy && (vdp->xdf_state == XD_READY) &&
	    (vdp->xdf_xdev_nblocks != 0)) {
		cmn_err(CE_WARN, "xdf@%s: disconnected while in use",
		    vdp->xdf_addr);
	}
#endif /* DEBUG */

	xdf_ring_destroy(vdp);

	/* If we're busy then we can only go into the unknown state */
	xdf_set_state(vdp, (busy) ? XD_UNKNOWN : new_state);
	mutex_exit(&vdp->xdf_dev_lk);

	/* if we're closed now, let the other end know */
	if (vdp->xdf_state == XD_CLOSED)
		(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateClosed);
}


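/*
 * Summary of the connection state machine implemented by the
 * xdf_setstate_*() routines below (distilled from their comments):
 *
 *	XD_UNKNOWN/XD_CLOSED --- xdf_setstate_init() ------> XD_INIT
 *	XD_INIT -------------- xdf_setstate_connected() ---> XD_CONNECTED
 *	XD_CONNECTED ---------- xdf_setstate_ready() ------> XD_READY
 *
 * On failure we fall back to XD_UNKNOWN via xdf_disconnect().
 */
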
/*
 * Kick-off connect process
 * Status should be XD_UNKNOWN or XD_CLOSED
 * On success, status will be changed to XD_INIT
 * On error, it will be changed to XD_UNKNOWN
 */
static int
xdf_setstate_init(xdf_t *vdp)
{
	dev_info_t		*dip = vdp->xdf_dip;
	xenbus_transaction_t	xbt;
	grant_ref_t		gref;
	char			*xsname, *str;
	int			rv;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT((vdp->xdf_state == XD_UNKNOWN) ||
	    (vdp->xdf_state == XD_CLOSED));

	DPRINTF(DDI_DBG,
	    ("xdf@%s: starting connection process\n", vdp->xdf_addr));

	/*
	 * If an eject is pending then don't allow a new connection.
	 * (Only the backend can clear a pending eject media request.)
	 */
	if (xdf_eject_pending(vdp))
		return (DDI_FAILURE);

	if ((xsname = xvdi_get_xsname(dip)) == NULL)
		goto errout;

	if ((vdp->xdf_peer = xvdi_get_oeid(dip)) == INVALID_DOMID)
		goto errout;

	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateInitialising);

	/*
	 * Sanity check for the existence of the xenbus device-type property.
	 * This property might not exist if our xenbus device node was
	 * force destroyed while we were still connected to the backend.
	 */
	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0)
		goto errout;
	strfree(str);

	if (xvdi_alloc_evtchn(dip) != DDI_SUCCESS)
		goto errout;

	vdp->xdf_evtchn = xvdi_get_evtchn(dip);
#ifdef XPV_HVM_DRIVER
	ec_bind_evtchn_to_handler(vdp->xdf_evtchn, IPL_VBD, xdf_intr, vdp);
#else /* !XPV_HVM_DRIVER */
	if (ddi_add_intr(dip, 0, NULL, NULL, xdf_intr, (caddr_t)vdp) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_init: "
		    "failed to add intr handler", vdp->xdf_addr);
		goto errout1;
	}
#endif /* !XPV_HVM_DRIVER */

	if (xvdi_alloc_ring(dip, BLKIF_RING_SIZE,
	    sizeof (union blkif_sring_entry), &gref, &vdp->xdf_xb_ring) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "xdf@%s: failed to alloc comm ring",
		    vdp->xdf_addr);
		goto errout2;
	}
	vdp->xdf_xb_ring_hdl = vdp->xdf_xb_ring->xr_acc_hdl; /* ugly!! */

	/*
	 * Write into xenstore the info needed by backend
	 */
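	/*
	 * Illustrative sketch (the literal node names are assumptions
	 * based on the XBP_* macros used below): once the transaction
	 * commits, our frontend directory in xenstore carries roughly
	 *
	 *	media-req = "none"
	 *	ring-ref = "<gref>"
	 *	event-channel = "<evtchn>"
	 *	protocol = XEN_IO_PROTO_ABI_NATIVE
	 *
	 * which is everything the backend needs to map the ring and
	 * bind to our event channel.
	 */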
trans_retry:
	if (xenbus_transaction_start(&xbt)) {
		cmn_err(CE_WARN, "xdf@%s: failed to start transaction",
		    vdp->xdf_addr);
		xvdi_fatal_error(dip, EIO, "connect transaction init");
		goto fail_trans;
	}

	/*
	 * XBP_PROTOCOL is written by the domain builder in the case of PV
	 * domains. However, it is not written for HVM domains, so let's
	 * write it here.
	 */
	if (((rv = xenbus_printf(xbt, xsname,
	    XBP_MEDIA_REQ, "%s", XBV_MEDIA_REQ_NONE)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_RING_REF, "%u", gref)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_EVENT_CHAN, "%u", vdp->xdf_evtchn)) != 0) ||
	    ((rv = xenbus_printf(xbt, xsname,
	    XBP_PROTOCOL, "%s", XEN_IO_PROTO_ABI_NATIVE)) != 0) ||
	    ((rv = xvdi_switch_state(dip, xbt, XenbusStateInitialised)) > 0)) {
		(void) xenbus_transaction_end(xbt, 1);
		xvdi_fatal_error(dip, rv, "connect transaction setup");
		goto fail_trans;
	}

	/* kick-off connect process */
	if (rv = xenbus_transaction_end(xbt, 0)) {
		if (rv == EAGAIN)
			goto trans_retry;
		xvdi_fatal_error(dip, rv, "connect transaction commit");
		goto fail_trans;
	}

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	mutex_enter(&vdp->xdf_dev_lk);
	xdf_set_state(vdp, XD_INIT);
	mutex_exit(&vdp->xdf_dev_lk);

	return (DDI_SUCCESS);

fail_trans:
	xvdi_free_ring(vdp->xdf_xb_ring);
errout2:
#ifdef XPV_HVM_DRIVER
	ec_unbind_evtchn(vdp->xdf_evtchn);
#else /* !XPV_HVM_DRIVER */
	(void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
#endif /* !XPV_HVM_DRIVER */
errout1:
	xvdi_free_evtchn(dip);
	vdp->xdf_evtchn = INVALID_EVTCHN;
errout:
	xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
	cmn_err(CE_WARN, "xdf@%s: failed to start connection to backend",
	    vdp->xdf_addr);
	return (DDI_FAILURE);
}

int
xdf_get_flush_block(xdf_t *vdp)
{
	/*
	 * Get a sector-size (xdf_xdev_secsize) aligned buffer
	 */
	vdp->xdf_flush_mem = kmem_alloc(vdp->xdf_xdev_secsize * 2, KM_SLEEP);
	vdp->xdf_cache_flush_block =
	    (char *)P2ROUNDUP((uintptr_t)(vdp->xdf_flush_mem),
	    (int)vdp->xdf_xdev_secsize);

	if (xdf_lb_rdwr(vdp->xdf_dip, TG_READ, vdp->xdf_cache_flush_block,
	    xdf_flush_block, vdp->xdf_xdev_secsize, NULL) != 0)
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}
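
/*
 * Note (descriptive only): the block cached above is used when the
 * backend lacks BLKIF_OP_FLUSH_DISKCACHE; a flush is then emulated by
 * rewriting this block with BLKIF_OP_WRITE_BARRIER, per the probing
 * logic in xdf_setstate_ready() below.
 */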

static void
xdf_setstate_ready(void *arg)
{
	xdf_t	*vdp = (xdf_t *)arg;

	vdp->xdf_ready_tq_thread = curthread;

	/*
	 * We've created all the minor nodes via cmlb_attach() using default
	 * values in xdf_attach() so that it's possible to block in
	 * xdf_open(), in case anyone (say, the booting thread) tries to
	 * open the device before we're connected to the backend.  Now that
	 * we're almost connected, refresh those minor nodes with the latest
	 * info we have.
	 */
	mutex_enter(&vdp->xdf_dev_lk);
	if (vdp->xdf_cmbl_reattach) {
		vdp->xdf_cmbl_reattach = B_FALSE;

		mutex_exit(&vdp->xdf_dev_lk);
		if (xdf_cmlb_attach(vdp) != 0) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			return;
		}
		mutex_enter(&vdp->xdf_dev_lk);
	}

	/* If we're not still trying to get to the ready state, then bail. */
	if (vdp->xdf_state != XD_CONNECTED) {
		mutex_exit(&vdp->xdf_dev_lk);
		return;
	}
	mutex_exit(&vdp->xdf_dev_lk);

	/*
	 * If backend has feature-barrier, see if it supports disk
	 * cache flush op.
	 */
	vdp->xdf_flush_supported = B_FALSE;
	if (vdp->xdf_feature_barrier) {
		/*
		 * Pretend we already know flush is supported so probe
		 * will attempt the correct op.
		 */
		vdp->xdf_flush_supported = B_TRUE;
		if (xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE, NULL, 0, 0, 0) == 0) {
			vdp->xdf_flush_supported = B_TRUE;
		} else {
			vdp->xdf_flush_supported = B_FALSE;
			/*
			 * If the other end does not support the cache flush op
			 * then we must use a barrier-write to force disk
			 * cache flushing.  Barrier writes require that a data
			 * block actually be written.
			 * Cache a block to barrier-write when we are
			 * asked to perform a flush.
			 * XXX - would it be better to just copy 1 block
			 * (512 bytes) from whatever write we did last
			 * and rewrite that block?
			 */
			if (xdf_get_flush_block(vdp) != DDI_SUCCESS) {
				xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
				return;
			}
		}
	}

	mutex_enter(&vdp->xdf_cb_lk);
	mutex_enter(&vdp->xdf_dev_lk);
	if (vdp->xdf_state == XD_CONNECTED)
		xdf_set_state(vdp, XD_READY);
	mutex_exit(&vdp->xdf_dev_lk);

	/* Restart any currently queued up io */
	xdf_io_start(vdp);

	mutex_exit(&vdp->xdf_cb_lk);
}

/*
 * synthetic geometry
 */
#define	XDF_NSECTS	256
#define	XDF_NHEADS	16
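
/*
 * Worked example: a vdisk of 20971520 512-byte sectors (10 GB) yields
 * ncyl = 20971520 / (16 * 256) = 5120 synthetic cylinders below, while
 * g_capacity always reports the raw xdf_xdev_nblocks.
 */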

static void
xdf_synthetic_pgeom(dev_info_t *dip, cmlb_geom_t *geomp)
{
	xdf_t *vdp;
	uint_t ncyl;

	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));

	ncyl = vdp->xdf_xdev_nblocks / (XDF_NHEADS * XDF_NSECTS);

	bzero(geomp, sizeof (*geomp));
	geomp->g_ncyl = ncyl == 0 ? 1 : ncyl;
	geomp->g_acyl = 0;
	geomp->g_nhead = XDF_NHEADS;
	geomp->g_nsect = XDF_NSECTS;
	geomp->g_secsize = vdp->xdf_xdev_secsize;
	geomp->g_capacity = vdp->xdf_xdev_nblocks;
	geomp->g_intrlv = 0;
	geomp->g_rpm = 7200;
}

/*
 * Finish other initialization after we've connected to backend
 * Status should be XD_INIT before calling this routine
 * On success, status should be changed to XD_CONNECTED.
 * On error, status should stay XD_INIT
 */
static int
xdf_setstate_connected(xdf_t *vdp)
{
	dev_info_t	*dip = vdp->xdf_dip;
	cmlb_geom_t	pgeom;
	diskaddr_t	nblocks = 0;
	uint_t		secsize = 0;
	char		*oename, *xsname, *str;
	uint_t		dinfo;

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
	ASSERT(vdp->xdf_state == XD_INIT);

	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
	    ((oename = xvdi_get_oename(dip)) == NULL))
		return (DDI_FAILURE);

	/* Make sure the other end is XenbusStateConnected */
	if (xenbus_read_driver_state(oename) != XenbusStateConnected)
		return (DDI_FAILURE);

	/* Determine if feature barrier is supported by backend */
	if (!(vdp->xdf_feature_barrier = xenbus_exists(oename, XBP_FB)))
		cmn_err(CE_NOTE, "!xdf@%s: feature-barrier not supported",
		    vdp->xdf_addr);

	/*
	 * Probe backend.  Read the device size into xdf_xdev_nblocks
	 * and set the VDISK_READONLY, VDISK_CDROM, and VDISK_REMOVABLE
	 * flags in xdf_dinfo.  If the emulated device type is "cdrom",
	 * we always set VDISK_CDROM, regardless of whether it's present
	 * in the xenbus info parameter.
	 */
	if (xenbus_gather(XBT_NULL, oename,
	    XBP_SECTORS, "%"SCNu64, &nblocks,
	    XBP_SECTOR_SIZE, "%u", &secsize,
	    XBP_INFO, "%u", &dinfo,
	    NULL) != 0) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_connected: "
		    "cannot read backend info", vdp->xdf_addr);
		return (DDI_FAILURE);
	}
	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0) {
		cmn_err(CE_WARN, "xdf@%s: cannot read device-type",
		    vdp->xdf_addr);
		return (DDI_FAILURE);
	}
	if (strcmp(str, XBV_DEV_TYPE_CD) == 0)
		dinfo |= VDISK_CDROM;
	strfree(str);

	if (secsize == 0 || !(ISP2(secsize / DEV_BSIZE)))
		secsize = DEV_BSIZE;
	vdp->xdf_xdev_nblocks = nblocks;
	vdp->xdf_xdev_secsize = secsize;
#ifdef _ILP32
	if (vdp->xdf_xdev_nblocks > DK_MAX_BLOCKS) {
		cmn_err(CE_WARN, "xdf@%s: xdf_setstate_connected: "
		    "backend disk device too large with %llu blocks for"
		    " 32-bit kernel", vdp->xdf_addr, vdp->xdf_xdev_nblocks);
		xvdi_fatal_error(dip, EFBIG, "reading backend info");
		return (DDI_FAILURE);
	}
#endif

	/*
	 * If the physical geometry for a fixed disk has been explicitly
	 * set then make sure that the specified physical geometry isn't
	 * larger than the device we connected to.
	 */
	if (vdp->xdf_pgeom_fixed &&
	    (vdp->xdf_pgeom.g_capacity > vdp->xdf_xdev_nblocks)) {
		cmn_err(CE_WARN,
		    "xdf@%s: connect failed, fixed geometry too large",
		    vdp->xdf_addr);
		return (DDI_FAILURE);
	}

	vdp->xdf_media_req_supported = xenbus_exists(oename, XBP_MEDIA_REQ_SUP);

	/* mark vbd is ready for I/O */
	mutex_enter(&vdp->xdf_dev_lk);
	xdf_set_state(vdp, XD_CONNECTED);

	/* check if the cmlb label should be updated */
	xdf_synthetic_pgeom(dip, &pgeom);
	if ((vdp->xdf_dinfo != dinfo) ||
	    (!vdp->xdf_pgeom_fixed &&
	    (memcmp(&vdp->xdf_pgeom, &pgeom, sizeof (pgeom)) != 0))) {
		vdp->xdf_cmbl_reattach = B_TRUE;

		vdp->xdf_dinfo = dinfo;
		if (!vdp->xdf_pgeom_fixed)
			vdp->xdf_pgeom = pgeom;
	}

	if (XD_IS_CD(vdp) || XD_IS_RM(vdp)) {
		if (vdp->xdf_xdev_nblocks == 0) {
			vdp->xdf_mstate = DKIO_EJECTED;
			cv_broadcast(&vdp->xdf_mstate_cv);
		} else {
			vdp->xdf_mstate = DKIO_INSERTED;
			cv_broadcast(&vdp->xdf_mstate_cv);
		}
	} else {
		if (vdp->xdf_mstate != DKIO_NONE) {
			vdp->xdf_mstate = DKIO_NONE;
			cv_broadcast(&vdp->xdf_mstate_cv);
		}
	}

	mutex_exit(&vdp->xdf_dev_lk);

	cmn_err(CE_CONT, "?xdf@%s: %"PRIu64" blocks", vdp->xdf_addr,
	    (uint64_t)vdp->xdf_xdev_nblocks);

	/* Restart any currently queued up io */
	xdf_io_start(vdp);

	/*
	 * To get to the ready state we have to do IO to the backend device,
	 * but we can't initiate IO from the other end change callback thread
	 * (which is the current context we're executing in.)  This is because
	 * if the other end disconnects while we're doing IO from the callback
	 * thread, then we can't receive that disconnect event and we hang
	 * waiting for an IO that can never complete.
	 */
	(void) ddi_taskq_dispatch(vdp->xdf_ready_tq, xdf_setstate_ready, vdp,
	    DDI_SLEEP);

	(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateConnected);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
xdf_oe_change(dev_info_t *dip, ddi_eventcookie_t id, void *arg, void *impl_data)
{
	XenbusState new_state = *(XenbusState *)impl_data;
	xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);

	DPRINTF(DDI_DBG, ("xdf@%s: otherend state change to %d!\n",
	    vdp->xdf_addr, new_state));

	mutex_enter(&vdp->xdf_cb_lk);

	/* We assume that this callback is single threaded */
	ASSERT(vdp->xdf_oe_change_thread == NULL);
	DEBUG_EVAL(vdp->xdf_oe_change_thread = curthread);

	/* ignore any backend state changes if we're suspending/suspended */
	if (vdp->xdf_suspending || (vdp->xdf_state == XD_SUSPEND)) {
		DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
		mutex_exit(&vdp->xdf_cb_lk);
		return;
	}

	switch (new_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		if (vdp->xdf_state == XD_INIT)
			break;

		xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
		if (xdf_setstate_init(vdp) != DDI_SUCCESS)
			break;
		ASSERT(vdp->xdf_state == XD_INIT);
		break;

	case XenbusStateConnected:
		if ((vdp->xdf_state == XD_CONNECTED) ||
		    (vdp->xdf_state == XD_READY))
			break;

		if (vdp->xdf_state != XD_INIT) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			if (xdf_setstate_init(vdp) != DDI_SUCCESS)
				break;
			ASSERT(vdp->xdf_state == XD_INIT);
		}

		if (xdf_setstate_connected(vdp) != DDI_SUCCESS) {
			xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
			break;
		}
		ASSERT(vdp->xdf_state == XD_CONNECTED);
		break;

	case XenbusStateClosing:
		if (xdf_isopen(vdp, -1)) {
			cmn_err(CE_NOTE,
			    "xdf@%s: hot-unplug failed, still in use",
			    vdp->xdf_addr);
			break;
		}
		/*FALLTHROUGH*/
	case XenbusStateClosed:
		xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
		break;
	}

	/* notify anybody waiting for oe state change */
	cv_broadcast(&vdp->xdf_dev_cv);
	DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
	mutex_exit(&vdp->xdf_cb_lk);
}

static int
xdf_connect_locked(xdf_t *vdp, boolean_t wait)
{
	int	rv, timeouts = 0, reset = 20;
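	/*
	 * Each "timeout" counted below is one expired 0.1 sec
	 * cv_reltimedwait_sig() in the loop, so the initial reset
	 * threshold of 20 corresponds to the 2 sec mentioned in the
	 * comment below, doubling after every forced disconnect.
	 */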

	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));

	/* we can't connect once we're in the closed state */
	if (vdp->xdf_state == XD_CLOSED)
		return (XD_CLOSED);

	vdp->xdf_connect_req++;
	while (vdp->xdf_state != XD_READY) {
		mutex_exit(&vdp->xdf_dev_lk);

		/* only one thread at a time can be the connection thread */
		if (vdp->xdf_connect_thread == NULL)
			vdp->xdf_connect_thread = curthread;

		if (vdp->xdf_connect_thread == curthread) {
			if ((timeouts > 0) && ((timeouts % reset) == 0)) {
				/*
				 * If we haven't established a connection
				 * within the reset time, then disconnect
				 * so we can try again, and double the reset
				 * time.  The reset time starts at 2 sec.
				 */
20002de7185cSEdward Pilatowicz 				(void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
20012de7185cSEdward Pilatowicz 				reset *= 2;
20022de7185cSEdward Pilatowicz 			}
20037f0b8309SEdward Pilatowicz 			if (vdp->xdf_state == XD_UNKNOWN)
20047f0b8309SEdward Pilatowicz 				(void) xdf_setstate_init(vdp);
20052de7185cSEdward Pilatowicz 			if (vdp->xdf_state == XD_INIT)
20062de7185cSEdward Pilatowicz 				(void) xdf_setstate_connected(vdp);
20072de7185cSEdward Pilatowicz 		}
20087f0b8309SEdward Pilatowicz 
20092de7185cSEdward Pilatowicz 		mutex_enter(&vdp->xdf_dev_lk);
20107f0b8309SEdward Pilatowicz 		if (!wait || (vdp->xdf_state == XD_READY))
20117f0b8309SEdward Pilatowicz 			goto out;
20127f0b8309SEdward Pilatowicz 
20137f0b8309SEdward Pilatowicz 		mutex_exit((&vdp->xdf_cb_lk));
20142de7185cSEdward Pilatowicz 		if (vdp->xdf_connect_thread != curthread) {
20157f0b8309SEdward Pilatowicz 			rv = cv_wait_sig(&vdp->xdf_dev_cv, &vdp->xdf_dev_lk);
20162de7185cSEdward Pilatowicz 		} else {
20172de7185cSEdward Pilatowicz 			/* delay for 0.1 sec */
2018d3d50737SRafael Vanoni 			rv = cv_reltimedwait_sig(&vdp->xdf_dev_cv,
2019d3d50737SRafael Vanoni 			    &vdp->xdf_dev_lk, drv_usectohz(100*1000),
2020d3d50737SRafael Vanoni 			    TR_CLOCK_TICK);
20212de7185cSEdward Pilatowicz 			if (rv == -1)
20222de7185cSEdward Pilatowicz 				timeouts++;
20232de7185cSEdward Pilatowicz 		}
20247f0b8309SEdward Pilatowicz 		mutex_exit((&vdp->xdf_dev_lk));
20257f0b8309SEdward Pilatowicz 		mutex_enter((&vdp->xdf_cb_lk));
20267f0b8309SEdward Pilatowicz 		mutex_enter((&vdp->xdf_dev_lk));
20277f0b8309SEdward Pilatowicz 		if (rv == 0)
20287f0b8309SEdward Pilatowicz 			goto out;
20297f0b8309SEdward Pilatowicz 	}
20307f0b8309SEdward Pilatowicz 
20317f0b8309SEdward Pilatowicz out:
20327f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
20337f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
20347f0b8309SEdward Pilatowicz 
20352de7185cSEdward Pilatowicz 	if (vdp->xdf_connect_thread == curthread) {
20362de7185cSEdward Pilatowicz 		/*
20372de7185cSEdward Pilatowicz 		 * wake up someone else so they can become the connection
20382de7185cSEdward Pilatowicz 		 * thread.
20392de7185cSEdward Pilatowicz 		 */
20402de7185cSEdward Pilatowicz 		cv_signal(&vdp->xdf_dev_cv);
20412de7185cSEdward Pilatowicz 		vdp->xdf_connect_thread = NULL;
20422de7185cSEdward Pilatowicz 	}
20432de7185cSEdward Pilatowicz 
20447f0b8309SEdward Pilatowicz 	/* Try to lock the media */
20452de7185cSEdward Pilatowicz 	mutex_exit((&vdp->xdf_dev_lk));
20467f0b8309SEdward Pilatowicz 	(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
20472de7185cSEdward Pilatowicz 	mutex_enter((&vdp->xdf_dev_lk));
20487f0b8309SEdward Pilatowicz 
20497f0b8309SEdward Pilatowicz 	vdp->xdf_connect_req--;
20507f0b8309SEdward Pilatowicz 	return (vdp->xdf_state);
20517f0b8309SEdward Pilatowicz }
20527f0b8309SEdward Pilatowicz 
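/*
 * Restart queued I/O once previously unavailable DMA resources become
 * available again.  (Judging from the DDI_INTR_CLAIMED return value and
 * the ISDMACBON/SETDMACBOFF flag handling, this is presumably registered
 * as a soft interrupt / DMA callback handler elsewhere in the driver.)
 */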
20537f0b8309SEdward Pilatowicz static uint_t
20547f0b8309SEdward Pilatowicz xdf_iorestart(caddr_t arg)
20557f0b8309SEdward Pilatowicz {
20567f0b8309SEdward Pilatowicz 	xdf_t *vdp = (xdf_t *)arg;
20577f0b8309SEdward Pilatowicz 
20587f0b8309SEdward Pilatowicz 	ASSERT(vdp != NULL);
20597f0b8309SEdward Pilatowicz 
20607f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
20617f0b8309SEdward Pilatowicz 	ASSERT(ISDMACBON(vdp));
20627f0b8309SEdward Pilatowicz 	SETDMACBOFF(vdp);
20637f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
20647f0b8309SEdward Pilatowicz 
20657f0b8309SEdward Pilatowicz 	xdf_io_start(vdp);
20667f0b8309SEdward Pilatowicz 
20677f0b8309SEdward Pilatowicz 	return (DDI_INTR_CLAIMED);
20687f0b8309SEdward Pilatowicz }
20697f0b8309SEdward Pilatowicz 
207006bbe1e0Sedp #if defined(XPV_HVM_DRIVER)
207106bbe1e0Sedp 
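/*
 * When running as a PV driver inside an HVM domain, other kernel
 * components need to translate the device path of an xdf node into a
 * held dev_info pointer.  The list below provides that path-to-dip
 * mapping: instances are registered with xdf_hvm_add(), removed with
 * xdf_hvm_rm(), and looked up (with a hold taken) via xdf_hvm_hold().
 */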
207206bbe1e0Sedp typedef struct xdf_hvm_entry {
207306bbe1e0Sedp 	list_node_t	xdf_he_list;
207406bbe1e0Sedp 	char		*xdf_he_path;
207506bbe1e0Sedp 	dev_info_t	*xdf_he_dip;
207606bbe1e0Sedp } xdf_hvm_entry_t;
207706bbe1e0Sedp 
207806bbe1e0Sedp static list_t xdf_hvm_list;
207906bbe1e0Sedp static kmutex_t xdf_hvm_list_lock;
208006bbe1e0Sedp 
208106bbe1e0Sedp static xdf_hvm_entry_t *
20827f0b8309SEdward Pilatowicz i_xdf_hvm_find(const char *path, dev_info_t *dip)
208306bbe1e0Sedp {
208406bbe1e0Sedp 	xdf_hvm_entry_t	*i;
208506bbe1e0Sedp 
208606bbe1e0Sedp 	ASSERT((path != NULL) || (dip != NULL));
208706bbe1e0Sedp 	ASSERT(MUTEX_HELD(&xdf_hvm_list_lock));
208806bbe1e0Sedp 
208906bbe1e0Sedp 	i = list_head(&xdf_hvm_list);
209006bbe1e0Sedp 	while (i != NULL) {
209106bbe1e0Sedp 		if ((path != NULL) && strcmp(i->xdf_he_path, path) != 0) {
209206bbe1e0Sedp 			i = list_next(&xdf_hvm_list, i);
209306bbe1e0Sedp 			continue;
209406bbe1e0Sedp 		}
209506bbe1e0Sedp 		if ((dip != NULL) && (i->xdf_he_dip != dip)) {
209606bbe1e0Sedp 			i = list_next(&xdf_hvm_list, i);
209706bbe1e0Sedp 			continue;
209806bbe1e0Sedp 		}
209906bbe1e0Sedp 		break;
210006bbe1e0Sedp 	}
210106bbe1e0Sedp 	return (i);
210206bbe1e0Sedp }
210306bbe1e0Sedp 
210406bbe1e0Sedp dev_info_t *
21057f0b8309SEdward Pilatowicz xdf_hvm_hold(const char *path)
210606bbe1e0Sedp {
210706bbe1e0Sedp 	xdf_hvm_entry_t	*i;
210806bbe1e0Sedp 	dev_info_t	*dip;
210906bbe1e0Sedp 
211006bbe1e0Sedp 	mutex_enter(&xdf_hvm_list_lock);
211106bbe1e0Sedp 	i = i_xdf_hvm_find(path, NULL);
211206bbe1e0Sedp 	if (i == NULL) {
211306bbe1e0Sedp 		mutex_exit(&xdf_hvm_list_lock);
211406bbe1e0Sedp 		return (NULL);
211506bbe1e0Sedp 	}
211606bbe1e0Sedp 	ndi_hold_devi(dip = i->xdf_he_dip);
211706bbe1e0Sedp 	mutex_exit(&xdf_hvm_list_lock);
211806bbe1e0Sedp 	return (dip);
211906bbe1e0Sedp }
212006bbe1e0Sedp 
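/*
 * Record a newly attached instance in the HVM lookup list; the ASSERTs
 * below guarantee that neither the path nor the dip is already present.
 */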
212106bbe1e0Sedp static void
212206bbe1e0Sedp xdf_hvm_add(dev_info_t *dip)
212306bbe1e0Sedp {
212406bbe1e0Sedp 	xdf_hvm_entry_t	*i;
212506bbe1e0Sedp 	char		*path;
212606bbe1e0Sedp 
212706bbe1e0Sedp 	/* figure out the path for the dip */
212806bbe1e0Sedp 	path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
212906bbe1e0Sedp 	(void) ddi_pathname(dip, path);
213006bbe1e0Sedp 
213106bbe1e0Sedp 	i = kmem_alloc(sizeof (*i), KM_SLEEP);
213206bbe1e0Sedp 	i->xdf_he_dip = dip;
213306bbe1e0Sedp 	i->xdf_he_path = i_ddi_strdup(path, KM_SLEEP);
213406bbe1e0Sedp 
213506bbe1e0Sedp 	mutex_enter(&xdf_hvm_list_lock);
213606bbe1e0Sedp 	ASSERT(i_xdf_hvm_find(path, NULL) == NULL);
213706bbe1e0Sedp 	ASSERT(i_xdf_hvm_find(NULL, dip) == NULL);
213806bbe1e0Sedp 	list_insert_head(&xdf_hvm_list, i);
213906bbe1e0Sedp 	mutex_exit(&xdf_hvm_list_lock);
214006bbe1e0Sedp 
214106bbe1e0Sedp 	kmem_free(path, MAXPATHLEN);
214206bbe1e0Sedp }
214306bbe1e0Sedp 
214406bbe1e0Sedp static void
214506bbe1e0Sedp xdf_hvm_rm(dev_info_t *dip)
214606bbe1e0Sedp {
214706bbe1e0Sedp 	xdf_hvm_entry_t	*i;
214806bbe1e0Sedp 
214906bbe1e0Sedp 	mutex_enter(&xdf_hvm_list_lock);
215006bbe1e0Sedp 	VERIFY((i = i_xdf_hvm_find(NULL, dip)) != NULL);
215106bbe1e0Sedp 	list_remove(&xdf_hvm_list, i);
215206bbe1e0Sedp 	mutex_exit(&xdf_hvm_list_lock);
215306bbe1e0Sedp 
215406bbe1e0Sedp 	kmem_free(i->xdf_he_path, strlen(i->xdf_he_path) + 1);
215506bbe1e0Sedp 	kmem_free(i, sizeof (*i));
215606bbe1e0Sedp }
215706bbe1e0Sedp 
215806bbe1e0Sedp static void
215906bbe1e0Sedp xdf_hvm_init(void)
216006bbe1e0Sedp {
216106bbe1e0Sedp 	list_create(&xdf_hvm_list, sizeof (xdf_hvm_entry_t),
216206bbe1e0Sedp 	    offsetof(xdf_hvm_entry_t, xdf_he_list));
216306bbe1e0Sedp 	mutex_init(&xdf_hvm_list_lock, NULL, MUTEX_DEFAULT, NULL);
216406bbe1e0Sedp }
216506bbe1e0Sedp 
216606bbe1e0Sedp static void
216706bbe1e0Sedp xdf_hvm_fini(void)
216806bbe1e0Sedp {
216906bbe1e0Sedp 	ASSERT(list_head(&xdf_hvm_list) == NULL);
217006bbe1e0Sedp 	list_destroy(&xdf_hvm_list);
217106bbe1e0Sedp 	mutex_destroy(&xdf_hvm_list_lock);
217206bbe1e0Sedp }
217306bbe1e0Sedp 
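/*
 * Establish a connection on behalf of an HVM caller: wait until the
 * backend hotplug scripts report "connected", bail out early for cdrom
 * backends that lack media request support, and then run the regular
 * connection state machine via xdf_connect_locked().
 */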
21747f0b8309SEdward Pilatowicz boolean_t
217506bbe1e0Sedp xdf_hvm_connect(dev_info_t *dip)
217606bbe1e0Sedp {
217706bbe1e0Sedp 	xdf_t	*vdp = (xdf_t *)ddi_get_driver_private(dip);
21787f0b8309SEdward Pilatowicz 	char	*oename, *str;
217906bbe1e0Sedp 	int	rv;
218006bbe1e0Sedp 
21817f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
21827f0b8309SEdward Pilatowicz 
21837f0b8309SEdward Pilatowicz 	/*
21847f0b8309SEdward Pilatowicz 	 * Before trying to establish a connection we need to wait for the
21857f0b8309SEdward Pilatowicz 	 * backend hotplug scripts to have run.  Once they are run the
21867f0b8309SEdward Pilatowicz 	 * "<oename>/hotplug-status" property will be set to "connected".
21877f0b8309SEdward Pilatowicz 	 */
21887f0b8309SEdward Pilatowicz 	for (;;) {
21897f0b8309SEdward Pilatowicz 		ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
21907f0b8309SEdward Pilatowicz 
21917f0b8309SEdward Pilatowicz 		/*
21927f0b8309SEdward Pilatowicz 		 * Get the xenbus path to the backend device.  Note that
21937f0b8309SEdward Pilatowicz 		 * we can't cache this path (and we look it up on each pass
21947f0b8309SEdward Pilatowicz 		 * through this loop) because it could change during
21957f0b8309SEdward Pilatowicz 		 * suspend, resume, and migration operations.
21967f0b8309SEdward Pilatowicz 		 */
21977f0b8309SEdward Pilatowicz 		if ((oename = xvdi_get_oename(dip)) == NULL) {
21987f0b8309SEdward Pilatowicz 			mutex_exit(&vdp->xdf_cb_lk);
21997f0b8309SEdward Pilatowicz 			return (B_FALSE);
22007f0b8309SEdward Pilatowicz 		}
22017f0b8309SEdward Pilatowicz 
22027f0b8309SEdward Pilatowicz 		str = NULL;
22037f0b8309SEdward Pilatowicz 		if ((xenbus_read_str(oename, XBP_HP_STATUS, &str) == 0) &&
22047f0b8309SEdward Pilatowicz 		    (strcmp(str, XBV_HP_STATUS_CONN) == 0))
22057f0b8309SEdward Pilatowicz 			break;
22067f0b8309SEdward Pilatowicz 
22077f0b8309SEdward Pilatowicz 		if (str != NULL)
22087f0b8309SEdward Pilatowicz 			strfree(str);
22097f0b8309SEdward Pilatowicz 
22107f0b8309SEdward Pilatowicz 		/* wait for an update to "<oename>/hotplug-status" */
22117f0b8309SEdward Pilatowicz 		if (cv_wait_sig(&vdp->xdf_hp_status_cv, &vdp->xdf_cb_lk) == 0) {
22127f0b8309SEdward Pilatowicz 			/* we got interrupted by a signal */
22137f0b8309SEdward Pilatowicz 			mutex_exit(&vdp->xdf_cb_lk);
22147f0b8309SEdward Pilatowicz 			return (B_FALSE);
22157f0b8309SEdward Pilatowicz 		}
22167f0b8309SEdward Pilatowicz 	}
22177f0b8309SEdward Pilatowicz 
22187f0b8309SEdward Pilatowicz 	/* Good news.  The backend hotplug scripts have been run. */
22197f0b8309SEdward Pilatowicz 	ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
22207f0b8309SEdward Pilatowicz 	ASSERT(strcmp(str, XBV_HP_STATUS_CONN) == 0);
22217f0b8309SEdward Pilatowicz 	strfree(str);
22227f0b8309SEdward Pilatowicz 
22237f0b8309SEdward Pilatowicz 	/*
22247f0b8309SEdward Pilatowicz 	 * If we're emulating a cd device and if the backend doesn't support
22257f0b8309SEdward Pilatowicz 	 * media request operations, then we're not going to bother trying
22267f0b8309SEdward Pilatowicz 	 * to establish a connection for a couple of reasons.  First off,
22277f0b8309SEdward Pilatowicz 	 * media request support is required for operations like eject and
22287f0b8309SEdward Pilatowicz 	 * media locking.  Second, other backend platforms like Linux don't
22297f0b8309SEdward Pilatowicz 	 * support hvm pv cdrom access.  They don't even have a backend pv
22307f0b8309SEdward Pilatowicz 	 * driver for cdrom device nodes, so we don't want to block forever
22317f0b8309SEdward Pilatowicz 	 * waiting for a connection to a backend driver that doesn't exist.
22327f0b8309SEdward Pilatowicz 	 */
22337f0b8309SEdward Pilatowicz 	if (XD_IS_CD(vdp) && !xenbus_exists(oename, XBP_MEDIA_REQ_SUP)) {
22347f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
22357f0b8309SEdward Pilatowicz 		return (B_FALSE);
22367f0b8309SEdward Pilatowicz 	}
22377f0b8309SEdward Pilatowicz 
22382de7185cSEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
22397f0b8309SEdward Pilatowicz 	rv = xdf_connect_locked(vdp, B_TRUE);
22407f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
22417f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
22427f0b8309SEdward Pilatowicz 
22437f0b8309SEdward Pilatowicz 	return ((rv == XD_READY) ? B_TRUE : B_FALSE);
224406bbe1e0Sedp }
224506bbe1e0Sedp 
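/*
 * Let an external caller dictate the physical geometry reported by this
 * instance.  Setting xdf_pgeom_fixed presumably prevents the geometry
 * from being recomputed when the backend connection is (re)established.
 */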
224606bbe1e0Sedp int
224706bbe1e0Sedp xdf_hvm_setpgeom(dev_info_t *dip, cmlb_geom_t *geomp)
224806bbe1e0Sedp {
224906bbe1e0Sedp 	xdf_t	*vdp = (xdf_t *)ddi_get_driver_private(dip);
225006bbe1e0Sedp 
225106bbe1e0Sedp 	/* sanity check the requested physical geometry */
225206bbe1e0Sedp 	mutex_enter(&vdp->xdf_dev_lk);
225306bbe1e0Sedp 	if ((geomp->g_secsize != XB_BSIZE) ||
225406bbe1e0Sedp 	    (geomp->g_capacity == 0)) {
225506bbe1e0Sedp 		mutex_exit(&vdp->xdf_dev_lk);
225606bbe1e0Sedp 		return (EINVAL);
225706bbe1e0Sedp 	}
225806bbe1e0Sedp 
225906bbe1e0Sedp 	/*
226006bbe1e0Sedp 	 * If we've already connected to the backend device then make sure
226106bbe1e0Sedp 	 * we're not defining a physical geometry larger than our backend
226206bbe1e0Sedp 	 * device.
226306bbe1e0Sedp 	 */
226406bbe1e0Sedp 	if ((vdp->xdf_xdev_nblocks != 0) &&
226506bbe1e0Sedp 	    (geomp->g_capacity > vdp->xdf_xdev_nblocks)) {
226606bbe1e0Sedp 		mutex_exit(&vdp->xdf_dev_lk);
226706bbe1e0Sedp 		return (EINVAL);
226806bbe1e0Sedp 	}
226906bbe1e0Sedp 
22707f0b8309SEdward Pilatowicz 	bzero(&vdp->xdf_pgeom, sizeof (vdp->xdf_pgeom));
22717f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_ncyl = geomp->g_ncyl;
22727f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_acyl = geomp->g_acyl;
22737f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_nhead = geomp->g_nhead;
22747f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_nsect = geomp->g_nsect;
22757f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_secsize = geomp->g_secsize;
22767f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_capacity = geomp->g_capacity;
22777f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_intrlv = geomp->g_intrlv;
22787f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom.g_rpm = geomp->g_rpm;
22797f0b8309SEdward Pilatowicz 
22807f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom_fixed = B_TRUE;
228106bbe1e0Sedp 	mutex_exit(&vdp->xdf_dev_lk);
228206bbe1e0Sedp 
228306bbe1e0Sedp 	/* force a re-validation */
228406bbe1e0Sedp 	cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
228506bbe1e0Sedp 
228606bbe1e0Sedp 	return (0);
228706bbe1e0Sedp }
228806bbe1e0Sedp 
22897f0b8309SEdward Pilatowicz boolean_t
22907f0b8309SEdward Pilatowicz xdf_is_cd(dev_info_t *dip)
22917f0b8309SEdward Pilatowicz {
22927f0b8309SEdward Pilatowicz 	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
22937f0b8309SEdward Pilatowicz 	boolean_t	rv;
22947f0b8309SEdward Pilatowicz 
22957f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
22967f0b8309SEdward Pilatowicz 	rv = XD_IS_CD(vdp);
22977f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
22987f0b8309SEdward Pilatowicz 	return (rv);
22997f0b8309SEdward Pilatowicz }
23007f0b8309SEdward Pilatowicz 
23017f0b8309SEdward Pilatowicz boolean_t
23027f0b8309SEdward Pilatowicz xdf_is_rm(dev_info_t *dip)
23037f0b8309SEdward Pilatowicz {
23047f0b8309SEdward Pilatowicz 	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
23057f0b8309SEdward Pilatowicz 	boolean_t	rv;
23067f0b8309SEdward Pilatowicz 
23077f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
23087f0b8309SEdward Pilatowicz 	rv = XD_IS_RM(vdp);
23097f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
23107f0b8309SEdward Pilatowicz 	return (rv);
23117f0b8309SEdward Pilatowicz }
23127f0b8309SEdward Pilatowicz 
23137f0b8309SEdward Pilatowicz boolean_t
23147f0b8309SEdward Pilatowicz xdf_media_req_supported(dev_info_t *dip)
23157f0b8309SEdward Pilatowicz {
23167f0b8309SEdward Pilatowicz 	xdf_t		*vdp = (xdf_t *)ddi_get_driver_private(dip);
23177f0b8309SEdward Pilatowicz 	boolean_t	rv;
23187f0b8309SEdward Pilatowicz 
23197f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
23207f0b8309SEdward Pilatowicz 	rv = vdp->xdf_media_req_supported;
23217f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
23227f0b8309SEdward Pilatowicz 	return (rv);
23237f0b8309SEdward Pilatowicz }
23247f0b8309SEdward Pilatowicz 
232506bbe1e0Sedp #endif /* XPV_HVM_DRIVER */
23267f0b8309SEdward Pilatowicz 
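/*
 * The xdf_lb_*() functions below are the block-layer callbacks used by
 * cmlb, the common disk label module, to query capacity, geometry, and
 * attributes and to perform raw label I/O against the device.
 */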
23277f0b8309SEdward Pilatowicz static int
23287f0b8309SEdward Pilatowicz xdf_lb_getcap(dev_info_t *dip, diskaddr_t *capp)
23297f0b8309SEdward Pilatowicz {
23307f0b8309SEdward Pilatowicz 	xdf_t *vdp;
23317f0b8309SEdward Pilatowicz 	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
23327f0b8309SEdward Pilatowicz 
23337f0b8309SEdward Pilatowicz 	if (vdp == NULL)
23347f0b8309SEdward Pilatowicz 		return (ENXIO);
23357f0b8309SEdward Pilatowicz 
23367f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
23377f0b8309SEdward Pilatowicz 	*capp = vdp->xdf_pgeom.g_capacity;
23387f0b8309SEdward Pilatowicz 	DPRINTF(LBL_DBG, ("xdf@%s:capacity %llu\n", vdp->xdf_addr, *capp));
23397f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
23407f0b8309SEdward Pilatowicz 	return (0);
23417f0b8309SEdward Pilatowicz }
23427f0b8309SEdward Pilatowicz 
23437f0b8309SEdward Pilatowicz static int
23447f0b8309SEdward Pilatowicz xdf_lb_getpgeom(dev_info_t *dip, cmlb_geom_t *geomp)
23457f0b8309SEdward Pilatowicz {
23467f0b8309SEdward Pilatowicz 	xdf_t *vdp;
23477f0b8309SEdward Pilatowicz 
23487f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
23497f0b8309SEdward Pilatowicz 		return (ENXIO);
23507f0b8309SEdward Pilatowicz 	*geomp = vdp->xdf_pgeom;
23517f0b8309SEdward Pilatowicz 	return (0);
23527f0b8309SEdward Pilatowicz }
23537f0b8309SEdward Pilatowicz 
23547f0b8309SEdward Pilatowicz /*
23557f0b8309SEdward Pilatowicz  * No real HBA, no geometry available from it
23567f0b8309SEdward Pilatowicz  */
23577f0b8309SEdward Pilatowicz /*ARGSUSED*/
23587f0b8309SEdward Pilatowicz static int
23597f0b8309SEdward Pilatowicz xdf_lb_getvgeom(dev_info_t *dip, cmlb_geom_t *geomp)
23607f0b8309SEdward Pilatowicz {
23617f0b8309SEdward Pilatowicz 	return (EINVAL);
23627f0b8309SEdward Pilatowicz }
23637f0b8309SEdward Pilatowicz 
23647f0b8309SEdward Pilatowicz static int
23657f0b8309SEdward Pilatowicz xdf_lb_getattribute(dev_info_t *dip, tg_attribute_t *tgattributep)
23667f0b8309SEdward Pilatowicz {
23677f0b8309SEdward Pilatowicz 	xdf_t *vdp;
23687f0b8309SEdward Pilatowicz 
23697f0b8309SEdward Pilatowicz 	if (!(vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))))
23707f0b8309SEdward Pilatowicz 		return (ENXIO);
23717f0b8309SEdward Pilatowicz 
23727f0b8309SEdward Pilatowicz 	if (XD_IS_RO(vdp))
23737f0b8309SEdward Pilatowicz 		tgattributep->media_is_writable = 0;
23747f0b8309SEdward Pilatowicz 	else
23757f0b8309SEdward Pilatowicz 		tgattributep->media_is_writable = 1;
2376*5cd2e4b9SYuri Pankov 	tgattributep->media_is_rotational = 0;
23777f0b8309SEdward Pilatowicz 	return (0);
23787f0b8309SEdward Pilatowicz }
23797f0b8309SEdward Pilatowicz 
23807f0b8309SEdward Pilatowicz /* ARGSUSED3 */
23817f0b8309SEdward Pilatowicz int
23827f0b8309SEdward Pilatowicz xdf_lb_getinfo(dev_info_t *dip, int cmd, void *arg, void *tg_cookie)
23837f0b8309SEdward Pilatowicz {
238465908c77Syu, larry liu - Sun Microsystems - Beijing China 	int instance;
238565908c77Syu, larry liu - Sun Microsystems - Beijing China 	xdf_t   *vdp;
238665908c77Syu, larry liu - Sun Microsystems - Beijing China 
238765908c77Syu, larry liu - Sun Microsystems - Beijing China 	instance = ddi_get_instance(dip);
238865908c77Syu, larry liu - Sun Microsystems - Beijing China 
238965908c77Syu, larry liu - Sun Microsystems - Beijing China 	if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
239065908c77Syu, larry liu - Sun Microsystems - Beijing China 		return (ENXIO);
239165908c77Syu, larry liu - Sun Microsystems - Beijing China 
23927f0b8309SEdward Pilatowicz 	switch (cmd) {
23937f0b8309SEdward Pilatowicz 	case TG_GETPHYGEOM:
23947f0b8309SEdward Pilatowicz 		return (xdf_lb_getpgeom(dip, (cmlb_geom_t *)arg));
23957f0b8309SEdward Pilatowicz 	case TG_GETVIRTGEOM:
23967f0b8309SEdward Pilatowicz 		return (xdf_lb_getvgeom(dip, (cmlb_geom_t *)arg));
23977f0b8309SEdward Pilatowicz 	case TG_GETCAPACITY:
23987f0b8309SEdward Pilatowicz 		return (xdf_lb_getcap(dip, (diskaddr_t *)arg));
23997f0b8309SEdward Pilatowicz 	case TG_GETBLOCKSIZE:
240065908c77Syu, larry liu - Sun Microsystems - Beijing China 		mutex_enter(&vdp->xdf_cb_lk);
240165908c77Syu, larry liu - Sun Microsystems - Beijing China 		*(uint32_t *)arg = vdp->xdf_xdev_secsize;
240265908c77Syu, larry liu - Sun Microsystems - Beijing China 		mutex_exit(&vdp->xdf_cb_lk);
24037f0b8309SEdward Pilatowicz 		return (0);
24047f0b8309SEdward Pilatowicz 	case TG_GETATTR:
24057f0b8309SEdward Pilatowicz 		return (xdf_lb_getattribute(dip, (tg_attribute_t *)arg));
24067f0b8309SEdward Pilatowicz 	default:
24077f0b8309SEdward Pilatowicz 		return (ENOTTY);
24087f0b8309SEdward Pilatowicz 	}
24097f0b8309SEdward Pilatowicz }
24107f0b8309SEdward Pilatowicz 
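/*
 * Synchronous raw read/write, used by cmlb for label access and by the
 * write-cache flush ioctl.  Note the unit conversion: 'start' is in
 * backend sectors while b_blkno is in DEV_BSIZE units.  The buf is
 * pushed onto the normal I/O queue and this thread blocks in biowait().
 */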
24117f0b8309SEdward Pilatowicz /* ARGSUSED5 */
24127f0b8309SEdward Pilatowicz int
24137f0b8309SEdward Pilatowicz xdf_lb_rdwr(dev_info_t *dip, uchar_t cmd, void *bufp,
24147f0b8309SEdward Pilatowicz     diskaddr_t start, size_t reqlen, void *tg_cookie)
24157f0b8309SEdward Pilatowicz {
24167f0b8309SEdward Pilatowicz 	xdf_t *vdp;
24177f0b8309SEdward Pilatowicz 	struct buf *bp;
24187f0b8309SEdward Pilatowicz 	int err = 0;
24197f0b8309SEdward Pilatowicz 
24207f0b8309SEdward Pilatowicz 	vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
24217f0b8309SEdward Pilatowicz 
24227f0b8309SEdward Pilatowicz 	/* We don't allow IO from the oe_change callback thread */
24237f0b8309SEdward Pilatowicz 	ASSERT(curthread != vdp->xdf_oe_change_thread);
24247f0b8309SEdward Pilatowicz 
242565908c77Syu, larry liu - Sun Microsystems - Beijing China 	if ((start + ((reqlen / (vdp->xdf_xdev_secsize / DEV_BSIZE))
242665908c77Syu, larry liu - Sun Microsystems - Beijing China 	    >> DEV_BSHIFT)) > vdp->xdf_pgeom.g_capacity)
24277f0b8309SEdward Pilatowicz 		return (EINVAL);
24287f0b8309SEdward Pilatowicz 
24297f0b8309SEdward Pilatowicz 	bp = getrbuf(KM_SLEEP);
24307f0b8309SEdward Pilatowicz 	if (cmd == TG_READ)
24317f0b8309SEdward Pilatowicz 		bp->b_flags = B_BUSY | B_READ;
24327f0b8309SEdward Pilatowicz 	else
24337f0b8309SEdward Pilatowicz 		bp->b_flags = B_BUSY | B_WRITE;
243465908c77Syu, larry liu - Sun Microsystems - Beijing China 
24357f0b8309SEdward Pilatowicz 	bp->b_un.b_addr = bufp;
24367f0b8309SEdward Pilatowicz 	bp->b_bcount = reqlen;
243765908c77Syu, larry liu - Sun Microsystems - Beijing China 	bp->b_blkno = start * (vdp->xdf_xdev_secsize / DEV_BSIZE);
24387f0b8309SEdward Pilatowicz 	bp->b_edev = DDI_DEV_T_NONE; /* don't have dev_t */
24397f0b8309SEdward Pilatowicz 
24407f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
24417f0b8309SEdward Pilatowicz 	xdf_bp_push(vdp, bp);
24427f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
24437f0b8309SEdward Pilatowicz 	xdf_io_start(vdp);
24447f0b8309SEdward Pilatowicz 	if (curthread == vdp->xdf_ready_tq_thread)
24457f0b8309SEdward Pilatowicz 		(void) xdf_ring_drain(vdp);
24467f0b8309SEdward Pilatowicz 	err = biowait(bp);
24477f0b8309SEdward Pilatowicz 	ASSERT(bp->b_flags & B_DONE);
24487f0b8309SEdward Pilatowicz 	freerbuf(bp);
24497f0b8309SEdward Pilatowicz 	return (err);
24507f0b8309SEdward Pilatowicz }
24517f0b8309SEdward Pilatowicz 
24527f0b8309SEdward Pilatowicz /*
24537f0b8309SEdward Pilatowicz  * Lock the current media.  Set the media state to "lock".
24547f0b8309SEdward Pilatowicz  * (Media locks are only respected by the backend driver.)
24557f0b8309SEdward Pilatowicz  */
24567f0b8309SEdward Pilatowicz static int
24577f0b8309SEdward Pilatowicz xdf_ioctl_mlock(xdf_t *vdp)
24587f0b8309SEdward Pilatowicz {
24597f0b8309SEdward Pilatowicz 	int rv;
24607f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
24617f0b8309SEdward Pilatowicz 	rv = xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
24627f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
24637f0b8309SEdward Pilatowicz 	return (rv);
24647f0b8309SEdward Pilatowicz }
24657f0b8309SEdward Pilatowicz 
24667f0b8309SEdward Pilatowicz /*
24677f0b8309SEdward Pilatowicz  * Release a media lock.  Set the media state to "none".
24687f0b8309SEdward Pilatowicz  */
24697f0b8309SEdward Pilatowicz static int
24707f0b8309SEdward Pilatowicz xdf_ioctl_munlock(xdf_t *vdp)
24717f0b8309SEdward Pilatowicz {
24727f0b8309SEdward Pilatowicz 	int rv;
24737f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
24747f0b8309SEdward Pilatowicz 	rv = xdf_media_req(vdp, XBV_MEDIA_REQ_NONE, B_TRUE);
24757f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
24767f0b8309SEdward Pilatowicz 	return (rv);
24777f0b8309SEdward Pilatowicz }
24787f0b8309SEdward Pilatowicz 
24797f0b8309SEdward Pilatowicz /*
24807f0b8309SEdward Pilatowicz  * Eject the current media.  Ignores any media locks.  (Media locks
24817f0b8309SEdward Pilatowicz  * are only for the benefit of the backend.)
24827f0b8309SEdward Pilatowicz  */
24837f0b8309SEdward Pilatowicz static int
24847f0b8309SEdward Pilatowicz xdf_ioctl_eject(xdf_t *vdp)
24857f0b8309SEdward Pilatowicz {
24867f0b8309SEdward Pilatowicz 	int rv;
24877f0b8309SEdward Pilatowicz 
24887f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
24897f0b8309SEdward Pilatowicz 	if ((rv = xdf_media_req(vdp, XBV_MEDIA_REQ_EJECT, B_FALSE)) != 0) {
24907f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
24917f0b8309SEdward Pilatowicz 		return (rv);
24927f0b8309SEdward Pilatowicz 	}
24937f0b8309SEdward Pilatowicz 
24947f0b8309SEdward Pilatowicz 	/*
24957f0b8309SEdward Pilatowicz 	 * We've set the media request xenbus parameter to eject, so now
24967f0b8309SEdward Pilatowicz 	 * disconnect from the backend, wait for the backend to clear
24977f0b8309SEdward Pilatowicz 	 * the media request xenbus parameter, and then we can reconnect
24987f0b8309SEdward Pilatowicz 	 * to the backend.
24997f0b8309SEdward Pilatowicz 	 */
25007f0b8309SEdward Pilatowicz 	(void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
25017f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
25027f0b8309SEdward Pilatowicz 	if (xdf_connect_locked(vdp, B_TRUE) != XD_READY) {
25037f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
25047f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
25057f0b8309SEdward Pilatowicz 		return (EIO);
25067f0b8309SEdward Pilatowicz 	}
25077f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
25087f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
25097f0b8309SEdward Pilatowicz 	return (0);
25107f0b8309SEdward Pilatowicz }
25117f0b8309SEdward Pilatowicz 
25127f0b8309SEdward Pilatowicz /*
25137f0b8309SEdward Pilatowicz  * Watch for media state changes.  This can be an insertion of a device
25147f0b8309SEdward Pilatowicz  * (triggered by a 'xm block-configure' request in another domain) or
25157f0b8309SEdward Pilatowicz  * the ejection of a device (triggered by a local "eject" operation).
25167f0b8309SEdward Pilatowicz  * For a full description of the DKIOCSTATE ioctl behavior see dkio(7I).
25177f0b8309SEdward Pilatowicz  */
25187f0b8309SEdward Pilatowicz static int
25197f0b8309SEdward Pilatowicz xdf_dkstate(xdf_t *vdp, enum dkio_state mstate)
25207f0b8309SEdward Pilatowicz {
25217f0b8309SEdward Pilatowicz 	enum dkio_state		prev_state;
25227f0b8309SEdward Pilatowicz 
25237f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
25247f0b8309SEdward Pilatowicz 	prev_state = vdp->xdf_mstate;
25257f0b8309SEdward Pilatowicz 
25267f0b8309SEdward Pilatowicz 	if (vdp->xdf_mstate == mstate) {
25277f0b8309SEdward Pilatowicz 		while (vdp->xdf_mstate == prev_state) {
25287f0b8309SEdward Pilatowicz 			if (cv_wait_sig(&vdp->xdf_mstate_cv,
25297f0b8309SEdward Pilatowicz 			    &vdp->xdf_cb_lk) == 0) {
25307f0b8309SEdward Pilatowicz 				mutex_exit(&vdp->xdf_cb_lk);
25317f0b8309SEdward Pilatowicz 				return (EINTR);
25327f0b8309SEdward Pilatowicz 			}
25337f0b8309SEdward Pilatowicz 		}
25347f0b8309SEdward Pilatowicz 	}
25357f0b8309SEdward Pilatowicz 
25367f0b8309SEdward Pilatowicz 	if ((prev_state != DKIO_INSERTED) &&
25377f0b8309SEdward Pilatowicz 	    (vdp->xdf_mstate == DKIO_INSERTED)) {
25387f0b8309SEdward Pilatowicz 		(void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
25397f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
25407f0b8309SEdward Pilatowicz 		return (0);
25417f0b8309SEdward Pilatowicz 	}
25427f0b8309SEdward Pilatowicz 
25437f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
25447f0b8309SEdward Pilatowicz 	return (0);
25457f0b8309SEdward Pilatowicz }
25467f0b8309SEdward Pilatowicz 
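/*
 * Main ioctl dispatch.  All label- and partition-related ioctls are
 * punted wholesale to cmlb_ioctl(); media (eject/lock), state-change,
 * and write-cache ioctls are handled directly below.
 */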
25477f0b8309SEdward Pilatowicz /*ARGSUSED*/
25487f0b8309SEdward Pilatowicz static int
25497f0b8309SEdward Pilatowicz xdf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
25507f0b8309SEdward Pilatowicz     int *rvalp)
25517f0b8309SEdward Pilatowicz {
25527f0b8309SEdward Pilatowicz 	minor_t		minor = getminor(dev);
25537f0b8309SEdward Pilatowicz 	int		part = XDF_PART(minor);
25547f0b8309SEdward Pilatowicz 	xdf_t		*vdp;
25557f0b8309SEdward Pilatowicz 	int		rv;
25567f0b8309SEdward Pilatowicz 
25577f0b8309SEdward Pilatowicz 	if (((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL) ||
25587f0b8309SEdward Pilatowicz 	    (!xdf_isopen(vdp, part)))
25597f0b8309SEdward Pilatowicz 		return (ENXIO);
25607f0b8309SEdward Pilatowicz 
25617f0b8309SEdward Pilatowicz 	DPRINTF(IOCTL_DBG, ("xdf@%s:ioctl: cmd %d (0x%x)\n",
25627f0b8309SEdward Pilatowicz 	    vdp->xdf_addr, cmd, cmd));
25637f0b8309SEdward Pilatowicz 
25647f0b8309SEdward Pilatowicz 	switch (cmd) {
25657f0b8309SEdward Pilatowicz 	default:
25667f0b8309SEdward Pilatowicz 		return (ENOTTY);
25677f0b8309SEdward Pilatowicz 	case DKIOCG_PHYGEOM:
25687f0b8309SEdward Pilatowicz 	case DKIOCG_VIRTGEOM:
25697f0b8309SEdward Pilatowicz 	case DKIOCGGEOM:
25707f0b8309SEdward Pilatowicz 	case DKIOCSGEOM:
25717f0b8309SEdward Pilatowicz 	case DKIOCGAPART:
25727f0b8309SEdward Pilatowicz 	case DKIOCSAPART:
25737f0b8309SEdward Pilatowicz 	case DKIOCGVTOC:
25747f0b8309SEdward Pilatowicz 	case DKIOCSVTOC:
25757f0b8309SEdward Pilatowicz 	case DKIOCPARTINFO:
25767f0b8309SEdward Pilatowicz 	case DKIOCGEXTVTOC:
25777f0b8309SEdward Pilatowicz 	case DKIOCSEXTVTOC:
25787f0b8309SEdward Pilatowicz 	case DKIOCEXTPARTINFO:
25797f0b8309SEdward Pilatowicz 	case DKIOCGMBOOT:
25807f0b8309SEdward Pilatowicz 	case DKIOCSMBOOT:
25817f0b8309SEdward Pilatowicz 	case DKIOCGETEFI:
25827f0b8309SEdward Pilatowicz 	case DKIOCSETEFI:
2583aa1b14e7SSheshadri Vasudevan 	case DKIOCSETEXTPART:
25847f0b8309SEdward Pilatowicz 	case DKIOCPARTITION:
25857f0b8309SEdward Pilatowicz 		return (cmlb_ioctl(vdp->xdf_vd_lbl, dev, cmd, arg, mode, credp,
25867f0b8309SEdward Pilatowicz 		    rvalp, NULL));
25877f0b8309SEdward Pilatowicz 	case FDEJECT:
25887f0b8309SEdward Pilatowicz 	case DKIOCEJECT:
25897f0b8309SEdward Pilatowicz 	case CDROMEJECT:
25907f0b8309SEdward Pilatowicz 		return (xdf_ioctl_eject(vdp));
25917f0b8309SEdward Pilatowicz 	case DKIOCLOCK:
25927f0b8309SEdward Pilatowicz 		return (xdf_ioctl_mlock(vdp));
25937f0b8309SEdward Pilatowicz 	case DKIOCUNLOCK:
25947f0b8309SEdward Pilatowicz 		return (xdf_ioctl_munlock(vdp));
25957f0b8309SEdward Pilatowicz 	case CDROMREADOFFSET: {
25967f0b8309SEdward Pilatowicz 		int offset = 0;
25977f0b8309SEdward Pilatowicz 		if (!XD_IS_CD(vdp))
25987f0b8309SEdward Pilatowicz 			return (ENOTTY);
25997f0b8309SEdward Pilatowicz 		if (ddi_copyout(&offset, (void *)arg, sizeof (int), mode))
26007f0b8309SEdward Pilatowicz 			return (EFAULT);
26017f0b8309SEdward Pilatowicz 		return (0);
26027f0b8309SEdward Pilatowicz 	}
26037f0b8309SEdward Pilatowicz 	case DKIOCGMEDIAINFO: {
26047f0b8309SEdward Pilatowicz 		struct dk_minfo media_info;
26057f0b8309SEdward Pilatowicz 
260665908c77Syu, larry liu - Sun Microsystems - Beijing China 		media_info.dki_lbsize = vdp->xdf_xdev_secsize;
26077f0b8309SEdward Pilatowicz 		media_info.dki_capacity = vdp->xdf_pgeom.g_capacity;
26087f0b8309SEdward Pilatowicz 		if (XD_IS_CD(vdp))
26097f0b8309SEdward Pilatowicz 			media_info.dki_media_type = DK_CDROM;
26107f0b8309SEdward Pilatowicz 		else
26117f0b8309SEdward Pilatowicz 			media_info.dki_media_type = DK_FIXED_DISK;
26127f0b8309SEdward Pilatowicz 
26137f0b8309SEdward Pilatowicz 		if (ddi_copyout(&media_info, (void *)arg,
26147f0b8309SEdward Pilatowicz 		    sizeof (struct dk_minfo), mode))
26157f0b8309SEdward Pilatowicz 			return (EFAULT);
26167f0b8309SEdward Pilatowicz 		return (0);
26177f0b8309SEdward Pilatowicz 	}
26187f0b8309SEdward Pilatowicz 	case DKIOCINFO: {
26197f0b8309SEdward Pilatowicz 		struct dk_cinfo info;
26207f0b8309SEdward Pilatowicz 
26217f0b8309SEdward Pilatowicz 		/* controller information */
26227f0b8309SEdward Pilatowicz 		if (XD_IS_CD(vdp))
26237f0b8309SEdward Pilatowicz 			info.dki_ctype = DKC_CDROM;
26247f0b8309SEdward Pilatowicz 		else
26257f0b8309SEdward Pilatowicz 			info.dki_ctype = DKC_VBD;
26267f0b8309SEdward Pilatowicz 
26277f0b8309SEdward Pilatowicz 		info.dki_cnum = 0;
26287f0b8309SEdward Pilatowicz 		(void) strncpy((char *)(&info.dki_cname), "xdf", 8);
26297f0b8309SEdward Pilatowicz 
26307f0b8309SEdward Pilatowicz 		/* unit information */
26317f0b8309SEdward Pilatowicz 		info.dki_unit = ddi_get_instance(vdp->xdf_dip);
26327f0b8309SEdward Pilatowicz 		(void) strncpy((char *)(&info.dki_dname), "xdf", 8);
26337f0b8309SEdward Pilatowicz 		info.dki_flags = DKI_FMTVOL;
26347f0b8309SEdward Pilatowicz 		info.dki_partition = part;
26357f0b8309SEdward Pilatowicz 		info.dki_maxtransfer = maxphys / DEV_BSIZE;
26367f0b8309SEdward Pilatowicz 		info.dki_addr = 0;
26377f0b8309SEdward Pilatowicz 		info.dki_space = 0;
26387f0b8309SEdward Pilatowicz 		info.dki_prio = 0;
26397f0b8309SEdward Pilatowicz 		info.dki_vec = 0;
26407f0b8309SEdward Pilatowicz 
26417f0b8309SEdward Pilatowicz 		if (ddi_copyout(&info, (void *)arg, sizeof (info), mode))
26427f0b8309SEdward Pilatowicz 			return (EFAULT);
26437f0b8309SEdward Pilatowicz 		return (0);
26447f0b8309SEdward Pilatowicz 	}
26457f0b8309SEdward Pilatowicz 	case DKIOCSTATE: {
26467f0b8309SEdward Pilatowicz 		enum dkio_state mstate;
26477f0b8309SEdward Pilatowicz 
26487f0b8309SEdward Pilatowicz 		if (ddi_copyin((void *)arg, &mstate,
26497f0b8309SEdward Pilatowicz 		    sizeof (mstate), mode) != 0)
26507f0b8309SEdward Pilatowicz 			return (EFAULT);
26517f0b8309SEdward Pilatowicz 		if ((rv = xdf_dkstate(vdp, mstate)) != 0)
26527f0b8309SEdward Pilatowicz 			return (rv);
26537f0b8309SEdward Pilatowicz 		mstate = vdp->xdf_mstate;
26547f0b8309SEdward Pilatowicz 		if (ddi_copyout(&mstate, (void *)arg,
26557f0b8309SEdward Pilatowicz 		    sizeof (mstate), mode) != 0)
26567f0b8309SEdward Pilatowicz 			return (EFAULT);
26577f0b8309SEdward Pilatowicz 		return (0);
26587f0b8309SEdward Pilatowicz 	}
26597f0b8309SEdward Pilatowicz 	case DKIOCREMOVABLE: {
26607f0b8309SEdward Pilatowicz 		int i = BOOLEAN2VOID(XD_IS_RM(vdp));
26617f0b8309SEdward Pilatowicz 		if (ddi_copyout(&i, (caddr_t)arg, sizeof (i), mode))
26627f0b8309SEdward Pilatowicz 			return (EFAULT);
26637f0b8309SEdward Pilatowicz 		return (0);
26647f0b8309SEdward Pilatowicz 	}
26657f0b8309SEdward Pilatowicz 	case DKIOCGETWCE: {
26667f0b8309SEdward Pilatowicz 		int i = BOOLEAN2VOID(vdp->xdf_wce);
26677f0b8309SEdward Pilatowicz 		if (ddi_copyout(&i, (void *)arg, sizeof (i), mode))
26687f0b8309SEdward Pilatowicz 			return (EFAULT);
26697f0b8309SEdward Pilatowicz 		return (0);
26707f0b8309SEdward Pilatowicz 	}
26717f0b8309SEdward Pilatowicz 	case DKIOCSETWCE: {
26727f0b8309SEdward Pilatowicz 		int i;
26737f0b8309SEdward Pilatowicz 		if (ddi_copyin((void *)arg, &i, sizeof (i), mode))
26747f0b8309SEdward Pilatowicz 			return (EFAULT);
26757f0b8309SEdward Pilatowicz 		vdp->xdf_wce = VOID2BOOLEAN(i);
26767f0b8309SEdward Pilatowicz 		return (0);
26777f0b8309SEdward Pilatowicz 	}
26787f0b8309SEdward Pilatowicz 	case DKIOCFLUSHWRITECACHE: {
26797f0b8309SEdward Pilatowicz 		struct dk_callback *dkc = (struct dk_callback *)arg;
26807f0b8309SEdward Pilatowicz 
26817f0b8309SEdward Pilatowicz 		if (vdp->xdf_flush_supported) {
26827f0b8309SEdward Pilatowicz 			rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
26837f0b8309SEdward Pilatowicz 			    NULL, 0, 0, (void *)dev);
26847f0b8309SEdward Pilatowicz 		} else if (vdp->xdf_feature_barrier &&
26857f0b8309SEdward Pilatowicz 		    !xdf_barrier_flush_disable) {
26867f0b8309SEdward Pilatowicz 			rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
26877f0b8309SEdward Pilatowicz 			    vdp->xdf_cache_flush_block, xdf_flush_block,
268865908c77Syu, larry liu - Sun Microsystems - Beijing China 			    vdp->xdf_xdev_secsize, (void *)dev);
26897f0b8309SEdward Pilatowicz 		} else {
26907f0b8309SEdward Pilatowicz 			return (ENOTTY);
26917f0b8309SEdward Pilatowicz 		}
26927f0b8309SEdward Pilatowicz 		if ((mode & FKIOCTL) && (dkc != NULL) &&
26937f0b8309SEdward Pilatowicz 		    (dkc->dkc_callback != NULL)) {
26947f0b8309SEdward Pilatowicz 			(*dkc->dkc_callback)(dkc->dkc_cookie, rv);
26957f0b8309SEdward Pilatowicz 			/* need to return 0 after calling callback */
26967f0b8309SEdward Pilatowicz 			rv = 0;
26977f0b8309SEdward Pilatowicz 		}
26987f0b8309SEdward Pilatowicz 		return (rv);
26997f0b8309SEdward Pilatowicz 	}
27007f0b8309SEdward Pilatowicz 	}
27017f0b8309SEdward Pilatowicz 	/*NOTREACHED*/
27027f0b8309SEdward Pilatowicz }
27037f0b8309SEdward Pilatowicz 
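/*
 * Main I/O entry point.  Validate the request (open state, read-only
 * device, partition bounds), convert b_blkno from DEV_BSIZE units to
 * backend sectors, trim transfers that run past the partition end, then
 * queue the buf and kick the ring.
 */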
27047f0b8309SEdward Pilatowicz static int
27057f0b8309SEdward Pilatowicz xdf_strategy(struct buf *bp)
27067f0b8309SEdward Pilatowicz {
27077f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
27087f0b8309SEdward Pilatowicz 	minor_t minor;
27097f0b8309SEdward Pilatowicz 	diskaddr_t p_blkct, p_blkst;
271065908c77Syu, larry liu - Sun Microsystems - Beijing China 	daddr_t blkno;
27117f0b8309SEdward Pilatowicz 	ulong_t nblks;
27127f0b8309SEdward Pilatowicz 	int part;
27137f0b8309SEdward Pilatowicz 
27147f0b8309SEdward Pilatowicz 	minor = getminor(bp->b_edev);
27157f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
27167f0b8309SEdward Pilatowicz 	vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor));
27177f0b8309SEdward Pilatowicz 
27187f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
27197f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part)) {
27207f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
27217f0b8309SEdward Pilatowicz 		xdf_io_err(bp, ENXIO, 0);
27227f0b8309SEdward Pilatowicz 		return (0);
27237f0b8309SEdward Pilatowicz 	}
27247f0b8309SEdward Pilatowicz 
27257f0b8309SEdward Pilatowicz 	/* We don't allow IO from the oe_change callback thread */
27267f0b8309SEdward Pilatowicz 	ASSERT(curthread != vdp->xdf_oe_change_thread);
27277f0b8309SEdward Pilatowicz 
27287f0b8309SEdward Pilatowicz 	/* Check for writes to a read only device */
27297f0b8309SEdward Pilatowicz 	if (!IS_READ(bp) && XD_IS_RO(vdp)) {
27307f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
27317f0b8309SEdward Pilatowicz 		xdf_io_err(bp, EROFS, 0);
27327f0b8309SEdward Pilatowicz 		return (0);
27337f0b8309SEdward Pilatowicz 	}
27347f0b8309SEdward Pilatowicz 
27357f0b8309SEdward Pilatowicz 	/* Check if this I/O is accessing a partition or the entire disk */
27367f0b8309SEdward Pilatowicz 	if ((long)bp->b_private == XB_SLICE_NONE) {
27377f0b8309SEdward Pilatowicz 		/* This I/O is using an absolute offset */
27387f0b8309SEdward Pilatowicz 		p_blkct = vdp->xdf_xdev_nblocks;
27397f0b8309SEdward Pilatowicz 		p_blkst = 0;
27407f0b8309SEdward Pilatowicz 	} else {
27417f0b8309SEdward Pilatowicz 		/* This I/O is using a partition relative offset */
27427f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
27437f0b8309SEdward Pilatowicz 		if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
27447f0b8309SEdward Pilatowicz 		    &p_blkst, NULL, NULL, NULL)) {
27457f0b8309SEdward Pilatowicz 			xdf_io_err(bp, ENXIO, 0);
27467f0b8309SEdward Pilatowicz 			return (0);
27477f0b8309SEdward Pilatowicz 		}
27487f0b8309SEdward Pilatowicz 		mutex_enter(&vdp->xdf_dev_lk);
27497f0b8309SEdward Pilatowicz 	}
27507f0b8309SEdward Pilatowicz 
275165908c77Syu, larry liu - Sun Microsystems - Beijing China 	/*
275265908c77Syu, larry liu - Sun Microsystems - Beijing China 	 * Adjust the real blkno and bcount according to the underlying
275365908c77Syu, larry liu - Sun Microsystems - Beijing China 	 * physical sector size.
275465908c77Syu, larry liu - Sun Microsystems - Beijing China 	 */
275565908c77Syu, larry liu - Sun Microsystems - Beijing China 	blkno = bp->b_blkno / (vdp->xdf_xdev_secsize / XB_BSIZE);
275665908c77Syu, larry liu - Sun Microsystems - Beijing China 
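	/*
	 * For example (purely illustrative): with a 4096-byte backend
	 * sector size, a b_blkno of 16 in 512-byte DEV_BSIZE units maps
	 * to backend block 16 / (4096 / 512) = 2.
	 */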
27577f0b8309SEdward Pilatowicz 	/* check for a starting block beyond the disk or partition limit */
275865908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (blkno > p_blkct) {
27597f0b8309SEdward Pilatowicz 		DPRINTF(IO_DBG, ("xdf@%s: block %lld exceeds VBD size %"PRIu64,
276065908c77Syu, larry liu - Sun Microsystems - Beijing China 		    vdp->xdf_addr, (longlong_t)blkno, (uint64_t)p_blkct));
276165908c77Syu, larry liu - Sun Microsystems - Beijing China 		mutex_exit(&vdp->xdf_dev_lk);
27627f0b8309SEdward Pilatowicz 		xdf_io_err(bp, EINVAL, 0);
27637f0b8309SEdward Pilatowicz 		return (0);
27647f0b8309SEdward Pilatowicz 	}
27657f0b8309SEdward Pilatowicz 
27667f0b8309SEdward Pilatowicz 	/* Legacy: don't set error flag in this case */
276765908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (blkno == p_blkct) {
276865908c77Syu, larry liu - Sun Microsystems - Beijing China 		mutex_exit(&vdp->xdf_dev_lk);
27697f0b8309SEdward Pilatowicz 		bp->b_resid = bp->b_bcount;
27707f0b8309SEdward Pilatowicz 		biodone(bp);
27717f0b8309SEdward Pilatowicz 		return (0);
27727f0b8309SEdward Pilatowicz 	}
27737f0b8309SEdward Pilatowicz 
27747f0b8309SEdward Pilatowicz 	/* sanitize the input buf */
27757f0b8309SEdward Pilatowicz 	bioerror(bp, 0);
27767f0b8309SEdward Pilatowicz 	bp->b_resid = 0;
27777f0b8309SEdward Pilatowicz 	bp->av_back = bp->av_forw = NULL;
27787f0b8309SEdward Pilatowicz 
27797f0b8309SEdward Pilatowicz 	/* Adjust for partial transfer; this will result in an error later */
278065908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (vdp->xdf_xdev_secsize != 0 &&
278165908c77Syu, larry liu - Sun Microsystems - Beijing China 	    vdp->xdf_xdev_secsize != XB_BSIZE) {
278265908c77Syu, larry liu - Sun Microsystems - Beijing China 		nblks = bp->b_bcount / vdp->xdf_xdev_secsize;
278365908c77Syu, larry liu - Sun Microsystems - Beijing China 	} else {
27847f0b8309SEdward Pilatowicz 		nblks = bp->b_bcount >> XB_BSHIFT;
278565908c77Syu, larry liu - Sun Microsystems - Beijing China 	}
278665908c77Syu, larry liu - Sun Microsystems - Beijing China 
278765908c77Syu, larry liu - Sun Microsystems - Beijing China 	if ((blkno + nblks) > p_blkct) {
278865908c77Syu, larry liu - Sun Microsystems - Beijing China 		if (vdp->xdf_xdev_secsize != 0 &&
278965908c77Syu, larry liu - Sun Microsystems - Beijing China 		    vdp->xdf_xdev_secsize != XB_BSIZE) {
279065908c77Syu, larry liu - Sun Microsystems - Beijing China 			bp->b_resid =
279165908c77Syu, larry liu - Sun Microsystems - Beijing China 			    ((blkno + nblks) - p_blkct) *
279265908c77Syu, larry liu - Sun Microsystems - Beijing China 			    vdp->xdf_xdev_secsize;
279365908c77Syu, larry liu - Sun Microsystems - Beijing China 		} else {
279465908c77Syu, larry liu - Sun Microsystems - Beijing China 			bp->b_resid =
279565908c77Syu, larry liu - Sun Microsystems - Beijing China 			    ((blkno + nblks) - p_blkct) <<
279665908c77Syu, larry liu - Sun Microsystems - Beijing China 			    XB_BSHIFT;
279765908c77Syu, larry liu - Sun Microsystems - Beijing China 		}
27987f0b8309SEdward Pilatowicz 		bp->b_bcount -= bp->b_resid;
27997f0b8309SEdward Pilatowicz 	}
28007f0b8309SEdward Pilatowicz 
28017f0b8309SEdward Pilatowicz 	DPRINTF(IO_DBG, ("xdf@%s: strategy blk %lld len %lu\n",
280265908c77Syu, larry liu - Sun Microsystems - Beijing China 	    vdp->xdf_addr, (longlong_t)blkno, (ulong_t)bp->b_bcount));
28037f0b8309SEdward Pilatowicz 
28047f0b8309SEdward Pilatowicz 	/* Fix up the buf struct */
28057f0b8309SEdward Pilatowicz 	bp->b_flags |= B_BUSY;
28067f0b8309SEdward Pilatowicz 	bp->b_private = (void *)(uintptr_t)p_blkst;
28077f0b8309SEdward Pilatowicz 
28087f0b8309SEdward Pilatowicz 	xdf_bp_push(vdp, bp);
28097f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
28107f0b8309SEdward Pilatowicz 	xdf_io_start(vdp);
28117f0b8309SEdward Pilatowicz 	if (do_polled_io)
28127f0b8309SEdward Pilatowicz 		(void) xdf_ring_drain(vdp);
28137f0b8309SEdward Pilatowicz 	return (0);
28147f0b8309SEdward Pilatowicz }
28157f0b8309SEdward Pilatowicz 
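/*
 * Character device read/write entry points.  Each resolves the target
 * partition, rejects transfers starting at or beyond the partition
 * limit, and hands off to physio()/aphysio(), which build bufs that are
 * funneled through xdf_strategy().
 */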
28167f0b8309SEdward Pilatowicz /*ARGSUSED*/
28177f0b8309SEdward Pilatowicz static int
28187f0b8309SEdward Pilatowicz xdf_read(dev_t dev, struct uio *uiop, cred_t *credp)
28197f0b8309SEdward Pilatowicz {
28207f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
28217f0b8309SEdward Pilatowicz 	minor_t minor;
28227f0b8309SEdward Pilatowicz 	diskaddr_t p_blkcnt;
28237f0b8309SEdward Pilatowicz 	int part;
28247f0b8309SEdward Pilatowicz 
28257f0b8309SEdward Pilatowicz 	minor = getminor(dev);
28267f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
28277f0b8309SEdward Pilatowicz 		return (ENXIO);
28287f0b8309SEdward Pilatowicz 
28297f0b8309SEdward Pilatowicz 	DPRINTF(IO_DBG, ("xdf@%s: read offset 0x%"PRIx64"\n",
28307f0b8309SEdward Pilatowicz 	    vdp->xdf_addr, (int64_t)uiop->uio_offset));
28317f0b8309SEdward Pilatowicz 
28327f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
28337f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part))
28347f0b8309SEdward Pilatowicz 		return (ENXIO);
28357f0b8309SEdward Pilatowicz 
28367f0b8309SEdward Pilatowicz 	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
28377f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL, NULL))
28387f0b8309SEdward Pilatowicz 		return (ENXIO);
28397f0b8309SEdward Pilatowicz 
284065908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
284165908c77Syu, larry liu - Sun Microsystems - Beijing China 		return (ENOSPC);
284265908c77Syu, larry liu - Sun Microsystems - Beijing China 
28437f0b8309SEdward Pilatowicz 	if (U_INVAL(uiop))
28447f0b8309SEdward Pilatowicz 		return (EINVAL);
28457f0b8309SEdward Pilatowicz 
28467f0b8309SEdward Pilatowicz 	return (physio(xdf_strategy, NULL, dev, B_READ, xdfmin, uiop));
28477f0b8309SEdward Pilatowicz }
28487f0b8309SEdward Pilatowicz 
28497f0b8309SEdward Pilatowicz /*ARGSUSED*/
28507f0b8309SEdward Pilatowicz static int
28517f0b8309SEdward Pilatowicz xdf_write(dev_t dev, struct uio *uiop, cred_t *credp)
28527f0b8309SEdward Pilatowicz {
28537f0b8309SEdward Pilatowicz 	xdf_t *vdp;
28547f0b8309SEdward Pilatowicz 	minor_t minor;
28557f0b8309SEdward Pilatowicz 	diskaddr_t p_blkcnt;
28567f0b8309SEdward Pilatowicz 	int part;
28577f0b8309SEdward Pilatowicz 
28587f0b8309SEdward Pilatowicz 	minor = getminor(dev);
28597f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
28607f0b8309SEdward Pilatowicz 		return (ENXIO);
28617f0b8309SEdward Pilatowicz 
28627f0b8309SEdward Pilatowicz 	DPRINTF(IO_DBG, ("xdf@%s: write offset 0x%"PRIx64"\n",
28637f0b8309SEdward Pilatowicz 	    vdp->xdf_addr, (int64_t)uiop->uio_offset));
28647f0b8309SEdward Pilatowicz 
28657f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
28667f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part))
28677f0b8309SEdward Pilatowicz 		return (ENXIO);
28687f0b8309SEdward Pilatowicz 
28697f0b8309SEdward Pilatowicz 	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
28707f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL, NULL))
28717f0b8309SEdward Pilatowicz 		return (ENXIO);
28727f0b8309SEdward Pilatowicz 
287365908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
28747f0b8309SEdward Pilatowicz 		return (ENOSPC);
28757f0b8309SEdward Pilatowicz 
28767f0b8309SEdward Pilatowicz 	if (U_INVAL(uiop))
28777f0b8309SEdward Pilatowicz 		return (EINVAL);
28787f0b8309SEdward Pilatowicz 
28797f0b8309SEdward Pilatowicz 	return (physio(xdf_strategy, NULL, dev, B_WRITE, xdfmin, uiop));
28807f0b8309SEdward Pilatowicz }
28817f0b8309SEdward Pilatowicz 
28827f0b8309SEdward Pilatowicz /*ARGSUSED*/
28837f0b8309SEdward Pilatowicz static int
28847f0b8309SEdward Pilatowicz xdf_aread(dev_t dev, struct aio_req *aiop, cred_t *credp)
28857f0b8309SEdward Pilatowicz {
28867f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
28877f0b8309SEdward Pilatowicz 	minor_t minor;
28887f0b8309SEdward Pilatowicz 	struct uio *uiop = aiop->aio_uio;
28897f0b8309SEdward Pilatowicz 	diskaddr_t p_blkcnt;
28907f0b8309SEdward Pilatowicz 	int part;
28917f0b8309SEdward Pilatowicz 
28927f0b8309SEdward Pilatowicz 	minor = getminor(dev);
28937f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
28947f0b8309SEdward Pilatowicz 		return (ENXIO);
28957f0b8309SEdward Pilatowicz 
28967f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
28977f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part))
28987f0b8309SEdward Pilatowicz 		return (ENXIO);
28997f0b8309SEdward Pilatowicz 
29007f0b8309SEdward Pilatowicz 	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
29017f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL, NULL))
29027f0b8309SEdward Pilatowicz 		return (ENXIO);
29037f0b8309SEdward Pilatowicz 
290465908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
29057f0b8309SEdward Pilatowicz 		return (ENOSPC);
29067f0b8309SEdward Pilatowicz 
29077f0b8309SEdward Pilatowicz 	if (U_INVAL(uiop))
29087f0b8309SEdward Pilatowicz 		return (EINVAL);
29097f0b8309SEdward Pilatowicz 
29107f0b8309SEdward Pilatowicz 	return (aphysio(xdf_strategy, anocancel, dev, B_READ, xdfmin, aiop));
29117f0b8309SEdward Pilatowicz }
29127f0b8309SEdward Pilatowicz 
29137f0b8309SEdward Pilatowicz /*ARGSUSED*/
29147f0b8309SEdward Pilatowicz static int
29157f0b8309SEdward Pilatowicz xdf_awrite(dev_t dev, struct aio_req *aiop, cred_t *credp)
29167f0b8309SEdward Pilatowicz {
29177f0b8309SEdward Pilatowicz 	xdf_t *vdp;
29187f0b8309SEdward Pilatowicz 	minor_t minor;
29197f0b8309SEdward Pilatowicz 	struct uio *uiop = aiop->aio_uio;
29207f0b8309SEdward Pilatowicz 	diskaddr_t p_blkcnt;
29217f0b8309SEdward Pilatowicz 	int part;
29227f0b8309SEdward Pilatowicz 
29237f0b8309SEdward Pilatowicz 	minor = getminor(dev);
29247f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
29257f0b8309SEdward Pilatowicz 		return (ENXIO);
29267f0b8309SEdward Pilatowicz 
29277f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
29287f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part))
29297f0b8309SEdward Pilatowicz 		return (ENXIO);
29307f0b8309SEdward Pilatowicz 
29317f0b8309SEdward Pilatowicz 	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
29327f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL, NULL))
29337f0b8309SEdward Pilatowicz 		return (ENXIO);
29347f0b8309SEdward Pilatowicz 
293565908c77Syu, larry liu - Sun Microsystems - Beijing China 	if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
29367f0b8309SEdward Pilatowicz 		return (ENOSPC);
29377f0b8309SEdward Pilatowicz 
29387f0b8309SEdward Pilatowicz 	if (U_INVAL(uiop))
29397f0b8309SEdward Pilatowicz 		return (EINVAL);
29407f0b8309SEdward Pilatowicz 
29417f0b8309SEdward Pilatowicz 	return (aphysio(xdf_strategy, anocancel, dev, B_WRITE, xdfmin, aiop));
29427f0b8309SEdward Pilatowicz }
29437f0b8309SEdward Pilatowicz 
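/*
 * Crash dump entry point.  Dumps run in a polled context, so after the
 * buf is queued the request ring is drained directly rather than
 * waiting for a completion interrupt.
 */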
29447f0b8309SEdward Pilatowicz static int
29457f0b8309SEdward Pilatowicz xdf_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
29467f0b8309SEdward Pilatowicz {
29477f0b8309SEdward Pilatowicz 	struct buf dumpbuf, *dbp = &dumpbuf;
29487f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
29497f0b8309SEdward Pilatowicz 	minor_t minor;
29507f0b8309SEdward Pilatowicz 	int err = 0;
29517f0b8309SEdward Pilatowicz 	int part;
29527f0b8309SEdward Pilatowicz 	diskaddr_t p_blkcnt, p_blkst;
29537f0b8309SEdward Pilatowicz 
29547f0b8309SEdward Pilatowicz 	minor = getminor(dev);
29557f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
29567f0b8309SEdward Pilatowicz 		return (ENXIO);
29577f0b8309SEdward Pilatowicz 
29587f0b8309SEdward Pilatowicz 	DPRINTF(IO_DBG, ("xdf@%s: dump addr (0x%p) blk (%ld) nblks (%d)\n",
29597f0b8309SEdward Pilatowicz 	    vdp->xdf_addr, (void *)addr, blkno, nblk));
29607f0b8309SEdward Pilatowicz 
29617f0b8309SEdward Pilatowicz 	/* We don't allow IO from the oe_change callback thread */
29627f0b8309SEdward Pilatowicz 	ASSERT(curthread != vdp->xdf_oe_change_thread);
29637f0b8309SEdward Pilatowicz 
29647f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
29657f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part))
29667f0b8309SEdward Pilatowicz 		return (ENXIO);
29677f0b8309SEdward Pilatowicz 
29687f0b8309SEdward Pilatowicz 	if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt, &p_blkst,
29697f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL))
29707f0b8309SEdward Pilatowicz 		return (ENXIO);
29717f0b8309SEdward Pilatowicz 
297265908c77Syu, larry liu - Sun Microsystems - Beijing China 	if ((blkno + nblk) >
297365908c77Syu, larry liu - Sun Microsystems - Beijing China 	    (p_blkcnt * (vdp->xdf_xdev_secsize / XB_BSIZE))) {
29747f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf@%s: block %ld exceeds VBD size %"PRIu64,
297565908c77Syu, larry liu - Sun Microsystems - Beijing China 		    vdp->xdf_addr, (daddr_t)((blkno + nblk) /
297665908c77Syu, larry liu - Sun Microsystems - Beijing China 		    (vdp->xdf_xdev_secsize / XB_BSIZE)), (uint64_t)p_blkcnt);
29777f0b8309SEdward Pilatowicz 		return (EINVAL);
29787f0b8309SEdward Pilatowicz 	}
29797f0b8309SEdward Pilatowicz 
29807f0b8309SEdward Pilatowicz 	bioinit(dbp);
29817f0b8309SEdward Pilatowicz 	dbp->b_flags = B_BUSY;
29827f0b8309SEdward Pilatowicz 	dbp->b_un.b_addr = addr;
29837f0b8309SEdward Pilatowicz 	dbp->b_bcount = nblk << DEV_BSHIFT;
29847f0b8309SEdward Pilatowicz 	dbp->b_blkno = blkno;
29857f0b8309SEdward Pilatowicz 	dbp->b_edev = dev;
29867f0b8309SEdward Pilatowicz 	dbp->b_private = (void *)(uintptr_t)p_blkst;
29877f0b8309SEdward Pilatowicz 
29887f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
29897f0b8309SEdward Pilatowicz 	xdf_bp_push(vdp, dbp);
29907f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
29917f0b8309SEdward Pilatowicz 	xdf_io_start(vdp);
29927f0b8309SEdward Pilatowicz 	err = xdf_ring_drain(vdp);
29937f0b8309SEdward Pilatowicz 	biofini(dbp);
29947f0b8309SEdward Pilatowicz 	return (err);
29957f0b8309SEdward Pilatowicz }
29967f0b8309SEdward Pilatowicz 
29977f0b8309SEdward Pilatowicz /*ARGSUSED*/
29987f0b8309SEdward Pilatowicz static int
29997f0b8309SEdward Pilatowicz xdf_close(dev_t dev, int flag, int otyp, struct cred *credp)
30007f0b8309SEdward Pilatowicz {
30017f0b8309SEdward Pilatowicz 	minor_t	minor;
30027f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
30037f0b8309SEdward Pilatowicz 	int part;
30047f0b8309SEdward Pilatowicz 	ulong_t parbit;
30057f0b8309SEdward Pilatowicz 
30067f0b8309SEdward Pilatowicz 	minor = getminor(dev);
30077f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
30087f0b8309SEdward Pilatowicz 		return (ENXIO);
30097f0b8309SEdward Pilatowicz 
30107f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
30117f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
30127f0b8309SEdward Pilatowicz 	if (!xdf_isopen(vdp, part)) {
30137f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
30147f0b8309SEdward Pilatowicz 		return (ENXIO);
30157f0b8309SEdward Pilatowicz 	}
30167f0b8309SEdward Pilatowicz 	parbit = 1 << part;
30177f0b8309SEdward Pilatowicz 
30187f0b8309SEdward Pilatowicz 	ASSERT((vdp->xdf_vd_open[otyp] & parbit) != 0);
30197f0b8309SEdward Pilatowicz 	if (otyp == OTYP_LYR) {
30207f0b8309SEdward Pilatowicz 		ASSERT(vdp->xdf_vd_lyropen[part] > 0);
30217f0b8309SEdward Pilatowicz 		if (--vdp->xdf_vd_lyropen[part] == 0)
30227f0b8309SEdward Pilatowicz 			vdp->xdf_vd_open[otyp] &= ~parbit;
30237f0b8309SEdward Pilatowicz 	} else {
30247f0b8309SEdward Pilatowicz 		vdp->xdf_vd_open[otyp] &= ~parbit;
30257f0b8309SEdward Pilatowicz 	}
30267f0b8309SEdward Pilatowicz 	vdp->xdf_vd_exclopen &= ~parbit;
30277f0b8309SEdward Pilatowicz 
30287f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
30297f0b8309SEdward Pilatowicz 	return (0);
30307f0b8309SEdward Pilatowicz }
30317f0b8309SEdward Pilatowicz 
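/*
 * Open bookkeeping: layered opens are counted per partition in
 * xdf_vd_lyropen[], all open types are tracked as partition bitmasks in
 * xdf_vd_open[], and exclusive opens are recorded in xdf_vd_exclopen.
 * A blocking open also waits for the device to reach XD_READY.
 */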
30327f0b8309SEdward Pilatowicz static int
30337f0b8309SEdward Pilatowicz xdf_open(dev_t *devp, int flag, int otyp, cred_t *credp)
30347f0b8309SEdward Pilatowicz {
30357f0b8309SEdward Pilatowicz 	minor_t	minor;
30367f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
30377f0b8309SEdward Pilatowicz 	int part;
30387f0b8309SEdward Pilatowicz 	ulong_t parbit;
30397f0b8309SEdward Pilatowicz 	diskaddr_t p_blkct = 0;
30407f0b8309SEdward Pilatowicz 	boolean_t firstopen;
30417f0b8309SEdward Pilatowicz 	boolean_t nodelay;
30427f0b8309SEdward Pilatowicz 
30437f0b8309SEdward Pilatowicz 	minor = getminor(*devp);
30447f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
30457f0b8309SEdward Pilatowicz 		return (ENXIO);
30467f0b8309SEdward Pilatowicz 
30477f0b8309SEdward Pilatowicz 	nodelay = (flag & (FNDELAY | FNONBLOCK));
30487f0b8309SEdward Pilatowicz 
30497f0b8309SEdward Pilatowicz 	DPRINTF(DDI_DBG, ("xdf@%s: opening\n", vdp->xdf_addr));
30507f0b8309SEdward Pilatowicz 
30517f0b8309SEdward Pilatowicz 	/* do cv_wait until connected or failed */
30527f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
30537f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
30547f0b8309SEdward Pilatowicz 	if (!nodelay && (xdf_connect_locked(vdp, B_TRUE) != XD_READY)) {
30557f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
30567f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
30577f0b8309SEdward Pilatowicz 		return (ENXIO);
30587f0b8309SEdward Pilatowicz 	}
30597f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
30607f0b8309SEdward Pilatowicz 
30617f0b8309SEdward Pilatowicz 	if ((flag & FWRITE) && XD_IS_RO(vdp)) {
30627f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
30637f0b8309SEdward Pilatowicz 		return (EROFS);
30647f0b8309SEdward Pilatowicz 	}
30657f0b8309SEdward Pilatowicz 
30667f0b8309SEdward Pilatowicz 	part = XDF_PART(minor);
30677f0b8309SEdward Pilatowicz 	parbit = 1 << part;
30687f0b8309SEdward Pilatowicz 	if ((vdp->xdf_vd_exclopen & parbit) ||
30697f0b8309SEdward Pilatowicz 	    ((flag & FEXCL) && xdf_isopen(vdp, part))) {
30707f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_dev_lk);
30717f0b8309SEdward Pilatowicz 		return (EBUSY);
30727f0b8309SEdward Pilatowicz 	}
30737f0b8309SEdward Pilatowicz 
30747f0b8309SEdward Pilatowicz 	/* are we the first one to open this node? */
30757f0b8309SEdward Pilatowicz 	firstopen = !xdf_isopen(vdp, -1);
30767f0b8309SEdward Pilatowicz 
30777f0b8309SEdward Pilatowicz 	if (otyp == OTYP_LYR)
30787f0b8309SEdward Pilatowicz 		vdp->xdf_vd_lyropen[part]++;
30797f0b8309SEdward Pilatowicz 
30807f0b8309SEdward Pilatowicz 	vdp->xdf_vd_open[otyp] |= parbit;
30817f0b8309SEdward Pilatowicz 
30827f0b8309SEdward Pilatowicz 	if (flag & FEXCL)
30837f0b8309SEdward Pilatowicz 		vdp->xdf_vd_exclopen |= parbit;
30847f0b8309SEdward Pilatowicz 
30857f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
30867f0b8309SEdward Pilatowicz 
30877f0b8309SEdward Pilatowicz 	/* force a re-validation */
30887f0b8309SEdward Pilatowicz 	if (firstopen)
30897f0b8309SEdward Pilatowicz 		cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
30907f0b8309SEdward Pilatowicz 
30917f0b8309SEdward Pilatowicz 	/* If this is a non-blocking open then we're done */
30927f0b8309SEdward Pilatowicz 	if (nodelay)
30937f0b8309SEdward Pilatowicz 		return (0);
30947f0b8309SEdward Pilatowicz 
30957f0b8309SEdward Pilatowicz 	/*
30967f0b8309SEdward Pilatowicz 	 * This is a blocking open, so we require:
30977f0b8309SEdward Pilatowicz 	 * - that the disk have a valid label on it
30987f0b8309SEdward Pilatowicz 	 * - that the size of the partition that we're opening is non-zero
30997f0b8309SEdward Pilatowicz 	 */
31007f0b8309SEdward Pilatowicz 	if ((cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
31017f0b8309SEdward Pilatowicz 	    NULL, NULL, NULL, NULL) != 0) || (p_blkct == 0)) {
31027f0b8309SEdward Pilatowicz 		(void) xdf_close(*devp, flag, otyp, credp);
31037f0b8309SEdward Pilatowicz 		return (ENXIO);
31047f0b8309SEdward Pilatowicz 	}
31057f0b8309SEdward Pilatowicz 
31067f0b8309SEdward Pilatowicz 	return (0);
31077f0b8309SEdward Pilatowicz }
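
/*
 * Illustrative sketch, not part of this driver and never compiled:
 * layered (OTYP_LYR) opens are counted per partition because a single
 * partition may be opened many times by in-kernel consumers, each
 * paired with its own close; OTYP_BLK/OTYP_CHR opens collapse into a
 * single bit per partition because the framework delivers only a
 * last-close.  The demo_* names are hypothetical; the real state
 * lives in xdf_t.
 */
#if 0
static uint_t	demo_lyropen[64];	/* per-partition layered count */
static ulong_t	demo_open[OTYPCNT];	/* per-otyp partition bitmask */

static void
demo_close_one(int otyp, int part)
{
	ulong_t parbit = 1UL << part;

	if (otyp == OTYP_LYR) {
		/* clear the bit only when the last layered open goes */
		if (--demo_lyropen[part] == 0)
			demo_open[otyp] &= ~parbit;
	} else {
		demo_open[otyp] &= ~parbit;
	}
}
#endif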
31087f0b8309SEdward Pilatowicz 
31097f0b8309SEdward Pilatowicz /*ARGSUSED*/
31107f0b8309SEdward Pilatowicz static void
31117f0b8309SEdward Pilatowicz xdf_watch_hp_status_cb(dev_info_t *dip, const char *path, void *arg)
31127f0b8309SEdward Pilatowicz {
31137f0b8309SEdward Pilatowicz 	xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
31147f0b8309SEdward Pilatowicz 	cv_broadcast(&vdp->xdf_hp_status_cv);
31157f0b8309SEdward Pilatowicz }
31167f0b8309SEdward Pilatowicz 
31177f0b8309SEdward Pilatowicz static int
31187f0b8309SEdward Pilatowicz xdf_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int flags,
31197f0b8309SEdward Pilatowicz     char *name, caddr_t valuep, int *lengthp)
31207f0b8309SEdward Pilatowicz {
31217f0b8309SEdward Pilatowicz 	xdf_t	*vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
31227f0b8309SEdward Pilatowicz 
31237f0b8309SEdward Pilatowicz 	/*
31247f0b8309SEdward Pilatowicz 	 * Sanity check that any dev_t or dip specified corresponds to
31257f0b8309SEdward Pilatowicz 	 * this device driver.  On debug kernels we'll panic; on
31267f0b8309SEdward Pilatowicz 	 * non-debug kernels we'll return failure.
31277f0b8309SEdward Pilatowicz 	 */
31287f0b8309SEdward Pilatowicz 	ASSERT(ddi_driver_major(dip) == xdf_major);
31297f0b8309SEdward Pilatowicz 	ASSERT((dev == DDI_DEV_T_ANY) || (getmajor(dev) == xdf_major));
31307f0b8309SEdward Pilatowicz 	if ((ddi_driver_major(dip) != xdf_major) ||
31317f0b8309SEdward Pilatowicz 	    ((dev != DDI_DEV_T_ANY) && (getmajor(dev) != xdf_major)))
31327f0b8309SEdward Pilatowicz 		return (DDI_PROP_NOT_FOUND);
31337f0b8309SEdward Pilatowicz 
31347f0b8309SEdward Pilatowicz 	if (vdp == NULL)
31357f0b8309SEdward Pilatowicz 		return (ddi_prop_op(dev, dip, prop_op, flags,
31367f0b8309SEdward Pilatowicz 		    name, valuep, lengthp));
31377f0b8309SEdward Pilatowicz 
31387f0b8309SEdward Pilatowicz 	return (cmlb_prop_op(vdp->xdf_vd_lbl,
31397f0b8309SEdward Pilatowicz 	    dev, dip, prop_op, flags, name, valuep, lengthp,
31407f0b8309SEdward Pilatowicz 	    XDF_PART(getminor(dev)), NULL));
31417f0b8309SEdward Pilatowicz }
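
/*
 * Illustrative sketch, not part of this driver and never compiled, of
 * the sanity-check idiom used in xdf_prop_op() above: ASSERT() fires
 * only on DEBUG kernels, so the same condition is re-tested at run
 * time to fail gracefully on non-debug kernels.  demo_major_ok() is a
 * hypothetical helper.
 */
#if 0
static boolean_t
demo_major_ok(dev_info_t *dip, major_t my_major)
{
	ASSERT(ddi_driver_major(dip) == my_major);	/* debug: panic */
	if (ddi_driver_major(dip) != my_major)		/* non-debug */
		return (B_FALSE);
	return (B_TRUE);
}
#endif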
31427f0b8309SEdward Pilatowicz 
31437f0b8309SEdward Pilatowicz /*ARGSUSED*/
31447f0b8309SEdward Pilatowicz static int
31457f0b8309SEdward Pilatowicz xdf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
31467f0b8309SEdward Pilatowicz {
31477f0b8309SEdward Pilatowicz 	int	instance = XDF_INST(getminor((dev_t)arg));
31487f0b8309SEdward Pilatowicz 	xdf_t	*vbdp;
31497f0b8309SEdward Pilatowicz 
31507f0b8309SEdward Pilatowicz 	switch (cmd) {
31517f0b8309SEdward Pilatowicz 	case DDI_INFO_DEVT2DEVINFO:
31527f0b8309SEdward Pilatowicz 		if ((vbdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL) {
31537f0b8309SEdward Pilatowicz 			*rp = NULL;
31547f0b8309SEdward Pilatowicz 			return (DDI_FAILURE);
31557f0b8309SEdward Pilatowicz 		}
31567f0b8309SEdward Pilatowicz 		*rp = vbdp->xdf_dip;
31577f0b8309SEdward Pilatowicz 		return (DDI_SUCCESS);
31587f0b8309SEdward Pilatowicz 
31597f0b8309SEdward Pilatowicz 	case DDI_INFO_DEVT2INSTANCE:
31607f0b8309SEdward Pilatowicz 		*rp = (void *)(uintptr_t)instance;
31617f0b8309SEdward Pilatowicz 		return (DDI_SUCCESS);
31627f0b8309SEdward Pilatowicz 
31637f0b8309SEdward Pilatowicz 	default:
31647f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
31657f0b8309SEdward Pilatowicz 	}
31667f0b8309SEdward Pilatowicz }
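
/*
 * Note that DDI_INFO_DEVT2INSTANCE above deliberately avoids the soft
 * state lookup: the instance is recoverable from the minor number
 * alone, and the framework may issue this query before the instance
 * has finished attaching.
 */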
31677f0b8309SEdward Pilatowicz 
31687f0b8309SEdward Pilatowicz /*ARGSUSED*/
31697f0b8309SEdward Pilatowicz static int
31707f0b8309SEdward Pilatowicz xdf_resume(dev_info_t *dip)
31717f0b8309SEdward Pilatowicz {
31727f0b8309SEdward Pilatowicz 	xdf_t	*vdp;
31737f0b8309SEdward Pilatowicz 	char	*oename;
31747f0b8309SEdward Pilatowicz 
31757f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
31767f0b8309SEdward Pilatowicz 		goto err;
31777f0b8309SEdward Pilatowicz 
31787f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
31797f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_resume\n", vdp->xdf_addr);
31807f0b8309SEdward Pilatowicz 
31817f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
31827f0b8309SEdward Pilatowicz 
31837f0b8309SEdward Pilatowicz 	if (xvdi_resume(dip) != DDI_SUCCESS) {
31847f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
31857f0b8309SEdward Pilatowicz 		goto err;
31867f0b8309SEdward Pilatowicz 	}
31877f0b8309SEdward Pilatowicz 
31887f0b8309SEdward Pilatowicz 	if (((oename = xvdi_get_oename(dip)) == NULL) ||
31897f0b8309SEdward Pilatowicz 	    (xvdi_add_xb_watch_handler(dip, oename, XBP_HP_STATUS,
31907f0b8309SEdward Pilatowicz 	    xdf_watch_hp_status_cb, NULL) != DDI_SUCCESS)) {
31917f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
31927f0b8309SEdward Pilatowicz 		goto err;
31937f0b8309SEdward Pilatowicz 	}
31947f0b8309SEdward Pilatowicz 
31957f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
31967f0b8309SEdward Pilatowicz 	ASSERT(vdp->xdf_state != XD_READY);
31977f0b8309SEdward Pilatowicz 	xdf_set_state(vdp, XD_UNKNOWN);
31987f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
31997f0b8309SEdward Pilatowicz 
32007f0b8309SEdward Pilatowicz 	if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
32017f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
32027f0b8309SEdward Pilatowicz 		goto err;
32037f0b8309SEdward Pilatowicz 	}
32047f0b8309SEdward Pilatowicz 
32057f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
32067f0b8309SEdward Pilatowicz 
32077f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
32087f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_resume: done\n", vdp->xdf_addr);
32097f0b8309SEdward Pilatowicz 	return (DDI_SUCCESS);
32107f0b8309SEdward Pilatowicz err:
32117f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
32127f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_resume: fail\n", vdp->xdf_addr);
32137f0b8309SEdward Pilatowicz 	return (DDI_FAILURE);
32147f0b8309SEdward Pilatowicz }
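
/*
 * Note that xdf_resume() re-registers the XBP_HP_STATUS xenbus watch
 * (compare the identical xvdi_add_xb_watch_handler() call in
 * xdf_attach()) since the watch does not survive suspend/resume.
 */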
32157f0b8309SEdward Pilatowicz 
32167f0b8309SEdward Pilatowicz static int
32177f0b8309SEdward Pilatowicz xdf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
32187f0b8309SEdward Pilatowicz {
32197f0b8309SEdward Pilatowicz 	int			n, instance = ddi_get_instance(dip);
32207f0b8309SEdward Pilatowicz 	ddi_iblock_cookie_t	ibc, softibc;
32217f0b8309SEdward Pilatowicz 	boolean_t		dev_iscd = B_FALSE;
32227f0b8309SEdward Pilatowicz 	xdf_t			*vdp;
32237f0b8309SEdward Pilatowicz 	char			*oename, *xsname, *str;
32247f0b8309SEdward Pilatowicz 
32257f0b8309SEdward Pilatowicz 	if ((n = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_NOTPROM,
32267f0b8309SEdward Pilatowicz 	    "xdf_debug", 0)) != 0)
32277f0b8309SEdward Pilatowicz 		xdf_debug = n;
32287f0b8309SEdward Pilatowicz 
32297f0b8309SEdward Pilatowicz 	switch (cmd) {
32307f0b8309SEdward Pilatowicz 	case DDI_RESUME:
32317f0b8309SEdward Pilatowicz 		return (xdf_resume(dip));
32327f0b8309SEdward Pilatowicz 	case DDI_ATTACH:
32337f0b8309SEdward Pilatowicz 		break;
32347f0b8309SEdward Pilatowicz 	default:
32357f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32367f0b8309SEdward Pilatowicz 	}
32377f0b8309SEdward Pilatowicz 	/* DDI_ATTACH */
32387f0b8309SEdward Pilatowicz 
32397f0b8309SEdward Pilatowicz 	if (((xsname = xvdi_get_xsname(dip)) == NULL) ||
32407f0b8309SEdward Pilatowicz 	    ((oename = xvdi_get_oename(dip)) == NULL))
32417f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32427f0b8309SEdward Pilatowicz 
32437f0b8309SEdward Pilatowicz 	/*
32447f0b8309SEdward Pilatowicz 	 * Disable auto-detach.  This is necessary so that we don't get
32457f0b8309SEdward Pilatowicz 	 * detached while we're disconnected from the back end.
32467f0b8309SEdward Pilatowicz 	 */
32477f0b8309SEdward Pilatowicz 	if ((ddi_prop_update_int(DDI_DEV_T_NONE, dip,
32487f0b8309SEdward Pilatowicz 	    DDI_NO_AUTODETACH, 1) != DDI_PROP_SUCCESS))
32497f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32507f0b8309SEdward Pilatowicz 
32517f0b8309SEdward Pilatowicz 	/* driver handles kernel-issued IOCTLs */
32527f0b8309SEdward Pilatowicz 	if (ddi_prop_create(DDI_DEV_T_NONE, dip,
32537f0b8309SEdward Pilatowicz 	    DDI_PROP_CANSLEEP, DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS)
32547f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32557f0b8309SEdward Pilatowicz 
32567f0b8309SEdward Pilatowicz 	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS)
32577f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32587f0b8309SEdward Pilatowicz 
32597f0b8309SEdward Pilatowicz 	if (ddi_get_soft_iblock_cookie(dip,
32607f0b8309SEdward Pilatowicz 	    DDI_SOFTINT_LOW, &softibc) != DDI_SUCCESS)
32617f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32627f0b8309SEdward Pilatowicz 
32637f0b8309SEdward Pilatowicz 	if (xenbus_read_str(xsname, XBP_DEV_TYPE, &str) != 0) {
32647f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf@%s: cannot read device-type",
32657f0b8309SEdward Pilatowicz 		    ddi_get_name_addr(dip));
32667f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32677f0b8309SEdward Pilatowicz 	}
32687f0b8309SEdward Pilatowicz 	if (strcmp(str, XBV_DEV_TYPE_CD) == 0)
32697f0b8309SEdward Pilatowicz 		dev_iscd = B_TRUE;
32707f0b8309SEdward Pilatowicz 	strfree(str);
32717f0b8309SEdward Pilatowicz 
32727f0b8309SEdward Pilatowicz 	if (ddi_soft_state_zalloc(xdf_ssp, instance) != DDI_SUCCESS)
32737f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
32747f0b8309SEdward Pilatowicz 
32757f0b8309SEdward Pilatowicz 	DPRINTF(DDI_DBG, ("xdf@%s: attaching\n", ddi_get_name_addr(dip)));
32767f0b8309SEdward Pilatowicz 	vdp = ddi_get_soft_state(xdf_ssp, instance);
32777f0b8309SEdward Pilatowicz 	ddi_set_driver_private(dip, vdp);
32787f0b8309SEdward Pilatowicz 	vdp->xdf_dip = dip;
32797f0b8309SEdward Pilatowicz 	vdp->xdf_addr = ddi_get_name_addr(dip);
32807f0b8309SEdward Pilatowicz 	vdp->xdf_suspending = B_FALSE;
32817f0b8309SEdward Pilatowicz 	vdp->xdf_media_req_supported = B_FALSE;
32827f0b8309SEdward Pilatowicz 	vdp->xdf_peer = INVALID_DOMID;
32837f0b8309SEdward Pilatowicz 	vdp->xdf_evtchn = INVALID_EVTCHN;
32847f0b8309SEdward Pilatowicz 	list_create(&vdp->xdf_vreq_act, sizeof (v_req_t),
32857f0b8309SEdward Pilatowicz 	    offsetof(v_req_t, v_link));
32867f0b8309SEdward Pilatowicz 	cv_init(&vdp->xdf_dev_cv, NULL, CV_DEFAULT, NULL);
32877f0b8309SEdward Pilatowicz 	cv_init(&vdp->xdf_hp_status_cv, NULL, CV_DEFAULT, NULL);
32887f0b8309SEdward Pilatowicz 	cv_init(&vdp->xdf_mstate_cv, NULL, CV_DEFAULT, NULL);
32897f0b8309SEdward Pilatowicz 	mutex_init(&vdp->xdf_dev_lk, NULL, MUTEX_DRIVER, (void *)ibc);
32907f0b8309SEdward Pilatowicz 	mutex_init(&vdp->xdf_cb_lk, NULL, MUTEX_DRIVER, (void *)ibc);
32917f0b8309SEdward Pilatowicz 	mutex_init(&vdp->xdf_iostat_lk, NULL, MUTEX_DRIVER, (void *)ibc);
32927f0b8309SEdward Pilatowicz 	vdp->xdf_cmbl_reattach = B_TRUE;
32937f0b8309SEdward Pilatowicz 	if (dev_iscd) {
32947f0b8309SEdward Pilatowicz 		vdp->xdf_dinfo |= VDISK_CDROM;
32957f0b8309SEdward Pilatowicz 		vdp->xdf_mstate = DKIO_EJECTED;
32967f0b8309SEdward Pilatowicz 	} else {
32977f0b8309SEdward Pilatowicz 		vdp->xdf_mstate = DKIO_NONE;
32987f0b8309SEdward Pilatowicz 	}
32997f0b8309SEdward Pilatowicz 
33007f0b8309SEdward Pilatowicz 	if ((vdp->xdf_ready_tq = ddi_taskq_create(dip, "xdf_ready_tq",
33017f0b8309SEdward Pilatowicz 	    1, TASKQ_DEFAULTPRI, 0)) == NULL)
33027f0b8309SEdward Pilatowicz 		goto errout0;
33037f0b8309SEdward Pilatowicz 
33047f0b8309SEdward Pilatowicz 	if (xvdi_add_xb_watch_handler(dip, oename, XBP_HP_STATUS,
33057f0b8309SEdward Pilatowicz 	    xdf_watch_hp_status_cb, NULL) != DDI_SUCCESS)
33067f0b8309SEdward Pilatowicz 		goto errout0;
33077f0b8309SEdward Pilatowicz 
33087f0b8309SEdward Pilatowicz 	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &vdp->xdf_softintr_id,
33097f0b8309SEdward Pilatowicz 	    &softibc, NULL, xdf_iorestart, (caddr_t)vdp) != DDI_SUCCESS) {
33107f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf@%s: failed to add softintr",
33117f0b8309SEdward Pilatowicz 		    ddi_get_name_addr(dip));
33127f0b8309SEdward Pilatowicz 		goto errout0;
33137f0b8309SEdward Pilatowicz 	}
33147f0b8309SEdward Pilatowicz 
33157f0b8309SEdward Pilatowicz 	/*
33167f0b8309SEdward Pilatowicz 	 * Initialize the physical geometry structure.  Note that currently
33177f0b8309SEdward Pilatowicz 	 * we don't know the size of the backend device so the number
33187f0b8309SEdward Pilatowicz 	 * of blocks on the device will be initialized to zero.  Once
33197f0b8309SEdward Pilatowicz 	 * we connect to the backend device we'll update the physical
33207f0b8309SEdward Pilatowicz 	 * geometry to reflect the real size of the device.
33217f0b8309SEdward Pilatowicz 	 */
33227f0b8309SEdward Pilatowicz 	xdf_synthetic_pgeom(dip, &vdp->xdf_pgeom);
33237f0b8309SEdward Pilatowicz 	vdp->xdf_pgeom_fixed = B_FALSE;
33247f0b8309SEdward Pilatowicz 
33257f0b8309SEdward Pilatowicz 	/*
33267f0b8309SEdward Pilatowicz 	 * Create the default device minor nodes for a non-removable disk;
33277f0b8309SEdward Pilatowicz 	 * we will adjust the minor nodes after we are connected to the backend.
33287f0b8309SEdward Pilatowicz 	 */
33297f0b8309SEdward Pilatowicz 	cmlb_alloc_handle(&vdp->xdf_vd_lbl);
33307f0b8309SEdward Pilatowicz 	if (xdf_cmlb_attach(vdp) != 0) {
33317f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN,
33327f0b8309SEdward Pilatowicz 		    "xdf@%s: attach failed, cmlb attach failed",
33337f0b8309SEdward Pilatowicz 		    ddi_get_name_addr(dip));
33347f0b8309SEdward Pilatowicz 		goto errout0;
33357f0b8309SEdward Pilatowicz 	}
33367f0b8309SEdward Pilatowicz 
33377f0b8309SEdward Pilatowicz 	/*
33387f0b8309SEdward Pilatowicz 	 * We ship with cache-enabled disks
33397f0b8309SEdward Pilatowicz 	 */
33407f0b8309SEdward Pilatowicz 	vdp->xdf_wce = B_TRUE;
33417f0b8309SEdward Pilatowicz 
33427f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
33437f0b8309SEdward Pilatowicz 	/* Watch backend XenbusState change */
33447f0b8309SEdward Pilatowicz 	if (xvdi_add_event_handler(dip,
33457f0b8309SEdward Pilatowicz 	    XS_OE_STATE, xdf_oe_change, NULL) != DDI_SUCCESS) {
33467f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
33477f0b8309SEdward Pilatowicz 		goto errout0;
33487f0b8309SEdward Pilatowicz 	}
33497f0b8309SEdward Pilatowicz 
33507f0b8309SEdward Pilatowicz 	if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
33517f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf@%s: start connection failed",
33527f0b8309SEdward Pilatowicz 		    ddi_get_name_addr(dip));
33537f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
33547f0b8309SEdward Pilatowicz 		goto errout1;
33557f0b8309SEdward Pilatowicz 	}
33567f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
33577f0b8309SEdward Pilatowicz 
33587f0b8309SEdward Pilatowicz #if defined(XPV_HVM_DRIVER)
33597f0b8309SEdward Pilatowicz 
33607f0b8309SEdward Pilatowicz 	xdf_hvm_add(dip);
33617f0b8309SEdward Pilatowicz 
33627f0b8309SEdward Pilatowicz 	/* Report our version to dom0.  */
3363349b53ddSStuart Maybee 	if (xenbus_printf(XBT_NULL, "guest/xdf", "version", "%d",
33647f0b8309SEdward Pilatowicz 	    HVMPV_XDF_VERS))
33657f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf: couldn't write version\n");
33667f0b8309SEdward Pilatowicz 
33677f0b8309SEdward Pilatowicz #else /* !XPV_HVM_DRIVER */
33687f0b8309SEdward Pilatowicz 
33697f0b8309SEdward Pilatowicz 	/* create kstat for iostat(1M) */
33707f0b8309SEdward Pilatowicz 	if (xdf_kstat_create(dip, "xdf", instance) != 0) {
33717f0b8309SEdward Pilatowicz 		cmn_err(CE_WARN, "xdf@%s: failed to create kstat",
33727f0b8309SEdward Pilatowicz 		    ddi_get_name_addr(dip));
33737f0b8309SEdward Pilatowicz 		goto errout1;
33747f0b8309SEdward Pilatowicz 	}
33757f0b8309SEdward Pilatowicz 
33767f0b8309SEdward Pilatowicz #endif /* !XPV_HVM_DRIVER */
33777f0b8309SEdward Pilatowicz 
33787f0b8309SEdward Pilatowicz 	ddi_report_dev(dip);
33797f0b8309SEdward Pilatowicz 	DPRINTF(DDI_DBG, ("xdf@%s: attached\n", vdp->xdf_addr));
33807f0b8309SEdward Pilatowicz 	return (DDI_SUCCESS);
33817f0b8309SEdward Pilatowicz 
33827f0b8309SEdward Pilatowicz errout1:
33837f0b8309SEdward Pilatowicz 	(void) xvdi_switch_state(vdp->xdf_dip, XBT_NULL, XenbusStateClosed);
33847f0b8309SEdward Pilatowicz 	xvdi_remove_event_handler(dip, XS_OE_STATE);
33857f0b8309SEdward Pilatowicz errout0:
33867f0b8309SEdward Pilatowicz 	if (vdp->xdf_vd_lbl != NULL) {
33877f0b8309SEdward Pilatowicz 		cmlb_detach(vdp->xdf_vd_lbl, NULL);
33887f0b8309SEdward Pilatowicz 		cmlb_free_handle(&vdp->xdf_vd_lbl);
33897f0b8309SEdward Pilatowicz 		vdp->xdf_vd_lbl = NULL;
33907f0b8309SEdward Pilatowicz 	}
33917f0b8309SEdward Pilatowicz 	if (vdp->xdf_softintr_id != NULL)
33927f0b8309SEdward Pilatowicz 		ddi_remove_softintr(vdp->xdf_softintr_id);
33937f0b8309SEdward Pilatowicz 	xvdi_remove_xb_watch_handlers(dip);
33947f0b8309SEdward Pilatowicz 	if (vdp->xdf_ready_tq != NULL)
33957f0b8309SEdward Pilatowicz 		ddi_taskq_destroy(vdp->xdf_ready_tq);
33967f0b8309SEdward Pilatowicz 	mutex_destroy(&vdp->xdf_cb_lk);
33977f0b8309SEdward Pilatowicz 	mutex_destroy(&vdp->xdf_dev_lk);
33987f0b8309SEdward Pilatowicz 	cv_destroy(&vdp->xdf_dev_cv);
33997f0b8309SEdward Pilatowicz 	cv_destroy(&vdp->xdf_hp_status_cv);
34007f0b8309SEdward Pilatowicz 	ddi_soft_state_free(xdf_ssp, instance);
34017f0b8309SEdward Pilatowicz 	ddi_set_driver_private(dip, NULL);
34027f0b8309SEdward Pilatowicz 	ddi_prop_remove_all(dip);
34037f0b8309SEdward Pilatowicz 	cmn_err(CE_WARN, "xdf@%s: attach failed", ddi_get_name_addr(dip));
34047f0b8309SEdward Pilatowicz 	return (DDI_FAILURE);
34057f0b8309SEdward Pilatowicz }
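
/*
 * Illustrative sketch, not part of this driver and never compiled, of
 * the labeled error-unwind idiom used by xdf_attach() above:
 * resources are acquired top to bottom, and each errout label
 * releases the subset acquired so far.  All demo_* names are
 * hypothetical.
 */
#if 0
static int
demo_attach(void)
{
	if (demo_acquire_a() != 0)
		return (DDI_FAILURE);
	if (demo_acquire_b() != 0)
		goto errout_a;
	if (demo_acquire_c() != 0)
		goto errout_b;
	return (DDI_SUCCESS);

errout_b:
	demo_release_b();
errout_a:
	demo_release_a();
	return (DDI_FAILURE);
}
#endif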
34067f0b8309SEdward Pilatowicz 
34077f0b8309SEdward Pilatowicz static int
34087f0b8309SEdward Pilatowicz xdf_suspend(dev_info_t *dip)
34097f0b8309SEdward Pilatowicz {
34107f0b8309SEdward Pilatowicz 	int		instance = ddi_get_instance(dip);
34117f0b8309SEdward Pilatowicz 	xdf_t		*vdp;
34127f0b8309SEdward Pilatowicz 
34137f0b8309SEdward Pilatowicz 	if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
34147f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
34157f0b8309SEdward Pilatowicz 
34167f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
34177f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_suspend\n", vdp->xdf_addr);
34187f0b8309SEdward Pilatowicz 
34197f0b8309SEdward Pilatowicz 	xvdi_suspend(dip);
34207f0b8309SEdward Pilatowicz 
34217f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
34227f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_dev_lk);
34237f0b8309SEdward Pilatowicz 
34247f0b8309SEdward Pilatowicz 	vdp->xdf_suspending = B_TRUE;
34257f0b8309SEdward Pilatowicz 	xdf_ring_destroy(vdp);
34267f0b8309SEdward Pilatowicz 	xdf_set_state(vdp, XD_SUSPEND);
34277f0b8309SEdward Pilatowicz 	vdp->xdf_suspending = B_FALSE;
34287f0b8309SEdward Pilatowicz 
34297f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_dev_lk);
34307f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
34317f0b8309SEdward Pilatowicz 
34327f0b8309SEdward Pilatowicz 	if (xdf_debug & SUSRES_DBG)
34337f0b8309SEdward Pilatowicz 		xen_printf("xdf@%s: xdf_suspend: done\n", vdp->xdf_addr);
34347f0b8309SEdward Pilatowicz 
34357f0b8309SEdward Pilatowicz 	return (DDI_SUCCESS);
34367f0b8309SEdward Pilatowicz }
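
/*
 * xdf_suspend() and xdf_resume() form a pair: suspend tears down the
 * I/O ring and parks the state machine at XD_SUSPEND, while resume
 * moves it back to XD_UNKNOWN and restarts the connection handshake
 * via xdf_setstate_init().
 */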
34377f0b8309SEdward Pilatowicz 
34387f0b8309SEdward Pilatowicz static int
34397f0b8309SEdward Pilatowicz xdf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
34407f0b8309SEdward Pilatowicz {
34417f0b8309SEdward Pilatowicz 	xdf_t *vdp;
34427f0b8309SEdward Pilatowicz 	int instance;
34437f0b8309SEdward Pilatowicz 
34447f0b8309SEdward Pilatowicz 	switch (cmd) {
34457f0b8309SEdward Pilatowicz 
34467f0b8309SEdward Pilatowicz 	case DDI_PM_SUSPEND:
34477f0b8309SEdward Pilatowicz 		break;
34487f0b8309SEdward Pilatowicz 
34497f0b8309SEdward Pilatowicz 	case DDI_SUSPEND:
34507f0b8309SEdward Pilatowicz 		return (xdf_suspend(dip));
34517f0b8309SEdward Pilatowicz 
34527f0b8309SEdward Pilatowicz 	case DDI_DETACH:
34537f0b8309SEdward Pilatowicz 		break;
34547f0b8309SEdward Pilatowicz 
34557f0b8309SEdward Pilatowicz 	default:
34567f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
34577f0b8309SEdward Pilatowicz 	}
34587f0b8309SEdward Pilatowicz 
34597f0b8309SEdward Pilatowicz 	instance = ddi_get_instance(dip);
34607f0b8309SEdward Pilatowicz 	DPRINTF(DDI_DBG, ("xdf@%s: detaching\n", ddi_get_name_addr(dip)));
34617f0b8309SEdward Pilatowicz 	vdp = ddi_get_soft_state(xdf_ssp, instance);
34627f0b8309SEdward Pilatowicz 
34637f0b8309SEdward Pilatowicz 	if (vdp == NULL)
34647f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
34657f0b8309SEdward Pilatowicz 
34667f0b8309SEdward Pilatowicz 	mutex_enter(&vdp->xdf_cb_lk);
34677f0b8309SEdward Pilatowicz 	xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
34687f0b8309SEdward Pilatowicz 	if (vdp->xdf_state != XD_CLOSED) {
34697f0b8309SEdward Pilatowicz 		mutex_exit(&vdp->xdf_cb_lk);
34707f0b8309SEdward Pilatowicz 		return (DDI_FAILURE);
34717f0b8309SEdward Pilatowicz 	}
34727f0b8309SEdward Pilatowicz 	mutex_exit(&vdp->xdf_cb_lk);
34737f0b8309SEdward Pilatowicz 
34747f0b8309SEdward Pilatowicz 	ASSERT(!ISDMACBON(vdp));
34757f0b8309SEdward Pilatowicz 
34767f0b8309SEdward Pilatowicz #if defined(XPV_HVM_DRIVER)
34777f0b8309SEdward Pilatowicz 	xdf_hvm_rm(dip);
34787f0b8309SEdward Pilatowicz #endif /* XPV_HVM_DRIVER */
34797f0b8309SEdward Pilatowicz 
34807f0b8309SEdward Pilatowicz 	if (vdp->xdf_timeout_id != 0)
34817f0b8309SEdward Pilatowicz 		(void) untimeout(vdp->xdf_timeout_id);
34827f0b8309SEdward Pilatowicz 
34837f0b8309SEdward Pilatowicz 	xvdi_remove_event_handler(dip, XS_OE_STATE);
34847f0b8309SEdward Pilatowicz 	ddi_taskq_destroy(vdp->xdf_ready_tq);
34857f0b8309SEdward Pilatowicz 
34867f0b8309SEdward Pilatowicz 	cmlb_detach(vdp->xdf_vd_lbl, NULL);
34877f0b8309SEdward Pilatowicz 	cmlb_free_handle(&vdp->xdf_vd_lbl);
34887f0b8309SEdward Pilatowicz 
34897f0b8309SEdward Pilatowicz 	/* we'll support a backend running in a domU later */
34907f0b8309SEdward Pilatowicz #ifdef	DOMU_BACKEND
34917f0b8309SEdward Pilatowicz 	(void) xvdi_post_event(dip, XEN_HP_REMOVE);
34927f0b8309SEdward Pilatowicz #endif
34937f0b8309SEdward Pilatowicz 
34947f0b8309SEdward Pilatowicz 	list_destroy(&vdp->xdf_vreq_act);
34957f0b8309SEdward Pilatowicz 	ddi_prop_remove_all(dip);
34967f0b8309SEdward Pilatowicz 	xdf_kstat_delete(dip);
34977f0b8309SEdward Pilatowicz 	ddi_remove_softintr(vdp->xdf_softintr_id);
34987f0b8309SEdward Pilatowicz 	xvdi_remove_xb_watch_handlers(dip);
34997f0b8309SEdward Pilatowicz 	ddi_set_driver_private(dip, NULL);
35007f0b8309SEdward Pilatowicz 	cv_destroy(&vdp->xdf_dev_cv);
35017f0b8309SEdward Pilatowicz 	mutex_destroy(&vdp->xdf_cb_lk);
35027f0b8309SEdward Pilatowicz 	mutex_destroy(&vdp->xdf_dev_lk);
35037f0b8309SEdward Pilatowicz 	if (vdp->xdf_cache_flush_block != NULL)
350465908c77Syu, larry liu - Sun Microsystems - Beijing China 		kmem_free(vdp->xdf_flush_mem, 2 * vdp->xdf_xdev_secsize);
35057f0b8309SEdward Pilatowicz 	ddi_soft_state_free(xdf_ssp, instance);
35067f0b8309SEdward Pilatowicz 	return (DDI_SUCCESS);
35077f0b8309SEdward Pilatowicz }
35087f0b8309SEdward Pilatowicz 
35097f0b8309SEdward Pilatowicz /*
35107f0b8309SEdward Pilatowicz  * Driver linkage structures.
35117f0b8309SEdward Pilatowicz  */
35127f0b8309SEdward Pilatowicz static struct cb_ops xdf_cbops = {
35137f0b8309SEdward Pilatowicz 	xdf_open,		/* cb_open */
35147f0b8309SEdward Pilatowicz 	xdf_close,		/* cb_close */
35157f0b8309SEdward Pilatowicz 	xdf_strategy,		/* cb_strategy */
35167f0b8309SEdward Pilatowicz 	nodev,			/* cb_print */
35177f0b8309SEdward Pilatowicz 	xdf_dump,		/* cb_dump */
35187f0b8309SEdward Pilatowicz 	xdf_read,		/* cb_read */
35197f0b8309SEdward Pilatowicz 	xdf_write,		/* cb_write */
35207f0b8309SEdward Pilatowicz 	xdf_ioctl,		/* cb_ioctl */
35217f0b8309SEdward Pilatowicz 	nodev,			/* cb_devmap */
35227f0b8309SEdward Pilatowicz 	nodev,			/* cb_mmap */
35237f0b8309SEdward Pilatowicz 	nodev,			/* cb_segmap */
35247f0b8309SEdward Pilatowicz 	nochpoll,		/* cb_chpoll */
35257f0b8309SEdward Pilatowicz 	xdf_prop_op,		/* cb_prop_op */
35267f0b8309SEdward Pilatowicz 	NULL,			/* cb_str (not a STREAMS driver) */
35277f0b8309SEdward Pilatowicz 	D_MP | D_NEW | D_64BIT,	/* cb_flag */
35287f0b8309SEdward Pilatowicz 	CB_REV,			/* cb_rev */
35297f0b8309SEdward Pilatowicz 	xdf_aread,		/* cb_aread */
35307f0b8309SEdward Pilatowicz 	xdf_awrite		/* cb_awrite */
35317f0b8309SEdward Pilatowicz };
35327f0b8309SEdward Pilatowicz 
35337f0b8309SEdward Pilatowicz struct dev_ops xdf_devops = {
35347f0b8309SEdward Pilatowicz 	DEVO_REV,		/* devo_rev */
35357f0b8309SEdward Pilatowicz 	0,			/* devo_refcnt */
35367f0b8309SEdward Pilatowicz 	xdf_getinfo,		/* devo_getinfo */
35377f0b8309SEdward Pilatowicz 	nulldev,		/* devo_identify */
35387f0b8309SEdward Pilatowicz 	nulldev,		/* devo_probe */
35397f0b8309SEdward Pilatowicz 	xdf_attach,		/* devo_attach */
35407f0b8309SEdward Pilatowicz 	xdf_detach,		/* devo_detach */
35417f0b8309SEdward Pilatowicz 	nodev,			/* devo_reset */
35427f0b8309SEdward Pilatowicz 	&xdf_cbops,		/* devo_cb_ops */
35437f0b8309SEdward Pilatowicz 	NULL,			/* devo_bus_ops */
35447f0b8309SEdward Pilatowicz 	NULL,			/* devo_power */
35457f0b8309SEdward Pilatowicz 	ddi_quiesce_not_supported, /* devo_quiesce */
35467f0b8309SEdward Pilatowicz };
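
/*
 * Note that devo_quiesce is ddi_quiesce_not_supported: this driver
 * does not implement quiesce(9E), so fast reboot is not available
 * while it is loaded.
 */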
35477f0b8309SEdward Pilatowicz 
35487f0b8309SEdward Pilatowicz /*
35497f0b8309SEdward Pilatowicz  * Module linkage structures.
35507f0b8309SEdward Pilatowicz  */
35517f0b8309SEdward Pilatowicz static struct modldrv modldrv = {
35527f0b8309SEdward Pilatowicz 	&mod_driverops,		/* Type of module.  This one is a driver */
35537f0b8309SEdward Pilatowicz 	"virtual block driver",	/* short description */
35547f0b8309SEdward Pilatowicz 	&xdf_devops		/* driver specific ops */
35557f0b8309SEdward Pilatowicz };
35567f0b8309SEdward Pilatowicz 
35577f0b8309SEdward Pilatowicz static struct modlinkage xdf_modlinkage = {
35587f0b8309SEdward Pilatowicz 	MODREV_1, (void *)&modldrv, NULL
35597f0b8309SEdward Pilatowicz };
35607f0b8309SEdward Pilatowicz 
35617f0b8309SEdward Pilatowicz /*
35627f0b8309SEdward Pilatowicz  * standard module entry points
35637f0b8309SEdward Pilatowicz  */
35647f0b8309SEdward Pilatowicz int
35657f0b8309SEdward Pilatowicz _init(void)
35667f0b8309SEdward Pilatowicz {
35677f0b8309SEdward Pilatowicz 	int rc;
35687f0b8309SEdward Pilatowicz 
35697f0b8309SEdward Pilatowicz 	xdf_major = ddi_name_to_major("xdf");
35707f0b8309SEdward Pilatowicz 	if (xdf_major == (major_t)-1)
35717f0b8309SEdward Pilatowicz 		return (EINVAL);
35727f0b8309SEdward Pilatowicz 
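	/*
	 * The soft state anchor and kmem caches below must exist before
	 * mod_install() registers the driver, since xdf_attach() can be
	 * called as soon as registration completes.
	 */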
35737f0b8309SEdward Pilatowicz 	if ((rc = ddi_soft_state_init(&xdf_ssp, sizeof (xdf_t), 0)) != 0)
35747f0b8309SEdward Pilatowicz 		return (rc);
35757f0b8309SEdward Pilatowicz 
35767f0b8309SEdward Pilatowicz 	xdf_vreq_cache = kmem_cache_create("xdf_vreq_cache",
35777f0b8309SEdward Pilatowicz 	    sizeof (v_req_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
35787f0b8309SEdward Pilatowicz 	xdf_gs_cache = kmem_cache_create("xdf_gs_cache",
35797f0b8309SEdward Pilatowicz 	    sizeof (ge_slot_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
35807f0b8309SEdward Pilatowicz 
35817f0b8309SEdward Pilatowicz #if defined(XPV_HVM_DRIVER)
35827f0b8309SEdward Pilatowicz 	xdf_hvm_init();
35837f0b8309SEdward Pilatowicz #endif /* XPV_HVM_DRIVER */
35847f0b8309SEdward Pilatowicz 
35857f0b8309SEdward Pilatowicz 	if ((rc = mod_install(&xdf_modlinkage)) != 0) {
35867f0b8309SEdward Pilatowicz #if defined(XPV_HVM_DRIVER)
35877f0b8309SEdward Pilatowicz 		xdf_hvm_fini();
35887f0b8309SEdward Pilatowicz #endif /* XPV_HVM_DRIVER */
35897f0b8309SEdward Pilatowicz 		kmem_cache_destroy(xdf_vreq_cache);
35907f0b8309SEdward Pilatowicz 		kmem_cache_destroy(xdf_gs_cache);
35917f0b8309SEdward Pilatowicz 		ddi_soft_state_fini(&xdf_ssp);
35927f0b8309SEdward Pilatowicz 		return (rc);
35937f0b8309SEdward Pilatowicz 	}
35947f0b8309SEdward Pilatowicz 
35957f0b8309SEdward Pilatowicz 	return (rc);
35967f0b8309SEdward Pilatowicz }
35977f0b8309SEdward Pilatowicz 
35987f0b8309SEdward Pilatowicz int
35997f0b8309SEdward Pilatowicz _fini(void)
36007f0b8309SEdward Pilatowicz {
36017f0b8309SEdward Pilatowicz 	int err;
36027f0b8309SEdward Pilatowicz 	if ((err = mod_remove(&xdf_modlinkage)) != 0)
36037f0b8309SEdward Pilatowicz 		return (err);
36047f0b8309SEdward Pilatowicz 
36057f0b8309SEdward Pilatowicz #if defined(XPV_HVM_DRIVER)
36067f0b8309SEdward Pilatowicz 	xdf_hvm_fini();
36077f0b8309SEdward Pilatowicz #endif /* XPV_HVM_DRIVER */
36087f0b8309SEdward Pilatowicz 
36097f0b8309SEdward Pilatowicz 	kmem_cache_destroy(xdf_vreq_cache);
36107f0b8309SEdward Pilatowicz 	kmem_cache_destroy(xdf_gs_cache);
36117f0b8309SEdward Pilatowicz 	ddi_soft_state_fini(&xdf_ssp);
36127f0b8309SEdward Pilatowicz 
36137f0b8309SEdward Pilatowicz 	return (0);
36147f0b8309SEdward Pilatowicz }
36157f0b8309SEdward Pilatowicz 
36167f0b8309SEdward Pilatowicz int
36177f0b8309SEdward Pilatowicz _info(struct modinfo *modinfop)
36187f0b8309SEdward Pilatowicz {
36197f0b8309SEdward Pilatowicz 	return (mod_info(&xdf_modlinkage, modinfop));
36207f0b8309SEdward Pilatowicz }
3621