xref: /titanic_51/usr/src/uts/common/xen/os/xvdi.c (revision 1c2187e7a735b31a46941879f0bd124e0aa325a3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Xen virtual device driver interfaces
31  */
32 
33 /*
34  * todo:
35  * + name space clean up:
36  *	xvdi_* - public xen interfaces, for use by all leaf drivers
37  *	xd_* - public xen data structures
38  *	i_xvdi_* - implementation private functions
39  *	xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
40  * + add mdb dcmds to dump ring status
41  * + implement xvdi_xxx to wrap xenbus_xxx read/write function
42  * + convert (xendev_ring_t *) into xvdi_ring_handle_t
43  */
44 #include <sys/conf.h>
45 #include <sys/param.h>
46 #include <sys/hypervisor.h>
47 #include <sys/xen_mmu.h>
48 #include <sys/kmem.h>
49 #include <vm/seg_kmem.h>
50 #include <sys/debug.h>
51 #include <sys/modctl.h>
52 #include <sys/autoconf.h>
53 #include <sys/ddi_impldefs.h>
54 #include <sys/ddi_subrdefs.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57 #include <sys/sunndi.h>
58 #include <sys/sunldi.h>
59 #include <sys/fs/dv_node.h>
60 #include <sys/evtchn_impl.h>
61 #include <sys/gnttab.h>
62 #include <sys/avintr.h>
63 #include <sys/psm.h>
64 #include <sys/spl.h>
65 #include <sys/promif.h>
66 #include <sys/list.h>
67 #include <sys/bootconf.h>
68 #include <sys/bootsvcs.h>
69 #include <sys/bootinfo.h>
70 #include <sys/note.h>
71 #include <sys/xen_mmu.h>
72 #include <xen/sys/xenbus_impl.h>
73 #include <xen/sys/xendev.h>
74 #include <vm/hat_i86.h>
75 #include <sys/scsi/generic/inquiry.h>
76 #include <util/sscanf.h>
77 #include <xen/public/io/xs_wire.h>
78 
79 
80 static void xvdi_ring_init_sring(xendev_ring_t *);
81 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
82 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
83 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
84 
85 static int i_xvdi_add_watches(dev_info_t *);
86 static void i_xvdi_rem_watches(dev_info_t *);
87 
88 static int i_xvdi_add_watch_oestate(dev_info_t *);
89 static void i_xvdi_rem_watch_oestate(dev_info_t *);
90 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
91 static void i_xvdi_oestate_handler(void *);
92 
93 static int i_xvdi_add_watch_hpstate(dev_info_t *);
94 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
95 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
96     unsigned int);
97 static void i_xvdi_hpstate_handler(void *);
98 
99 static int i_xvdi_add_watch_bepath(dev_info_t *);
100 static void i_xvdi_rem_watch_bepath(dev_info_t *);
101 static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
102     unsigned in);
103 
104 static void xendev_offline_device(void *);
105 
106 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
107     unsigned int);
108 static void i_xvdi_probe_path_handler(void *);
109 
/*
 * Per-device-class configuration: how each class of virtual device is
 * named in the xenstore and in the Solaris device tree.
 */
typedef struct xd_cfg {
	xendev_devclass_t devclass;	/* device class this entry describes */
	char *xsdev;		/* xenstore device name, e.g. "vif" */
	char *xs_path_fe;	/* xenstore path for frontend instances */
	char *xs_path_be;	/* xenstore path for backend instances */
	char *node_fe;		/* devinfo node name of the frontend driver */
	char *node_be;		/* devinfo node name of the backend driver */
	char *device_type;	/* "device_type" property value, if any */
	int xd_ipl;		/* interrupt priority level for this class */
	int flags;		/* XD_DOM_* applicability mask, below */
} i_xd_cfg_t;

/* Flags for i_xd_cfg_t.flags: which domain types the class applies to. */
#define	XD_DOM_ZERO	0x01	/* dom0 only. */
#define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
#define	XD_DOM_IO	0x04	/* IO domains. */

#define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)
127 
/*
 * Table of all known device classes.  Entries with a NULL xsdev are
 * "soft" devices with no xenstore presence; they are created directly
 * rather than probed from the store.
 */
static i_xd_cfg_t xdci[] = {
	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
	    "console", IPL_CONS, XD_DOM_ALL, },

	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
	    "network", IPL_VIF, XD_DOM_ALL, },

	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
	    "block", IPL_VBD, XD_DOM_ALL, },

	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
	    NULL, 0, XD_DOM_ALL, },

	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
	    NULL, 0, XD_DOM_ZERO, },

	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
	    NULL, 0, XD_DOM_ZERO, },
};
/* Number of entries in xdci[]. */
#define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))
154 
155 static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
156 static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
157 static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
158 
/*
 * Xen device channel device access and DMA attributes
 */
static ddi_device_acc_attr_t xendev_dc_accattr = {
	DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
};

/*
 * DMA attributes used to allocate ring buffer pages: a single
 * page-aligned segment (sgllen == 1, alignment == MMU_PAGESIZE).
 */
static ddi_dma_attr_t xendev_dc_dmaattr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	MMU_PAGESIZE,		/* alignment in bytes */
	0x7ff,			/* bitmap of burst sizes */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	0,			/* flags (reserved) */
};
180 
181 static dev_info_t *xendev_dip = NULL;
182 
183 #define	XVDI_DBG_STATE	0x01
184 #define	XVDI_DBG_PROBE	0x02
185 
186 #ifdef DEBUG
187 int i_xvdi_debug = 0;
188 
189 #define	XVDI_DPRINTF(flag, format, ...)			\
190 {							\
191 	if (i_xvdi_debug & (flag))			\
192 		prom_printf((format), __VA_ARGS__);	\
193 }
194 #else
195 #define	XVDI_DPRINTF(flag, format, ...)
196 #endif /* DEBUG */
197 
198 static i_xd_cfg_t *
199 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
200 {
201 	i_xd_cfg_t *xdcp;
202 	int i;
203 
204 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
205 		if (xdcp->devclass == devclass)
206 			return (xdcp);
207 
208 	return (NULL);
209 }
210 
211 int
212 xvdi_init_dev(dev_info_t *dip)
213 {
214 	xendev_devclass_t devcls;
215 	int vdevnum;
216 	domid_t domid;
217 	struct xendev_ppd *pdp;
218 	i_xd_cfg_t *xdcp;
219 	boolean_t backend;
220 	char xsnamebuf[TYPICALMAXPATHLEN];
221 	char *xsname;
222 
223 	devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
224 	    DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
225 	vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
226 	    DDI_PROP_DONTPASS, "vdev", -1);
227 	domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
228 	    DDI_PROP_DONTPASS, "domain", DOMID_SELF);
229 
230 	backend = (domid != DOMID_SELF);
231 	xdcp = i_xvdi_devclass2cfg(devcls);
232 	if (xdcp->device_type != NULL)
233 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
234 		    "device_type", xdcp->device_type);
235 
236 	pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
237 	pdp->xd_domain = domid;
238 	pdp->xd_vdevnum = vdevnum;
239 	pdp->xd_devclass = devcls;
240 	pdp->xd_evtchn = INVALID_EVTCHN;
241 	mutex_init(&pdp->xd_lk, NULL, MUTEX_DRIVER, NULL);
242 	ddi_set_parent_data(dip, pdp);
243 
244 	/*
245 	 * devices that do not need to interact with xenstore
246 	 */
247 	if (vdevnum == -1) {
248 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
249 		    "unit-address", "0");
250 		if (devcls == XEN_CONSOLE)
251 			(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
252 			    "pm-hardware-state", "needs-suspend-resume");
253 		return (DDI_SUCCESS);
254 	}
255 
256 	/*
257 	 * PV devices that need to probe xenstore
258 	 */
259 
260 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
261 	    "pm-hardware-state", "needs-suspend-resume");
262 
263 	xsname = xsnamebuf;
264 	if (!backend)
265 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
266 		    "%s/%d", xdcp->xs_path_fe, vdevnum);
267 	else
268 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
269 		    "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
270 	if ((xenbus_read_driver_state(xsname) >= XenbusStateClosing)) {
271 		/* Don't try to init a dev that may be closing */
272 		mutex_destroy(&pdp->xd_lk);
273 		kmem_free(pdp, sizeof (*pdp));
274 		ddi_set_parent_data(dip, NULL);
275 		return (DDI_FAILURE);
276 	}
277 
278 	pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
279 	pdp->xd_xsdev.devicetype = xdcp->xsdev;
280 	pdp->xd_xsdev.frontend = (backend ? 0 : 1);
281 	pdp->xd_xsdev.data = dip;
282 	pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
283 	if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
284 		cmn_err(CE_WARN, "xvdi_init_dev: "
285 		    "cannot add watches for %s", xsname);
286 		xvdi_uninit_dev(dip);
287 		return (DDI_FAILURE);
288 	}
289 
290 	/*
291 	 * frontend device will use "unit-addr" as
292 	 * the bus address, which will be set here
293 	 */
294 	if (!backend) {
295 		void *prop_str;
296 		unsigned int prop_len, addr;
297 
298 		switch (devcls) {
299 		case XEN_VNET:
300 			if (xenbus_read(XBT_NULL, xsname, "mac", &prop_str,
301 			    &prop_len) == 0) {
302 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
303 				    dip, "mac", prop_str);
304 				kmem_free(prop_str, prop_len);
305 			}
306 			prop_str = NULL;
307 			if (xenbus_scanf(XBT_NULL, xsname, "handle", "%u",
308 			    &addr) == 0) {
309 				char unitaddr[9]; /* hold 32-bit hex */
310 
311 				(void) snprintf(unitaddr, 9, "%x", addr);
312 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
313 				    dip, "unit-address", unitaddr);
314 			}
315 			break;
316 		case XEN_VBLK:
317 			if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend,
318 			    "dev", &prop_str, &prop_len) == 0) {
319 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
320 				    dip, "unit-address", prop_str);
321 				kmem_free(prop_str, prop_len);
322 			}
323 			break;
324 		default:
325 			break;
326 		}
327 	}
328 
329 	return (DDI_SUCCESS);
330 }
331 
/*
 * Tear down the xendev state for a child node, undoing xvdi_init_dev():
 * remove registered event handlers and watches, ask the other end to
 * close, then free the parent-private data.  Safe to call when init
 * never completed (pdp == NULL is a no-op).
 */
void
xvdi_uninit_dev(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	if (pdp != NULL) {
		/* Remove any registered callbacks. */
		xvdi_remove_event_handler(dip, NULL);

		/* Remove any registered watches. */
		i_xvdi_rem_watches(dip);

		/* tell other end to close */
		(void) xvdi_switch_state(dip, XBT_NULL, XenbusStateClosed);

		/* nodename was i_ddi_strdup()'d in xvdi_init_dev() */
		if (pdp->xd_xsdev.nodename != NULL)
			kmem_free((char *)(pdp->xd_xsdev.nodename),
			    strlen(pdp->xd_xsdev.nodename) + 1);

		/* detach pdp from the node before destroying it */
		ddi_set_parent_data(dip, NULL);

		mutex_destroy(&pdp->xd_lk);
		kmem_free(pdp, sizeof (*pdp));
	}
}
357 
/*
 * Bind the event channel for this device instance.
 * Currently we only support one evtchn per device instance.
 *
 * 'evtchn' is the port already allocated by the other end; on success
 * the local port is recorded in pdp->xd_evtchn and bound to an irq.
 * Returns DDI_SUCCESS or DDI_FAILURE (no other end, or the
 * interdomain bind hypercall failed).
 */
int
xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
{
	struct xendev_ppd *pdp;
	domid_t oeid;
	int r;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);

	mutex_enter(&pdp->xd_lk);
	if (pdp->xd_devclass == XEN_CONSOLE) {
		/* domU console port comes from the start info, not a bind */
		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
			pdp->xd_evtchn = xen_info->console.domU.evtchn;
		} else {
			/* dom0 console has no event channel here */
			pdp->xd_evtchn = INVALID_EVTCHN;
			mutex_exit(&pdp->xd_lk);
			return (DDI_SUCCESS);
		}
	} else {
		oeid = pdp->xd_xsdev.otherend_id;
		if (oeid == (domid_t)-1) {
			mutex_exit(&pdp->xd_lk);
			return (DDI_FAILURE);
		}

		if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
			xvdi_dev_error(dip, r, "bind event channel");
			mutex_exit(&pdp->xd_lk);
			return (DDI_FAILURE);
		}
	}
	/* hook the (now valid) local port up to an irq */
	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
	mutex_exit(&pdp->xd_lk);

	return (DDI_SUCCESS);
}
400 
401 /*
402  * Allocate an event channel for this device instance.
403  * Currently we only support one evtchn per device instance.
404  */
405 int
406 xvdi_alloc_evtchn(dev_info_t *dip)
407 {
408 	struct xendev_ppd *pdp;
409 	domid_t oeid;
410 	int rv;
411 
412 	pdp = ddi_get_parent_data(dip);
413 	ASSERT(pdp != NULL);
414 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
415 
416 	mutex_enter(&pdp->xd_lk);
417 	if (pdp->xd_devclass == XEN_CONSOLE) {
418 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
419 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
420 		} else {
421 			pdp->xd_evtchn = INVALID_EVTCHN;
422 			mutex_exit(&pdp->xd_lk);
423 			return (DDI_SUCCESS);
424 		}
425 	} else {
426 		oeid = pdp->xd_xsdev.otherend_id;
427 		if (oeid == (domid_t)-1) {
428 			mutex_exit(&pdp->xd_lk);
429 			return (DDI_FAILURE);
430 		}
431 
432 		if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
433 			xvdi_dev_error(dip, rv, "bind event channel");
434 			mutex_exit(&pdp->xd_lk);
435 			return (DDI_FAILURE);
436 		}
437 	}
438 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
439 	mutex_exit(&pdp->xd_lk);
440 
441 	return (DDI_SUCCESS);
442 }
443 
444 /*
445  * Unbind the event channel for this device instance.
446  * Currently we only support one evtchn per device instance.
447  */
448 void
449 xvdi_free_evtchn(dev_info_t *dip)
450 {
451 	struct xendev_ppd *pdp;
452 
453 	pdp = ddi_get_parent_data(dip);
454 	ASSERT(pdp != NULL);
455 
456 	mutex_enter(&pdp->xd_lk);
457 	if (pdp->xd_evtchn != INVALID_EVTCHN) {
458 		ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
459 		pdp->xd_evtchn = INVALID_EVTCHN;
460 		pdp->xd_ispec.intrspec_vec = 0;
461 	}
462 	mutex_exit(&pdp->xd_lk);
463 }
464 
465 /*
466  * Map an inter-domain communication ring for a virtual device.
467  * This is used by backend drivers.
468  */
469 int
470 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
471     grant_ref_t gref, xendev_ring_t **ringpp)
472 {
473 	domid_t oeid;
474 	gnttab_map_grant_ref_t mapop;
475 	gnttab_unmap_grant_ref_t unmapop;
476 	caddr_t ringva;
477 	ddi_acc_hdl_t *ap;
478 	ddi_acc_impl_t *iap;
479 	xendev_ring_t *ring;
480 	int err;
481 	char errstr[] = "mapping in ring buffer";
482 
483 	ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
484 	oeid = xvdi_get_oeid(dip);
485 
486 	/* alloc va in backend dom for ring buffer */
487 	ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
488 	    0, 0, 0, 0, VM_SLEEP);
489 
490 	/* map in ring page */
491 	hat_prepare_mapping(kas.a_hat, ringva);
492 	mapop.host_addr = (uint64_t)(uintptr_t)ringva;
493 	mapop.flags = GNTMAP_host_map;
494 	mapop.ref = gref;
495 	mapop.dom = oeid;
496 	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &mapop, 1);
497 	if (err) {
498 		xvdi_fatal_error(dip, err, errstr);
499 		goto errout1;
500 	}
501 
502 	if (mapop.status != 0) {
503 		xvdi_fatal_error(dip, err, errstr);
504 		goto errout2;
505 	}
506 	ring->xr_vaddr = ringva;
507 	ring->xr_grant_hdl = mapop.handle;
508 	ring->xr_gref = gref;
509 
510 	/*
511 	 * init an acc handle and associate it w/ this ring
512 	 * this is only for backend drivers. we get the memory by calling
513 	 * vmem_xalloc(), instead of calling any ddi function, so we have
514 	 * to init an acc handle by ourselves
515 	 */
516 	ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
517 	ap = impl_acc_hdl_get(ring->xr_acc_hdl);
518 	ap->ah_vers = VERS_ACCHDL;
519 	ap->ah_dip = dip;
520 	ap->ah_xfermodes = DDI_DMA_CONSISTENT;
521 	ap->ah_acc = xendev_dc_accattr;
522 	iap = (ddi_acc_impl_t *)ap->ah_platform_private;
523 	iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
524 	impl_acc_hdl_init(ap);
525 	ap->ah_offset = 0;
526 	ap->ah_len = (off_t)PAGESIZE;
527 	ap->ah_addr = ring->xr_vaddr;
528 
529 	/* init backend ring */
530 	xvdi_ring_init_back_ring(ring, nentry, entrysize);
531 
532 	*ringpp = ring;
533 
534 	return (DDI_SUCCESS);
535 
536 errout2:
537 	/* unmap ring page */
538 	unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
539 	unmapop.handle = ring->xr_grant_hdl;
540 	unmapop.dev_bus_addr = NULL;
541 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
542 	hat_release_mapping(kas.a_hat, ringva);
543 errout1:
544 	vmem_xfree(heap_arena, ringva, PAGESIZE);
545 	kmem_free(ring, sizeof (xendev_ring_t));
546 	return (DDI_FAILURE);
547 }
548 
549 /*
550  * Unmap a ring for a virtual device.
551  * This is used by backend drivers.
552  */
553 void
554 xvdi_unmap_ring(xendev_ring_t *ring)
555 {
556 	gnttab_unmap_grant_ref_t unmapop;
557 
558 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
559 
560 	impl_acc_hdl_free(ring->xr_acc_hdl);
561 	unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
562 	unmapop.handle = ring->xr_grant_hdl;
563 	unmapop.dev_bus_addr = NULL;
564 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
565 	hat_release_mapping(kas.a_hat, ring->xr_vaddr);
566 	vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
567 	kmem_free(ring, sizeof (xendev_ring_t));
568 }
569 
/*
 * Re-initialise an inter-domain communications ring for the backend domain.
 * ring will be re-initialized after re-grant succeed
 * ring will be freed if fails to re-grant access to backend domain
 * so, don't keep useful data in the ring
 * used only in frontend driver
 */
static void
xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
{
	paddr_t rpaddr;
	maddr_t rmaddr;

	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
	rpaddr = ringp->xr_paddr;

	/* dom0 addresses are already machine addresses; domU must translate */
	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
	/* re-grant the backend access to the existing ring page */
	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
	    rmaddr >> PAGESHIFT, 0);
	*gref = ringp->xr_gref;

	/* init frontend ring */
	xvdi_ring_init_sring(ringp);
	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
	    ringp->xr_entry_size);
}
596 
597 /*
598  * allocate Xen inter-domain communications ring for Xen virtual devices
599  * used only in frontend driver
600  * if *ringpp is not NULL, we'll simply re-init it
601  */
602 int
603 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
604     grant_ref_t *gref, xendev_ring_t **ringpp)
605 {
606 	size_t len;
607 	xendev_ring_t *ring;
608 	ddi_dma_cookie_t dma_cookie;
609 	uint_t ncookies;
610 	grant_ref_t ring_gref;
611 	domid_t oeid;
612 	maddr_t rmaddr;
613 
614 	if (*ringpp) {
615 		xvdi_reinit_ring(dip, gref, *ringpp);
616 		return (DDI_SUCCESS);
617 	}
618 
619 	*ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
620 	oeid = xvdi_get_oeid(dip);
621 
622 	/*
623 	 * Allocate page for this ring buffer
624 	 */
625 	if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
626 	    0, &ring->xr_dma_hdl) != DDI_SUCCESS)
627 		goto err;
628 
629 	if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
630 	    &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
631 	    &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
632 		ddi_dma_free_handle(&ring->xr_dma_hdl);
633 		goto err;
634 	}
635 
636 	if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
637 	    ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
638 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
639 		ddi_dma_mem_free(&ring->xr_acc_hdl);
640 		ring->xr_vaddr = NULL;
641 		ddi_dma_free_handle(&ring->xr_dma_hdl);
642 		goto err;
643 	}
644 	ASSERT(ncookies == 1);
645 	ring->xr_paddr = dma_cookie.dmac_laddress;
646 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
647 	    pa_to_ma(ring->xr_paddr);
648 
649 	if ((ring_gref = gnttab_grant_foreign_access(oeid,
650 	    rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
651 		(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
652 		ddi_dma_mem_free(&ring->xr_acc_hdl);
653 		ring->xr_vaddr = NULL;
654 		ddi_dma_free_handle(&ring->xr_dma_hdl);
655 		goto err;
656 	}
657 	*gref = ring->xr_gref = ring_gref;
658 
659 	/* init frontend ring */
660 	xvdi_ring_init_sring(ring);
661 	xvdi_ring_init_front_ring(ring, nentry, entrysize);
662 
663 	return (DDI_SUCCESS);
664 
665 err:
666 	kmem_free(ring, sizeof (xendev_ring_t));
667 	return (DDI_FAILURE);
668 }
669 
/*
 * Release ring buffers allocated for Xen devices
 * used for frontend driver
 */
void
xvdi_free_ring(xendev_ring_t *ring)
{
	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));

	/* revoke the other end's access before freeing the page */
	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
	ddi_dma_mem_free(&ring->xr_acc_hdl);
	ddi_dma_free_handle(&ring->xr_dma_hdl);
	kmem_free(ring, sizeof (xendev_ring_t));
}
685 
/*
 * Create and (attempt to) bring online a devinfo node for the given
 * device class / domain / virtual device number under 'parent'.
 * Returns the new dip, or NULL if no driver exists for the class or
 * the device is already closing.  Caller must hold the parent busy
 * (DEVI_BUSY_OWNED).
 */
dev_info_t *
xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;
	boolean_t backend;
	i_xd_cfg_t *xdcp;
	char xsnamebuf[TYPICALMAXPATHLEN];
	char *type, *node = NULL, *xsname = NULL;
	unsigned int tlen;
	int ret;

	ASSERT(DEVI_BUSY_OWNED(parent));

	/* a non-self domain means we are the backend for that domain */
	backend = (dom != DOMID_SELF);
	xdcp = i_xvdi_devclass2cfg(devclass);
	ASSERT(xdcp != NULL);

	/* vdev == -1 marks a soft device with no xenstore presence */
	if (vdev != -1) {
		if (!backend) {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d", xdcp->xs_path_fe, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_fe;
		} else {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_be;
		}
	} else {
		node = xdcp->node_fe;
	}

	/* Must have a driver to use. */
	if (node == NULL)
		return (NULL);

	/*
	 * We need to check the state of this device before we go
	 * further, otherwise we'll end up with a dead loop if
	 * anything goes wrong.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
		return (NULL);

	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);

	/*
	 * Driver binding uses the compatible property _before_ the
	 * node name, so we set the node name to the 'model' of the
	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
	 * encode both the model and the type in a compatible property
	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
	 * driver binding based on the <model,type> pair _before_ a
	 * binding based on the node name.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
	    == 0)) {
		size_t clen;
		char *c[1];

		/* "<node>,<type>" plus comma and terminating NUL */
		clen = strlen(node) + strlen(type) + 2;
		c[0] = kmem_alloc(clen, KM_SLEEP);
		(void) snprintf(c[0], clen, "%s,%s", node, type);

		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    dip, "compatible", (char **)c, 1);

		kmem_free(c[0], clen);
		kmem_free(type, tlen);
	}

	/* these properties are consumed later by xvdi_init_dev() */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);

	/* online now if the parent is attached, otherwise just bind */
	if (i_ddi_devi_attached(parent))
		ret = ndi_devi_online(dip, 0);
	else
		ret = ndi_devi_bind_driver(dip, 0);
	if (ret != NDI_SUCCESS)
		(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);

	return (dip);
}
774 
775 /*
776  * xendev_enum_class()
777  */
778 void
779 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
780 {
781 	i_xd_cfg_t *xdcp;
782 
783 	xdcp = i_xvdi_devclass2cfg(devclass);
784 	ASSERT(xdcp != NULL);
785 
786 	if (xdcp->xsdev == NULL) {
787 		int circ;
788 
789 		/*
790 		 * Don't need to probe this kind of device from the
791 		 * store, just create one if it doesn't exist.
792 		 */
793 
794 		ndi_devi_enter(parent, &circ);
795 		if (xvdi_find_dev(parent, devclass, DOMID_SELF, -1)
796 		    == NULL)
797 			(void) xvdi_create_dev(parent, devclass,
798 			    DOMID_SELF, -1);
799 		ndi_devi_exit(parent, circ);
800 	} else {
801 		/*
802 		 * Probe this kind of device from the store, both
803 		 * frontend and backend.
804 		 */
805 
806 		i_xvdi_enum_fe(parent, xdcp);
807 		i_xvdi_enum_be(parent, xdcp);
808 	}
809 }
810 
811 /*
812  * xendev_enum_all()
813  */
814 void
815 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
816 {
817 	int i;
818 	i_xd_cfg_t *xdcp;
819 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
820 	boolean_t domU = !dom0;
821 
822 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
823 
824 		if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
825 			continue;
826 
827 		if (domU && !(xdcp->flags & XD_DOM_GUEST))
828 			continue;
829 
830 		/*
831 		 * Dom0 relies on watchpoints to create non-soft
832 		 * devices - don't attempt to iterate over the store.
833 		 */
834 		if (dom0 && (xdcp->xsdev != NULL))
835 			continue;
836 
837 		/*
838 		 * If the store is not yet available, don't attempt to
839 		 * iterate.
840 		 */
841 		if (store_unavailable && (xdcp->xsdev != NULL))
842 			continue;
843 
844 		xendev_enum_class(parent, xdcp->devclass);
845 	}
846 }
847 
848 xendev_devclass_t
849 xendev_nodename_to_devclass(char *nodename)
850 {
851 	int i;
852 	i_xd_cfg_t *xdcp;
853 
854 	/*
855 	 * This relies on the convention that variants of a base
856 	 * driver share the same prefix and that there are no drivers
857 	 * which share a common prefix with the name of any other base
858 	 * drivers.
859 	 *
860 	 * So for a base driver 'xnb' (which is the name listed in
861 	 * xdci) the variants all begin with the string 'xnb' (in fact
862 	 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
863 	 * base drivers which have the prefix 'xnb'.
864 	 */
865 	ASSERT(nodename != NULL);
866 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
867 		if (((xdcp->node_fe != NULL) &&
868 		    (strncmp(nodename, xdcp->node_fe,
869 		    strlen(xdcp->node_fe)) == 0)) ||
870 		    ((xdcp->node_be != NULL) &&
871 		    (strncmp(nodename, xdcp->node_be,
872 		    strlen(xdcp->node_be)) == 0)))
873 
874 			return (xdcp->devclass);
875 	}
876 	return (XEN_INVAL);
877 }
878 
879 int
880 xendev_devclass_ipl(xendev_devclass_t devclass)
881 {
882 	i_xd_cfg_t *xdcp;
883 
884 	xdcp = i_xvdi_devclass2cfg(devclass);
885 	ASSERT(xdcp != NULL);
886 
887 	return (xdcp->xd_ipl);
888 }
889 
/*
 * Determine if a devinfo instance exists of a particular device
 * class, domain and xenstore virtual device number.
 *
 * Returns the matching child dip or NULL.  Caller must hold the
 * parent busy.  Children that have parent-private data are matched
 * against it directly; otherwise the devinfo properties set by
 * xvdi_create_dev() are consulted.
 */
dev_info_t *
xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;

	ASSERT(DEVI_BUSY_OWNED(parent));

	switch (devclass) {
	case XEN_CONSOLE:
	case XEN_XENBUS:
	case XEN_DOMCAPS:
	case XEN_BALLOON:
	case XEN_EVTCHN:
	case XEN_PRIVCMD:
		/* Console and soft devices have no vdev. */
		vdev = -1;
		break;
	default:
		break;
	}

	for (dip = ddi_get_child(parent); dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {
		int *vdevnump, *domidp, *devclsp, vdevnum;
		uint_t ndomid, nvdevnum, ndevcls;
		xendev_devclass_t devcls;
		domid_t domid;
		struct xendev_ppd *pdp = ddi_get_parent_data(dip);

		if (pdp == NULL) {
			/*
			 * No parent-private data yet (xvdi_init_dev()
			 * hasn't run); fall back to the devinfo
			 * properties.  A child missing any of the
			 * three properties cannot match and is skipped.
			 */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndomid == 1);
			domid = (domid_t)*domidp;
			ddi_prop_free(domidp);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
			    DDI_PROP_SUCCESS)
				continue;
			ASSERT(nvdevnum == 1);
			vdevnum = *vdevnump;
			ddi_prop_free(vdevnump);

			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "devclass", &devclsp,
			    &ndevcls) != DDI_PROP_SUCCESS)
				continue;
			ASSERT(ndevcls == 1);
			devcls = (xendev_devclass_t)*devclsp;
			ddi_prop_free(devclsp);
		} else {
			domid = pdp->xd_domain;
			vdevnum = pdp->xd_vdevnum;
			devcls = pdp->xd_devclass;
		}

		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
			return (dip);
	}
	return (NULL);
}
959 
960 int
961 xvdi_get_evtchn(dev_info_t *xdip)
962 {
963 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
964 
965 	ASSERT(pdp != NULL);
966 	return (pdp->xd_evtchn);
967 }
968 
969 int
970 xvdi_get_vdevnum(dev_info_t *xdip)
971 {
972 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
973 
974 	ASSERT(pdp != NULL);
975 	return (pdp->xd_vdevnum);
976 }
977 
978 char *
979 xvdi_get_xsname(dev_info_t *xdip)
980 {
981 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
982 
983 	ASSERT(pdp != NULL);
984 	return ((char *)(pdp->xd_xsdev.nodename));
985 }
986 
987 char *
988 xvdi_get_oename(dev_info_t *xdip)
989 {
990 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
991 
992 	ASSERT(pdp != NULL);
993 	if (pdp->xd_devclass == XEN_CONSOLE)
994 		return (NULL);
995 	return ((char *)(pdp->xd_xsdev.otherend));
996 }
997 
998 struct xenbus_device *
999 xvdi_get_xsd(dev_info_t *xdip)
1000 {
1001 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1002 
1003 	ASSERT(pdp != NULL);
1004 	return (&pdp->xd_xsdev);
1005 }
1006 
1007 domid_t
1008 xvdi_get_oeid(dev_info_t *xdip)
1009 {
1010 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1011 
1012 	ASSERT(pdp != NULL);
1013 	if (pdp->xd_devclass == XEN_CONSOLE)
1014 		return ((domid_t)-1);
1015 	return ((domid_t)(pdp->xd_xsdev.otherend_id));
1016 }
1017 
1018 void
1019 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1020 {
1021 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1022 
1023 	ASSERT(pdp != NULL);
1024 	xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1025 }
1026 
1027 void
1028 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1029 {
1030 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1031 
1032 	ASSERT(pdp != NULL);
1033 	xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1034 }
1035 
/*
 * Taskq callback run when the other end's xenbus state changes.  If
 * the driver has registered an XS_OE_STATE event handler, forward the
 * new state to it; otherwise perform default close handling.  Finally,
 * if the other end has closed, schedule the node for offlining.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	dev_info_t *dip = arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	ddi_eventcookie_t evc;

	mutex_enter(&pdp->xd_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/* drop xd_lk across the driver callback */
			mutex_exit(&pdp->xd_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1079 
/*
 * Taskq callback (queued by i_xvdi_hpstate_cb()): read the
 * "hotplug-status" node this backend device is watching and, if the
 * driver registered an XS_HP_STATE handler, post the decoded state to
 * it.  If there is no event cookie or the read fails, the event is
 * silently dropped.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		/* any status string other than "connected" is Unrecognized */
		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* drop the lock across the driver upcall */
		mutex_exit(&pdp->xd_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		/* hp_status was allocated by xenbus_read(); hpl is its size */
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_lk);
}
1107 
1108 void
1109 xvdi_notify_oe(dev_info_t *dip)
1110 {
1111 	struct xendev_ppd *pdp;
1112 
1113 	pdp = ddi_get_parent_data(dip);
1114 	ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1115 	ec_notify_via_evtchn(pdp->xd_evtchn);
1116 }
1117 
1118 static void
1119 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1120 {
1121 	dev_info_t *dip = (dev_info_t *)w->dev;
1122 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1123 	char *be = NULL;
1124 	unsigned int bel;
1125 
1126 	ASSERT(len > XS_WATCH_PATH);
1127 	ASSERT(vec[XS_WATCH_PATH] != NULL);
1128 
1129 	/*
1130 	 * If the backend is not the same as that we already stored,
1131 	 * re-set our watch for its' state.
1132 	 */
1133 	if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1134 	    == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1135 		(void) i_xvdi_add_watch_oestate(dip);
1136 
1137 	if (be != NULL) {
1138 		ASSERT(bel > 0);
1139 		kmem_free(be, bel);
1140 	}
1141 }
1142 
/*
 * Start watching the other end's XenbusState for this device.
 * Creates (on first use) the single-threaded taskq on which state
 * change events are delivered, then hooks up the otherend watch via
 * talk_to_otherend() (which presumably resolves otherend/otherend_id
 * and registers the xenstore watch -- see the xenbus client code).
 * Caller must hold xd_lk.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		/* undo taskq/watch setup on failure */
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1183 
/*
 * Undo i_xvdi_add_watch_oestate(): unregister the otherend state
 * watch, drain/destroy the delivery taskq, and release the otherend
 * bookkeeping strings.  Caller must hold xd_lk; the lock is dropped
 * temporarily around unregister_xenbus_watch() and
 * ddi_taskq_destroy(), since both wait for callbacks/handlers that
 * may themselves take xd_lk.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up: reset otherend state and free the watch/path strings */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1222 
/*
 * Backend devices only: watch this device's
 * "<nodename>/hotplug-status" xenstore node so hotplug script
 * progress can be delivered to the driver.  Creates (on first use)
 * the single-threaded delivery taskq.  Caller must hold xd_lk.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* build "<nodename>/hotplug-status"; freed on unwatch */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1273 
/*
 * Undo i_xvdi_add_watch_hpstate(): unregister the "hotplug-status"
 * watch, drain/destroy the delivery taskq and free the watch path.
 * Caller must hold xd_lk; the lock is dropped around the watch
 * unregistration and taskq destruction, which wait for handlers that
 * may take xd_lk themselves.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1306 
1307 static int
1308 i_xvdi_add_watches(dev_info_t *dip)
1309 {
1310 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1311 
1312 	ASSERT(pdp != NULL);
1313 
1314 	mutex_enter(&pdp->xd_lk);
1315 
1316 	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
1317 		mutex_exit(&pdp->xd_lk);
1318 		return (DDI_FAILURE);
1319 	}
1320 
1321 	if (pdp->xd_xsdev.frontend == 1) {
1322 		/*
1323 		 * Frontend devices must watch for the backend path
1324 		 * changing.
1325 		 */
1326 		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
1327 			goto unwatch_and_fail;
1328 	} else {
1329 		/*
1330 		 * Backend devices must watch for hotplug events.
1331 		 */
1332 		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
1333 			goto unwatch_and_fail;
1334 	}
1335 
1336 	mutex_exit(&pdp->xd_lk);
1337 
1338 	return (DDI_SUCCESS);
1339 
1340 unwatch_and_fail:
1341 	i_xvdi_rem_watch_oestate(dip);
1342 	mutex_exit(&pdp->xd_lk);
1343 
1344 	return (DDI_FAILURE);
1345 }
1346 
1347 static void
1348 i_xvdi_rem_watches(dev_info_t *dip)
1349 {
1350 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1351 
1352 	ASSERT(pdp != NULL);
1353 
1354 	mutex_enter(&pdp->xd_lk);
1355 
1356 	i_xvdi_rem_watch_oestate(dip);
1357 
1358 	if (pdp->xd_xsdev.frontend == 1)
1359 		i_xvdi_rem_watch_bepath(dip);
1360 	else
1361 		i_xvdi_rem_watch_hpstate(dip);
1362 
1363 	mutex_exit(&pdp->xd_lk);
1364 }
1365 
/*
 * Frontend devices only: watch this device's "<nodename>/backend"
 * xenstore node so we notice if our backend is repointed elsewhere
 * (see i_xvdi_bepath_cb()).  Returns DDI_SUCCESS or DDI_FAILURE; on
 * registration failure the path buffer is released again.
 */
static int
i_xvdi_add_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);

	/*
	 * Frontend devices need to watch for the backend path changing.
	 */
	if (pdp->xd_bepath_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* build "<nodename>/backend"; freed on unwatch */
		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/backend",
		    pdp->xd_xsdev.nodename);

		pdp->xd_bepath_watch.node = path;
		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
		/* watch.dev abused to carry the dip, as for xd_hp_watch */
		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
			kmem_free(path, len);
			pdp->xd_bepath_watch.node = NULL;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1400 
/*
 * Undo i_xvdi_add_watch_bepath(): unregister the backend-path watch
 * and free its path string.  Caller must hold xd_lk; the lock is
 * dropped around unregister_xenbus_watch(), which waits for any
 * in-flight callback (which may take xd_lk) to finish.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1420 
1421 int
1422 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1423     XenbusState newState)
1424 {
1425 	int rv;
1426 	struct xendev_ppd *pdp;
1427 
1428 	pdp = ddi_get_parent_data(dip);
1429 	ASSERT(pdp != NULL);
1430 
1431 	XVDI_DPRINTF(XVDI_DBG_STATE,
1432 	    "xvdi_switch_state: dip 0x%p moves to %d",
1433 	    (void *)dip, newState);
1434 
1435 	rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1436 	if (rv > 0)
1437 		cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1438 
1439 	return (rv);
1440 }
1441 
1442 /*
1443  * Notify hotplug script running in userland
1444  */
1445 int
1446 xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
1447 {
1448 	struct xendev_ppd *pdp;
1449 	nvlist_t *attr_list = NULL;
1450 	i_xd_cfg_t *xdcp;
1451 	sysevent_id_t eid;
1452 	int err;
1453 	char devname[256]; /* XXPV dme: ? */
1454 
1455 	pdp = ddi_get_parent_data(dip);
1456 	ASSERT(pdp != NULL);
1457 
1458 	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
1459 	ASSERT(xdcp != NULL);
1460 
1461 	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
1462 	    ddi_driver_name(dip),  ddi_get_instance(dip));
1463 
1464 	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
1465 	if (err != DDI_SUCCESS)
1466 		goto failure;
1467 
1468 	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
1469 	if (err != DDI_SUCCESS)
1470 		goto failure;
1471 	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
1472 	if (err != DDI_SUCCESS)
1473 		goto failure;
1474 	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
1475 	if (err != DDI_SUCCESS)
1476 		goto failure;
1477 	err = nvlist_add_string(attr_list, "device", devname);
1478 	if (err != DDI_SUCCESS)
1479 		goto failure;
1480 	err = nvlist_add_string(attr_list, "fob",
1481 	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
1482 	if (err != DDI_SUCCESS)
1483 		goto failure;
1484 
1485 	switch (hpc) {
1486 	case XEN_HP_ADD:
1487 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1488 		    "add", attr_list, &eid, DDI_NOSLEEP);
1489 		break;
1490 	case XEN_HP_REMOVE:
1491 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1492 		    "remove", attr_list, &eid, DDI_NOSLEEP);
1493 		break;
1494 	default:
1495 		err = DDI_FAILURE;
1496 		goto failure;
1497 	}
1498 
1499 failure:
1500 	if (attr_list != NULL)
1501 		nvlist_free(attr_list);
1502 
1503 	return (err);
1504 }
1505 
1506 /* ARGSUSED */
1507 static void
1508 i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
1509     unsigned int len)
1510 {
1511 	char *path;
1512 
1513 	if (xendev_dip == NULL)
1514 		xendev_dip = ddi_find_devinfo("xpvd", -1, 0);
1515 
1516 	path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);
1517 
1518 	(void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
1519 	    i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
1520 }
1521 
1522 static void
1523 i_xvdi_watch_device(char *path)
1524 {
1525 	struct xenbus_watch *w;
1526 
1527 	ASSERT(path != NULL);
1528 
1529 	w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1530 	w->node = path;
1531 	w->callback = &i_xvdi_probe_path_cb;
1532 	w->dev = NULL;
1533 
1534 	if (register_xenbus_watch(w) != 0) {
1535 		cmn_err(CE_WARN, "i_xvdi_watch_device: "
1536 		    "cannot set watch on %s", path);
1537 		kmem_free(w, sizeof (*w));
1538 		return;
1539 	}
1540 }
1541 
1542 void
1543 xvdi_watch_devices(int newstate)
1544 {
1545 	int devclass;
1546 
1547 	/*
1548 	 * Watch for devices being created in the store.
1549 	 */
1550 	if (newstate == XENSTORE_DOWN)
1551 		return;
1552 	for (devclass = 0; devclass < NXDC; devclass++) {
1553 		if (xdci[devclass].xs_path_fe != NULL)
1554 			i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1555 		if (xdci[devclass].xs_path_be != NULL)
1556 			i_xvdi_watch_device(xdci[devclass].xs_path_be);
1557 	}
1558 }
1559 
1560 /*
1561  * Iterate over the store looking for backend devices to create.
1562  */
1563 static void
1564 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1565 {
1566 	char **domains;
1567 	unsigned int ndomains;
1568 	int ldomains, i;
1569 
1570 	if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1571 	    &ndomains)) == NULL)
1572 		return;
1573 
1574 	for (i = 0, ldomains = 0; i < ndomains; i++) {
1575 		ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1576 
1577 		i_xvdi_enum_worker(parent, xdcp, domains[i]);
1578 	}
1579 	kmem_free(domains, ldomains);
1580 }
1581 
1582 /*
1583  * Iterate over the store looking for frontend devices to create.
1584  */
1585 static void
1586 i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
1587 {
1588 	i_xvdi_enum_worker(parent, xdcp, NULL);
1589 }
1590 
1591 static void
1592 i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
1593     char *domain)
1594 {
1595 	char *path, *domain_path, *ep;
1596 	char **devices;
1597 	unsigned int ndevices;
1598 	int ldevices, j, circ;
1599 	domid_t dom;
1600 
1601 	if (domain == NULL) {
1602 		dom = DOMID_SELF;
1603 		path = xdcp->xs_path_fe;
1604 		domain_path = "";
1605 	} else {
1606 		(void) ddi_strtol(domain, &ep, 0, (long *)&dom);
1607 		path = xdcp->xs_path_be;
1608 		domain_path = domain;
1609 	}
1610 
1611 	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
1612 	    &ndevices)) == NULL)
1613 		return;
1614 
1615 	for (j = 0, ldevices = 0; j < ndevices; j++) {
1616 		int vdev;
1617 
1618 		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
1619 		(void) ddi_strtol(devices[j], &ep, 0, (long *)&vdev);
1620 
1621 		ndi_devi_enter(parent, &circ);
1622 
1623 		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev)
1624 		    == NULL)
1625 			(void) xvdi_create_dev(parent, xdcp->devclass,
1626 			    dom, vdev);
1627 
1628 		ndi_devi_exit(parent, circ);
1629 	}
1630 	kmem_free(devices, ldevices);
1631 }
1632 
1633 /*
1634  * Leaf drivers should call this in their detach() routine during suspend.
1635  */
1636 void
1637 xvdi_suspend(dev_info_t *dip)
1638 {
1639 	i_xvdi_rem_watches(dip);
1640 }
1641 
1642 /*
1643  * Leaf drivers should call this in their attach() routine during resume.
1644  */
1645 int
1646 xvdi_resume(dev_info_t *dip)
1647 {
1648 	return (i_xvdi_add_watches(dip));
1649 }
1650 
1651 /*
1652  * Add event handler for the leaf driver
1653  * to handle event triggered by the change in xenstore
1654  */
1655 int
1656 xvdi_add_event_handler(dev_info_t *dip, char *name,
1657     void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *))
1658 {
1659 	ddi_eventcookie_t ecv;
1660 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1661 	ddi_callback_id_t *cbid;
1662 
1663 	ASSERT(pdp != NULL);
1664 
1665 	mutex_enter(&pdp->xd_lk);
1666 
1667 	if (strcmp(name, XS_OE_STATE) == 0) {
1668 		ASSERT(pdp->xd_xsdev.otherend != NULL);
1669 
1670 		cbid = &pdp->xd_oe_ehid;
1671 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1672 		if (pdp->xd_xsdev.frontend == 1) {
1673 			mutex_exit(&pdp->xd_lk);
1674 			return (DDI_FAILURE);
1675 		}
1676 
1677 		ASSERT(pdp->xd_hp_watch.node != NULL);
1678 
1679 		cbid = &pdp->xd_hp_ehid;
1680 	} else {
1681 		/* Unsupported watch. */
1682 		mutex_exit(&pdp->xd_lk);
1683 		return (DDI_FAILURE);
1684 	}
1685 
1686 	/*
1687 	 * No event handler provided, take default action to handle
1688 	 * event.
1689 	 */
1690 	if (evthandler == NULL) {
1691 		mutex_exit(&pdp->xd_lk);
1692 		return (DDI_SUCCESS);
1693 	}
1694 
1695 	ASSERT(*cbid == NULL);
1696 
1697 	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
1698 		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
1699 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1700 		mutex_exit(&pdp->xd_lk);
1701 		return (DDI_FAILURE);
1702 	}
1703 	if (ddi_add_event_handler(dip, ecv, evthandler, NULL, cbid)
1704 	    != DDI_SUCCESS) {
1705 		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
1706 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1707 		*cbid = NULL;
1708 		mutex_exit(&pdp->xd_lk);
1709 		return (DDI_FAILURE);
1710 	}
1711 
1712 	mutex_exit(&pdp->xd_lk);
1713 
1714 	return (DDI_SUCCESS);
1715 }
1716 
1717 /*
1718  * Remove event handler for the leaf driver and unwatch xenstore
1719  * so, driver will not be notified when xenstore entry changed later
1720  */
1721 void
1722 xvdi_remove_event_handler(dev_info_t *dip, char *name)
1723 {
1724 	struct xendev_ppd *pdp;
1725 	boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
1726 	ddi_callback_id_t oeid = NULL, hpid = NULL;
1727 
1728 	pdp = ddi_get_parent_data(dip);
1729 	ASSERT(pdp != NULL);
1730 
1731 	if (name == NULL) {
1732 		rem_oe = B_TRUE;
1733 		rem_hp = B_TRUE;
1734 	} else if (strcmp(name, XS_OE_STATE) == 0) {
1735 		rem_oe = B_TRUE;
1736 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1737 		rem_hp = B_TRUE;
1738 	} else {
1739 		cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
1740 		return;
1741 	}
1742 
1743 	mutex_enter(&pdp->xd_lk);
1744 
1745 	if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
1746 		oeid = pdp->xd_oe_ehid;
1747 		pdp->xd_oe_ehid = NULL;
1748 	}
1749 
1750 	if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
1751 		hpid = pdp->xd_hp_ehid;
1752 		pdp->xd_hp_ehid = NULL;
1753 	}
1754 
1755 	mutex_exit(&pdp->xd_lk);
1756 
1757 	if (oeid != NULL)
1758 		(void) ddi_remove_event_handler(oeid);
1759 	if (hpid != NULL)
1760 		(void) ddi_remove_event_handler(hpid);
1761 }
1762 
1763 
1764 /*
1765  * common ring interfaces
1766  */
1767 
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Macro hygiene fix: this previously expanded to
 * RING_SIZE(FRONT_RING(ringp)), silently capturing a variable named
 * 'ringp' from the caller's scope instead of using the argument.  It
 * only worked because every caller happened to name its ring 'ringp'.
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
1777 
1778 unsigned int
1779 xvdi_ring_avail_slots(xendev_ring_t *ringp)
1780 {
1781 	comif_ring_fe_t *frp;
1782 	comif_ring_be_t *brp;
1783 
1784 	if (ringp->xr_frontend) {
1785 		frp = FRONT_RING(ringp);
1786 		return (GET_RING_SIZE(ringp) -
1787 		    (frp->req_prod_pvt - frp->rsp_cons));
1788 	} else {
1789 		brp = BACK_RING(ringp);
1790 		return (GET_RING_SIZE(ringp) -
1791 		    (brp->rsp_prod_pvt - brp->req_cons));
1792 	}
1793 }
1794 
1795 int
1796 xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
1797 {
1798 	comif_ring_be_t *brp;
1799 
1800 	ASSERT(!ringp->xr_frontend);
1801 	brp = BACK_RING(ringp);
1802 	return ((brp->req_cons !=
1803 	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
1804 	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
1805 }
1806 
1807 int
1808 xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
1809 {
1810 	comif_ring_fe_t *frp;
1811 
1812 	ASSERT(ringp->xr_frontend);
1813 	frp = FRONT_RING(ringp);
1814 	return (frp->req_prod_pvt !=
1815 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1816 }
1817 
1818 int
1819 xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
1820 {
1821 	comif_ring_fe_t *frp;
1822 
1823 	ASSERT(ringp->xr_frontend);
1824 	frp = FRONT_RING(ringp);
1825 	return (frp->rsp_cons !=
1826 	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
1827 }
1828 
/*
 * Return a pointer to the next request slot and advance the private
 * index: for a frontend, the next free slot (NULL if the ring is
 * full); for a backend, the next unconsumed request (NULL if none).
 * NOTE: req_event will be increased as needed -- on the backend path
 * the event threshold is re-armed and re-checked after a barrier,
 * mirroring Xen's RING_FINAL_CHECK_FOR_REQUESTS() so a request posted
 * concurrently is not missed.
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/*
			 * Ask the frontend to notify us at the next
			 * request, then look once more in case one
			 * raced in before the threshold was visible.
			 */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
1861 
/*
 * Publish the frontend's privately produced requests to the shared
 * ring.  Returns non-zero if the other end's req_event threshold
 * fell inside the newly published range (old, new] -- i.e. the
 * caller should notify the backend (see xvdi_notify_oe()).  This is
 * Xen's RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() protocol; the unsigned
 * wrap-around arithmetic in the return expression is intentional.
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* make req_prod visible before sampling the event threshold */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
1880 
/*
 * Return a pointer to the next response slot and advance the private
 * index: for a backend, the next slot to fill in; for a frontend,
 * the next unconsumed response (NULL if none).
 * NOTE: rsp_event will be increased as needed -- on the frontend
 * path the event threshold is re-armed and re-checked after a
 * barrier, mirroring Xen's RING_FINAL_CHECK_FOR_RESPONSES().
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/*
			 * Ask the backend to notify us at the next
			 * response, then look once more in case one
			 * raced in before the threshold was visible.
			 */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
1910 
/*
 * Publish the backend's privately produced responses to the shared
 * ring.  Returns non-zero if the other end's rsp_event threshold
 * fell inside the newly published range (old, new] -- i.e. the
 * caller should notify the frontend.  This is Xen's
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() protocol; the unsigned
 * wrap-around arithmetic in the return expression is intentional.
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* make rsp_prod visible before sampling the event threshold */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
1929 
1930 static void
1931 xvdi_ring_init_sring(xendev_ring_t *ringp)
1932 {
1933 	ddi_acc_handle_t acchdl;
1934 	comif_sring_t *xsrp;
1935 	int i;
1936 
1937 	xsrp = (comif_sring_t *)ringp->xr_vaddr;
1938 	acchdl = ringp->xr_acc_hdl;
1939 
1940 	/* shared ring initialization */
1941 	ddi_put32(acchdl, &xsrp->req_prod, 0);
1942 	ddi_put32(acchdl, &xsrp->rsp_prod, 0);
1943 	ddi_put32(acchdl, &xsrp->req_event, 1);
1944 	ddi_put32(acchdl, &xsrp->rsp_event, 1);
1945 	for (i = 0; i < sizeof (xsrp->pad); i++)
1946 		ddi_put8(acchdl, xsrp->pad + i, 0);
1947 }
1948 
1949 static void
1950 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1951 {
1952 	comif_ring_fe_t *xfrp;
1953 
1954 	xfrp = &ringp->xr_sring.fr;
1955 	xfrp->req_prod_pvt = 0;
1956 	xfrp->rsp_cons = 0;
1957 	xfrp->nr_ents = nentry;
1958 	xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1959 
1960 	ringp->xr_frontend = 1;
1961 	ringp->xr_entry_size = entrysize;
1962 }
1963 
1964 static void
1965 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1966 {
1967 	comif_ring_be_t *xbrp;
1968 
1969 	xbrp = &ringp->xr_sring.br;
1970 	xbrp->rsp_prod_pvt = 0;
1971 	xbrp->req_cons = 0;
1972 	xbrp->nr_ents = nentry;
1973 	xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1974 
1975 	ringp->xr_frontend = 0;
1976 	ringp->xr_entry_size = entrysize;
1977 }
1978 
1979 static void
1980 xendev_offline_device(void *arg)
1981 {
1982 	dev_info_t *dip = (dev_info_t *)arg;
1983 	char devname[MAXNAMELEN] = {0};
1984 
1985 	/*
1986 	 * This is currently the only chance to delete a devinfo node, which
1987 	 * is _not_ always successful.
1988 	 */
1989 	(void) ddi_deviname(dip, devname);
1990 	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
1991 	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
1992 }
1993 
/*
 * Xenbus callback invoked when the other end's XenbusState changes:
 * record the new state and hand the real work to the per-device
 * oestate taskq (i_xvdi_oestate_handler()).
 */
static void
i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
{
	dev_info_t *dip = (dev_info_t *)dev->data;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	/*
	 * Don't trigger two consecutive ndi_devi_offline on the same
	 * dip.
	 */
	if ((oestate == XenbusStateClosed) &&
	    (dev->otherend_state == XenbusStateClosed))
		return;

	/* handler reads the state from dev->otherend_state, not an arg */
	dev->otherend_state = oestate;
	(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
	    i_xvdi_oestate_handler, (void *)dip, DDI_SLEEP);
}
2012 
2013 /*ARGSUSED*/
2014 static void
2015 i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
2016     unsigned int len)
2017 {
2018 	dev_info_t *dip = (dev_info_t *)w->dev;
2019 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
2020 
2021 	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
2022 	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
2023 }
2024 
2025 static void
2026 i_xvdi_probe_path_handler(void *arg)
2027 {
2028 	dev_info_t *parent;
2029 	char *path = arg, *p = NULL;
2030 	int i, vdev, circ;
2031 	i_xd_cfg_t *xdcp;
2032 	boolean_t frontend;
2033 	domid_t dom;
2034 
2035 	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {
2036 
2037 		if ((xdcp->xs_path_fe != NULL) &&
2038 		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
2039 		    == 0)) {
2040 
2041 			frontend = B_TRUE;
2042 			p = path + strlen(xdcp->xs_path_fe);
2043 			break;
2044 		}
2045 
2046 		if ((xdcp->xs_path_be != NULL) &&
2047 		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
2048 		    == 0)) {
2049 
2050 			frontend = B_FALSE;
2051 			p = path + strlen(xdcp->xs_path_be);
2052 			break;
2053 		}
2054 
2055 	}
2056 
2057 	if (p == NULL) {
2058 		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
2059 		    "unexpected path prefix in %s", path);
2060 		goto done;
2061 	}
2062 
2063 	if (frontend) {
2064 		dom = DOMID_SELF;
2065 		if (sscanf(p, "/%d/", &vdev) != 1) {
2066 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2067 			    "i_xvdi_probe_path_handler: "
2068 			    "cannot parse frontend path %s",
2069 			    path);
2070 			goto done;
2071 		}
2072 	} else {
2073 		if (sscanf(p, "/%d/%d/", &dom, &vdev) != 2) {
2074 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2075 			    "i_xvdi_probe_path_handler: "
2076 			    "cannot parse backend path %s",
2077 			    path);
2078 			goto done;
2079 		}
2080 	}
2081 
2082 	parent = xendev_dip;
2083 	ASSERT(parent != NULL);
2084 
2085 	ndi_devi_enter(parent, &circ);
2086 
2087 	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
2088 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2089 		    "i_xvdi_probe_path_handler: create for %s", path);
2090 		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
2091 	} else {
2092 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2093 		    "i_xvdi_probe_path_handler: %s already exists", path);
2094 	}
2095 
2096 	ndi_devi_exit(parent, circ);
2097 
2098 done:
2099 	kmem_free(path, strlen(path) + 1);
2100 }
2101