xref: /titanic_50/usr/src/uts/common/xen/os/xvdi.c (revision 3fbbb872ea33adea240e8fd8c692f6d3131cc69b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Xen virtual device driver interfaces
31  */
32 
33 /*
34  * todo:
35  * + name space clean up:
36  *	xvdi_* - public xen interfaces, for use by all leaf drivers
37  *	xd_* - public xen data structures
38  *	i_xvdi_* - implementation private functions
39  *	xendev_* - xendev driver interfaces, both internal and in cb_ops/bus_ops
40  * + add mdb dcmds to dump ring status
41  * + implement xvdi_xxx to wrap xenbus_xxx read/write function
42  * + convert (xendev_ring_t *) into xvdi_ring_handle_t
43  */
44 #include <sys/conf.h>
45 #include <sys/param.h>
46 #include <sys/kmem.h>
47 #include <vm/seg_kmem.h>
48 #include <sys/debug.h>
49 #include <sys/modctl.h>
50 #include <sys/autoconf.h>
51 #include <sys/ddi_impldefs.h>
52 #include <sys/ddi_subrdefs.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/sunndi.h>
56 #include <sys/sunldi.h>
57 #include <sys/fs/dv_node.h>
58 #include <sys/avintr.h>
59 #include <sys/psm.h>
60 #include <sys/spl.h>
61 #include <sys/promif.h>
62 #include <sys/list.h>
63 #include <sys/bootconf.h>
64 #include <sys/bootsvcs.h>
65 #include <sys/bootinfo.h>
66 #include <sys/note.h>
67 #ifdef XPV_HVM_DRIVER
68 #include <sys/xpv_support.h>
69 #include <sys/hypervisor.h>
70 #include <public/grant_table.h>
71 #include <public/xen.h>
72 #include <public/io/xenbus.h>
73 #include <public/io/xs_wire.h>
74 #include <public/event_channel.h>
75 #include <public/io/xenbus.h>
76 #else /* XPV_HVM_DRIVER */
77 #include <sys/hypervisor.h>
78 #include <sys/xen_mmu.h>
79 #include <xen/sys/xenbus_impl.h>
80 #include <sys/evtchn_impl.h>
81 #endif /* XPV_HVM_DRIVER */
82 #include <sys/gnttab.h>
83 #include <xen/sys/xendev.h>
84 #include <vm/hat_i86.h>
85 #include <sys/scsi/generic/inquiry.h>
86 #include <util/sscanf.h>
87 #include <xen/public/io/xs_wire.h>
88 
89 
90 static void xvdi_ring_init_sring(xendev_ring_t *);
91 static void xvdi_ring_init_front_ring(xendev_ring_t *, size_t, size_t);
92 #ifndef XPV_HVM_DRIVER
93 static void xvdi_ring_init_back_ring(xendev_ring_t *, size_t, size_t);
94 #endif
95 static void xvdi_reinit_ring(dev_info_t *, grant_ref_t *, xendev_ring_t *);
96 
97 static int i_xvdi_add_watches(dev_info_t *);
98 static void i_xvdi_rem_watches(dev_info_t *);
99 
100 static int i_xvdi_add_watch_oestate(dev_info_t *);
101 static void i_xvdi_rem_watch_oestate(dev_info_t *);
102 static void i_xvdi_oestate_cb(struct xenbus_device *, XenbusState);
103 static void i_xvdi_oestate_handler(void *);
104 
105 static int i_xvdi_add_watch_hpstate(dev_info_t *);
106 static void i_xvdi_rem_watch_hpstate(dev_info_t *);
107 static void i_xvdi_hpstate_cb(struct xenbus_watch *, const char **,
108     unsigned int);
109 static void i_xvdi_hpstate_handler(void *);
110 
111 static int i_xvdi_add_watch_bepath(dev_info_t *);
112 static void i_xvdi_rem_watch_bepath(dev_info_t *);
static void i_xvdi_bepath_cb(struct xenbus_watch *, const char **,
    unsigned int);
115 
116 static void xendev_offline_device(void *);
117 
118 static void i_xvdi_probe_path_cb(struct xenbus_watch *, const char **,
119     unsigned int);
120 static void i_xvdi_probe_path_handler(void *);
121 
122 typedef struct xd_cfg {
123 	xendev_devclass_t devclass;
124 	char *xsdev;
125 	char *xs_path_fe;
126 	char *xs_path_be;
127 	char *node_fe;
128 	char *node_be;
129 	char *device_type;
130 	int xd_ipl;
131 	int flags;
132 } i_xd_cfg_t;
133 
134 #define	XD_DOM_ZERO	0x01	/* dom0 only. */
135 #define	XD_DOM_GUEST	0x02	/* Guest domains (i.e. non-dom0). */
136 #define	XD_DOM_IO	0x04	/* IO domains. */
137 
138 #define	XD_DOM_ALL	(XD_DOM_ZERO | XD_DOM_GUEST)
139 
140 static i_xd_cfg_t xdci[] = {
141 	{ XEN_CONSOLE, NULL, NULL, NULL, "xencons", NULL,
142 	    "console", IPL_CONS, XD_DOM_ALL, },
143 
144 	{ XEN_VNET, "vif", "device/vif", "backend/vif", "xnf", "xnb",
145 	    "network", IPL_VIF, XD_DOM_ALL, },
146 
147 	{ XEN_VBLK, "vbd", "device/vbd", "backend/vbd", "xdf", "xdb",
148 	    "block", IPL_VBD, XD_DOM_ALL, },
149 
150 	{ XEN_XENBUS, NULL, NULL, NULL, "xenbus", NULL,
151 	    NULL, 0, XD_DOM_ALL, },
152 
153 	{ XEN_DOMCAPS, NULL, NULL, NULL, "domcaps", NULL,
154 	    NULL, 0, XD_DOM_ALL, },
155 
156 	{ XEN_BALLOON, NULL, NULL, NULL, "balloon", NULL,
157 	    NULL, 0, XD_DOM_ALL, },
158 
159 	{ XEN_EVTCHN, NULL, NULL, NULL, "evtchn", NULL,
160 	    NULL, 0, XD_DOM_ZERO, },
161 
162 	{ XEN_PRIVCMD, NULL, NULL, NULL, "privcmd", NULL,
163 	    NULL, 0, XD_DOM_ZERO, },
164 };
165 #define	NXDC	(sizeof (xdci) / sizeof (xdci[0]))
166 
167 static void i_xvdi_enum_fe(dev_info_t *, i_xd_cfg_t *);
168 static void i_xvdi_enum_be(dev_info_t *, i_xd_cfg_t *);
169 static void i_xvdi_enum_worker(dev_info_t *, i_xd_cfg_t *, char *);
170 
171 /*
172  * Xen device channel device access and DMA attributes
173  */
174 static ddi_device_acc_attr_t xendev_dc_accattr = {
175 	DDI_DEVICE_ATTR_V0, DDI_NEVERSWAP_ACC, DDI_STRICTORDER_ACC
176 };
177 
178 static ddi_dma_attr_t xendev_dc_dmaattr = {
179 	DMA_ATTR_V0,		/* version of this structure */
180 	0,			/* lowest usable address */
181 	0xffffffffffffffffULL,	/* highest usable address */
182 	0x7fffffff,		/* maximum DMAable byte count */
183 	MMU_PAGESIZE,		/* alignment in bytes */
184 	0x7ff,			/* bitmap of burst sizes */
185 	1,			/* minimum transfer */
186 	0xffffffffU,		/* maximum transfer */
187 	0xffffffffffffffffULL,	/* maximum segment length */
188 	1,			/* maximum number of segments */
189 	1,			/* granularity */
190 	0,			/* flags (reserved) */
191 };
192 
193 static dev_info_t *xendev_dip = NULL;
194 
195 #define	XVDI_DBG_STATE	0x01
196 #define	XVDI_DBG_PROBE	0x02
197 
198 #ifdef DEBUG
199 int i_xvdi_debug = 0;
200 
201 #define	XVDI_DPRINTF(flag, format, ...)			\
202 {							\
203 	if (i_xvdi_debug & (flag))			\
204 		prom_printf((format), __VA_ARGS__);	\
205 }
206 #else
207 #define	XVDI_DPRINTF(flag, format, ...)
208 #endif /* DEBUG */
209 
210 static i_xd_cfg_t *
211 i_xvdi_devclass2cfg(xendev_devclass_t devclass)
212 {
213 	i_xd_cfg_t *xdcp;
214 	int i;
215 
216 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++)
217 		if (xdcp->devclass == devclass)
218 			return (xdcp);
219 
220 	return (NULL);
221 }
222 
223 int
224 xvdi_init_dev(dev_info_t *dip)
225 {
226 	xendev_devclass_t devcls;
227 	int vdevnum;
228 	domid_t domid;
229 	struct xendev_ppd *pdp;
230 	i_xd_cfg_t *xdcp;
231 	boolean_t backend;
232 	char xsnamebuf[TYPICALMAXPATHLEN];
233 	char *xsname;
234 
235 	devcls = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
236 	    DDI_PROP_DONTPASS, "devclass", XEN_INVAL);
237 	vdevnum = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
238 	    DDI_PROP_DONTPASS, "vdev", -1);
239 	domid = (domid_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
240 	    DDI_PROP_DONTPASS, "domain", DOMID_SELF);
241 
242 	backend = (domid != DOMID_SELF);
243 	xdcp = i_xvdi_devclass2cfg(devcls);
244 	if (xdcp->device_type != NULL)
245 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
246 		    "device_type", xdcp->device_type);
247 
248 	pdp = kmem_zalloc(sizeof (*pdp), KM_SLEEP);
249 	pdp->xd_domain = domid;
250 	pdp->xd_vdevnum = vdevnum;
251 	pdp->xd_devclass = devcls;
252 	pdp->xd_evtchn = INVALID_EVTCHN;
253 	mutex_init(&pdp->xd_lk, NULL, MUTEX_DRIVER, NULL);
254 	ddi_set_parent_data(dip, pdp);
255 
256 	/*
257 	 * devices that do not need to interact with xenstore
258 	 */
259 	if (vdevnum == -1) {
260 		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
261 		    "unit-address", "0");
262 		if (devcls == XEN_CONSOLE)
263 			(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
264 			    "pm-hardware-state", "needs-suspend-resume");
265 		return (DDI_SUCCESS);
266 	}
267 
268 	/*
269 	 * PV devices that need to probe xenstore
270 	 */
271 
272 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
273 	    "pm-hardware-state", "needs-suspend-resume");
274 
275 	xsname = xsnamebuf;
276 	if (!backend)
277 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
278 		    "%s/%d", xdcp->xs_path_fe, vdevnum);
279 	else
280 		(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
281 		    "%s/%d/%d", xdcp->xs_path_be, domid, vdevnum);
282 	if ((xenbus_read_driver_state(xsname) >= XenbusStateClosing)) {
283 		/* Don't try to init a dev that may be closing */
284 		mutex_destroy(&pdp->xd_lk);
285 		kmem_free(pdp, sizeof (*pdp));
286 		ddi_set_parent_data(dip, NULL);
287 		return (DDI_FAILURE);
288 	}
289 
290 	pdp->xd_xsdev.nodename = i_ddi_strdup(xsname, KM_SLEEP);
291 	pdp->xd_xsdev.devicetype = xdcp->xsdev;
292 	pdp->xd_xsdev.frontend = (backend ? 0 : 1);
293 	pdp->xd_xsdev.data = dip;
294 	pdp->xd_xsdev.otherend_id = (backend ? domid : -1);
295 	if (i_xvdi_add_watches(dip) != DDI_SUCCESS) {
296 		cmn_err(CE_WARN, "xvdi_init_dev: "
297 		    "cannot add watches for %s", xsname);
298 		xvdi_uninit_dev(dip);
299 		return (DDI_FAILURE);
300 	}
301 
302 	/*
303 	 * frontend device will use "unit-addr" as
304 	 * the bus address, which will be set here
305 	 */
306 	if (!backend) {
307 		void *prop_str;
308 		unsigned int prop_len, addr;
309 
310 		switch (devcls) {
311 		case XEN_VNET:
312 			if (xenbus_read(XBT_NULL, xsname, "mac", &prop_str,
313 			    &prop_len) == 0) {
314 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
315 				    dip, "mac", prop_str);
316 				kmem_free(prop_str, prop_len);
317 			}
318 			prop_str = NULL;
319 			if (xenbus_scanf(XBT_NULL, xsname, "handle", "%u",
320 			    &addr) == 0) {
321 				char unitaddr[9]; /* hold 32-bit hex */
322 
323 				(void) snprintf(unitaddr, 9, "%x", addr);
324 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
325 				    dip, "unit-address", unitaddr);
326 			}
327 			break;
328 		case XEN_VBLK:
329 			if (xenbus_read(XBT_NULL, pdp->xd_xsdev.otherend,
330 			    "dev", &prop_str, &prop_len) == 0) {
331 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
332 				    dip, "unit-address", prop_str);
333 				kmem_free(prop_str, prop_len);
334 			}
335 #ifdef XPV_HVM_DRIVER
336 			/*
337 			 * The mapping between the 'dev' name and the
338 			 * device ID maintained by Xenstore has to be
339 			 * tracked explicitly in HVM domains.
340 			 */
341 			prop_str = strrchr(pdp->xd_xsdev.otherend, '/');
342 			if (prop_str != NULL) {
343 				prop_str = ((caddr_t)prop_str) + 1;
344 				(void) ndi_prop_update_string(DDI_DEV_T_NONE,
345 				    dip, "xenstore-id", prop_str);
346 			}
347 #endif /* XPV_HVM_DRIVER */
348 			break;
349 		default:
350 			break;
351 		}
352 	}
353 
354 	return (DDI_SUCCESS);
355 }
356 
/*
 * Tear down the per-device state created by xvdi_init_dev().
 * Safe to call on a dip that was never initialized (parent data NULL).
 */
void
xvdi_uninit_dev(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	if (pdp != NULL) {
		/* Remove any registered callbacks. */
		xvdi_remove_event_handler(dip, NULL);

		/* Remove any registered watches. */
		i_xvdi_rem_watches(dip);

		/* tell other end to close */
		if (pdp->xd_xsdev.otherend_id != (domid_t)-1)
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);

		/* nodename was i_ddi_strdup()'ed in xvdi_init_dev() */
		if (pdp->xd_xsdev.nodename != NULL)
			kmem_free((char *)(pdp->xd_xsdev.nodename),
			    strlen(pdp->xd_xsdev.nodename) + 1);

		/* detach pdp before destroying it */
		ddi_set_parent_data(dip, NULL);

		mutex_destroy(&pdp->xd_lk);
		kmem_free(pdp, sizeof (*pdp));
	}
}
384 
385 /*
386  * Bind the event channel for this device instance.
387  * Currently we only support one evtchn per device instance.
388  */
389 int
390 xvdi_bind_evtchn(dev_info_t *dip, evtchn_port_t evtchn)
391 {
392 	struct xendev_ppd *pdp;
393 	domid_t oeid;
394 	int r;
395 
396 	pdp = ddi_get_parent_data(dip);
397 	ASSERT(pdp != NULL);
398 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
399 
400 	mutex_enter(&pdp->xd_lk);
401 	if (pdp->xd_devclass == XEN_CONSOLE) {
402 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
403 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
404 		} else {
405 			pdp->xd_evtchn = INVALID_EVTCHN;
406 			mutex_exit(&pdp->xd_lk);
407 			return (DDI_SUCCESS);
408 		}
409 	} else {
410 		oeid = pdp->xd_xsdev.otherend_id;
411 		if (oeid == (domid_t)-1) {
412 			mutex_exit(&pdp->xd_lk);
413 			return (DDI_FAILURE);
414 		}
415 
416 		if ((r = xen_bind_interdomain(oeid, evtchn, &pdp->xd_evtchn))) {
417 			xvdi_dev_error(dip, r, "bind event channel");
418 			mutex_exit(&pdp->xd_lk);
419 			return (DDI_FAILURE);
420 		}
421 	}
422 #ifndef XPV_HVM_DRIVER
423 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
424 #endif
425 	mutex_exit(&pdp->xd_lk);
426 
427 	return (DDI_SUCCESS);
428 }
429 
430 /*
431  * Allocate an event channel for this device instance.
432  * Currently we only support one evtchn per device instance.
433  */
434 int
435 xvdi_alloc_evtchn(dev_info_t *dip)
436 {
437 	struct xendev_ppd *pdp;
438 	domid_t oeid;
439 	int rv;
440 
441 	pdp = ddi_get_parent_data(dip);
442 	ASSERT(pdp != NULL);
443 	ASSERT(pdp->xd_evtchn == INVALID_EVTCHN);
444 
445 	mutex_enter(&pdp->xd_lk);
446 	if (pdp->xd_devclass == XEN_CONSOLE) {
447 		if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
448 			pdp->xd_evtchn = xen_info->console.domU.evtchn;
449 		} else {
450 			pdp->xd_evtchn = INVALID_EVTCHN;
451 			mutex_exit(&pdp->xd_lk);
452 			return (DDI_SUCCESS);
453 		}
454 	} else {
455 		oeid = pdp->xd_xsdev.otherend_id;
456 		if (oeid == (domid_t)-1) {
457 			mutex_exit(&pdp->xd_lk);
458 			return (DDI_FAILURE);
459 		}
460 
461 		if ((rv = xen_alloc_unbound_evtchn(oeid, &pdp->xd_evtchn))) {
462 			xvdi_dev_error(dip, rv, "bind event channel");
463 			mutex_exit(&pdp->xd_lk);
464 			return (DDI_FAILURE);
465 		}
466 	}
467 #ifndef XPV_HVM_DRIVER
468 	pdp->xd_ispec.intrspec_vec = ec_bind_evtchn_to_irq(pdp->xd_evtchn);
469 #endif
470 	mutex_exit(&pdp->xd_lk);
471 
472 	return (DDI_SUCCESS);
473 }
474 
475 /*
476  * Unbind the event channel for this device instance.
477  * Currently we only support one evtchn per device instance.
478  */
479 void
480 xvdi_free_evtchn(dev_info_t *dip)
481 {
482 	struct xendev_ppd *pdp;
483 
484 	pdp = ddi_get_parent_data(dip);
485 	ASSERT(pdp != NULL);
486 
487 	mutex_enter(&pdp->xd_lk);
488 	if (pdp->xd_evtchn != INVALID_EVTCHN) {
489 #ifndef XPV_HVM_DRIVER
490 		ec_unbind_irq(pdp->xd_ispec.intrspec_vec);
491 		pdp->xd_ispec.intrspec_vec = 0;
492 #endif
493 		pdp->xd_evtchn = INVALID_EVTCHN;
494 	}
495 	mutex_exit(&pdp->xd_lk);
496 }
497 
498 #ifndef XPV_HVM_DRIVER
499 /*
500  * Map an inter-domain communication ring for a virtual device.
501  * This is used by backend drivers.
502  */
503 int
504 xvdi_map_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
505     grant_ref_t gref, xendev_ring_t **ringpp)
506 {
507 	domid_t oeid;
508 	gnttab_map_grant_ref_t mapop;
509 	gnttab_unmap_grant_ref_t unmapop;
510 	caddr_t ringva;
511 	ddi_acc_hdl_t *ap;
512 	ddi_acc_impl_t *iap;
513 	xendev_ring_t *ring;
514 	int err;
515 	char errstr[] = "mapping in ring buffer";
516 
517 	ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
518 	oeid = xvdi_get_oeid(dip);
519 
520 	/* alloc va in backend dom for ring buffer */
521 	ringva = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
522 	    0, 0, 0, 0, VM_SLEEP);
523 
524 	/* map in ring page */
525 	hat_prepare_mapping(kas.a_hat, ringva);
526 	mapop.host_addr = (uint64_t)(uintptr_t)ringva;
527 	mapop.flags = GNTMAP_host_map;
528 	mapop.ref = gref;
529 	mapop.dom = oeid;
530 	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &mapop, 1);
531 	if (err) {
532 		xvdi_fatal_error(dip, err, errstr);
533 		goto errout1;
534 	}
535 
536 	if (mapop.status != 0) {
537 		xvdi_fatal_error(dip, err, errstr);
538 		goto errout2;
539 	}
540 	ring->xr_vaddr = ringva;
541 	ring->xr_grant_hdl = mapop.handle;
542 	ring->xr_gref = gref;
543 
544 	/*
545 	 * init an acc handle and associate it w/ this ring
546 	 * this is only for backend drivers. we get the memory by calling
547 	 * vmem_xalloc(), instead of calling any ddi function, so we have
548 	 * to init an acc handle by ourselves
549 	 */
550 	ring->xr_acc_hdl = impl_acc_hdl_alloc(KM_SLEEP, NULL);
551 	ap = impl_acc_hdl_get(ring->xr_acc_hdl);
552 	ap->ah_vers = VERS_ACCHDL;
553 	ap->ah_dip = dip;
554 	ap->ah_xfermodes = DDI_DMA_CONSISTENT;
555 	ap->ah_acc = xendev_dc_accattr;
556 	iap = (ddi_acc_impl_t *)ap->ah_platform_private;
557 	iap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR;
558 	impl_acc_hdl_init(ap);
559 	ap->ah_offset = 0;
560 	ap->ah_len = (off_t)PAGESIZE;
561 	ap->ah_addr = ring->xr_vaddr;
562 
563 	/* init backend ring */
564 	xvdi_ring_init_back_ring(ring, nentry, entrysize);
565 
566 	*ringpp = ring;
567 
568 	return (DDI_SUCCESS);
569 
570 errout2:
571 	/* unmap ring page */
572 	unmapop.host_addr = (uint64_t)(uintptr_t)ringva;
573 	unmapop.handle = ring->xr_grant_hdl;
574 	unmapop.dev_bus_addr = NULL;
575 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
576 	hat_release_mapping(kas.a_hat, ringva);
577 errout1:
578 	vmem_xfree(heap_arena, ringva, PAGESIZE);
579 	kmem_free(ring, sizeof (xendev_ring_t));
580 	return (DDI_FAILURE);
581 }
582 
583 /*
584  * Unmap a ring for a virtual device.
585  * This is used by backend drivers.
586  */
587 void
588 xvdi_unmap_ring(xendev_ring_t *ring)
589 {
590 	gnttab_unmap_grant_ref_t unmapop;
591 
592 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
593 
594 	impl_acc_hdl_free(ring->xr_acc_hdl);
595 	unmapop.host_addr = (uint64_t)(uintptr_t)ring->xr_vaddr;
596 	unmapop.handle = ring->xr_grant_hdl;
597 	unmapop.dev_bus_addr = NULL;
598 	(void) HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &unmapop, 1);
599 	hat_release_mapping(kas.a_hat, ring->xr_vaddr);
600 	vmem_xfree(heap_arena, ring->xr_vaddr, PAGESIZE);
601 	kmem_free(ring, sizeof (xendev_ring_t));
602 }
603 #endif /* XPV_HVM_DRIVER */
604 
605 /*
606  * Re-initialise an inter-domain communications ring for the backend domain.
607  * ring will be re-initialized after re-grant succeed
608  * ring will be freed if fails to re-grant access to backend domain
609  * so, don't keep useful data in the ring
610  * used only in frontend driver
611  */
612 static void
613 xvdi_reinit_ring(dev_info_t *dip, grant_ref_t *gref, xendev_ring_t *ringp)
614 {
615 	paddr_t rpaddr;
616 	maddr_t rmaddr;
617 
618 	ASSERT((ringp != NULL) && (ringp->xr_paddr != 0));
619 	rpaddr = ringp->xr_paddr;
620 
621 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? rpaddr : pa_to_ma(rpaddr);
622 	gnttab_grant_foreign_access_ref(ringp->xr_gref, xvdi_get_oeid(dip),
623 	    rmaddr >> PAGESHIFT, 0);
624 	*gref = ringp->xr_gref;
625 
626 	/* init frontend ring */
627 	xvdi_ring_init_sring(ringp);
628 	xvdi_ring_init_front_ring(ringp, ringp->xr_sring.fr.nr_ents,
629 	    ringp->xr_entry_size);
630 }
631 
632 /*
633  * allocate Xen inter-domain communications ring for Xen virtual devices
634  * used only in frontend driver
635  * if *ringpp is not NULL, we'll simply re-init it
636  */
637 int
638 xvdi_alloc_ring(dev_info_t *dip, size_t nentry, size_t entrysize,
639     grant_ref_t *gref, xendev_ring_t **ringpp)
640 {
641 	size_t len;
642 	xendev_ring_t *ring;
643 	ddi_dma_cookie_t dma_cookie;
644 	uint_t ncookies;
645 	grant_ref_t ring_gref;
646 	domid_t oeid;
647 	maddr_t rmaddr;
648 
649 	if (*ringpp) {
650 		xvdi_reinit_ring(dip, gref, *ringpp);
651 		return (DDI_SUCCESS);
652 	}
653 
654 	*ringpp = ring = kmem_zalloc(sizeof (xendev_ring_t), KM_SLEEP);
655 	oeid = xvdi_get_oeid(dip);
656 
657 	/*
658 	 * Allocate page for this ring buffer
659 	 */
660 	if (ddi_dma_alloc_handle(dip, &xendev_dc_dmaattr, DDI_DMA_SLEEP,
661 	    0, &ring->xr_dma_hdl) != DDI_SUCCESS)
662 		goto err;
663 
664 	if (ddi_dma_mem_alloc(ring->xr_dma_hdl, PAGESIZE,
665 	    &xendev_dc_accattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
666 	    &ring->xr_vaddr, &len, &ring->xr_acc_hdl) != DDI_SUCCESS) {
667 		ddi_dma_free_handle(&ring->xr_dma_hdl);
668 		goto err;
669 	}
670 
671 	if (ddi_dma_addr_bind_handle(ring->xr_dma_hdl, NULL,
672 	    ring->xr_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
673 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED) {
674 		ddi_dma_mem_free(&ring->xr_acc_hdl);
675 		ring->xr_vaddr = NULL;
676 		ddi_dma_free_handle(&ring->xr_dma_hdl);
677 		goto err;
678 	}
679 	ASSERT(ncookies == 1);
680 	ring->xr_paddr = dma_cookie.dmac_laddress;
681 	rmaddr = DOMAIN_IS_INITDOMAIN(xen_info) ? ring->xr_paddr :
682 	    pa_to_ma(ring->xr_paddr);
683 
684 	if ((ring_gref = gnttab_grant_foreign_access(oeid,
685 	    rmaddr >> PAGESHIFT, 0)) == (grant_ref_t)-1) {
686 		(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
687 		ddi_dma_mem_free(&ring->xr_acc_hdl);
688 		ring->xr_vaddr = NULL;
689 		ddi_dma_free_handle(&ring->xr_dma_hdl);
690 		goto err;
691 	}
692 	*gref = ring->xr_gref = ring_gref;
693 
694 	/* init frontend ring */
695 	xvdi_ring_init_sring(ring);
696 	xvdi_ring_init_front_ring(ring, nentry, entrysize);
697 
698 	return (DDI_SUCCESS);
699 
700 err:
701 	kmem_free(ring, sizeof (xendev_ring_t));
702 	return (DDI_FAILURE);
703 }
704 
705 /*
706  * Release ring buffers allocated for Xen devices
707  * used for frontend driver
708  */
709 void
710 xvdi_free_ring(xendev_ring_t *ring)
711 {
712 	ASSERT((ring != NULL) && (ring->xr_vaddr != NULL));
713 
714 	(void) gnttab_end_foreign_access_ref(ring->xr_gref, 0);
715 	(void) ddi_dma_unbind_handle(ring->xr_dma_hdl);
716 	ddi_dma_mem_free(&ring->xr_acc_hdl);
717 	ddi_dma_free_handle(&ring->xr_dma_hdl);
718 	kmem_free(ring, sizeof (xendev_ring_t));
719 }
720 
/*
 * Create (and attempt to online or bind) a devinfo node for a Xen
 * virtual device of the given class, for domain 'dom' and xenstore
 * virtual device number 'vdev' (-1 for soft devices with no store
 * presence).  Returns the new dip, or NULL if no driver exists for
 * the class or the device is already closing.  Caller must hold the
 * parent busy (DEVI_BUSY_OWNED).
 */
dev_info_t *
xvdi_create_dev(dev_info_t *parent, xendev_devclass_t devclass,
    domid_t dom, int vdev)
{
	dev_info_t *dip;
	boolean_t backend;
	i_xd_cfg_t *xdcp;
	char xsnamebuf[TYPICALMAXPATHLEN];
	char *type, *node = NULL, *xsname = NULL;
	unsigned int tlen;
	int ret;

	ASSERT(DEVI_BUSY_OWNED(parent));

	/* a non-self domain id means we are that domain's backend */
	backend = (dom != DOMID_SELF);
	xdcp = i_xvdi_devclass2cfg(devclass);
	ASSERT(xdcp != NULL);

	if (vdev != -1) {
		/* store-backed device: build its xenstore path */
		if (!backend) {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d", xdcp->xs_path_fe, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_fe;
		} else {
			(void) snprintf(xsnamebuf, sizeof (xsnamebuf),
			    "%s/%d/%d", xdcp->xs_path_be, dom, vdev);
			xsname = xsnamebuf;
			node = xdcp->node_be;
		}
	} else {
		/* soft device: no xenstore path, frontend node name */
		node = xdcp->node_fe;
	}

	/* Must have a driver to use. */
	if (node == NULL)
		return (NULL);

	/*
	 * We need to check the state of this device before we go
	 * further, otherwise we'll end up with a dead loop if
	 * anything goes wrong.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read_driver_state(xsname) >= XenbusStateClosing))
		return (NULL);

	ndi_devi_alloc_sleep(parent, node, DEVI_SID_NODEID, &dip);

	/*
	 * Driver binding uses the compatible property _before_ the
	 * node name, so we set the node name to the 'model' of the
	 * device (i.e. 'xnb' or 'xdb') and, if 'type' is present,
	 * encode both the model and the type in a compatible property
	 * (i.e. 'xnb,netfront' or 'xnb,SUNW_mac').  This allows a
	 * driver binding based on the <model,type> pair _before_ a
	 * binding based on the node name.
	 */
	if ((xsname != NULL) &&
	    (xenbus_read(XBT_NULL, xsname, "type", (void *)&type, &tlen)
	    == 0)) {
		size_t clen;
		char *c[1];

		/* "<node>,<type>" plus comma and NUL */
		clen = strlen(node) + strlen(type) + 2;
		c[0] = kmem_alloc(clen, KM_SLEEP);
		(void) snprintf(c[0], clen, "%s,%s", node, type);

		(void) ndi_prop_update_string_array(DDI_DEV_T_NONE,
		    dip, "compatible", (char **)c, 1);

		kmem_free(c[0], clen);
		kmem_free(type, tlen);
	}

	/* these properties are read back by xvdi_init_dev() */
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "devclass", devclass);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "domain", dom);
	(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, "vdev", vdev);

	/* online if the parent is attached, otherwise just bind a driver */
	if (i_ddi_devi_attached(parent))
		ret = ndi_devi_online(dip, 0);
	else
		ret = ndi_devi_bind_driver(dip, 0);
	if (ret != NDI_SUCCESS)
		(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);

	return (dip);
}
809 
810 /*
811  * xendev_enum_class()
812  */
813 void
814 xendev_enum_class(dev_info_t *parent, xendev_devclass_t devclass)
815 {
816 	i_xd_cfg_t *xdcp;
817 
818 	xdcp = i_xvdi_devclass2cfg(devclass);
819 	ASSERT(xdcp != NULL);
820 
821 	if (xdcp->xsdev == NULL) {
822 		int circ;
823 
824 		/*
825 		 * Don't need to probe this kind of device from the
826 		 * store, just create one if it doesn't exist.
827 		 */
828 
829 		ndi_devi_enter(parent, &circ);
830 		if (xvdi_find_dev(parent, devclass, DOMID_SELF, -1)
831 		    == NULL)
832 			(void) xvdi_create_dev(parent, devclass,
833 			    DOMID_SELF, -1);
834 		ndi_devi_exit(parent, circ);
835 	} else {
836 		/*
837 		 * Probe this kind of device from the store, both
838 		 * frontend and backend.
839 		 */
840 
841 		i_xvdi_enum_fe(parent, xdcp);
842 		i_xvdi_enum_be(parent, xdcp);
843 	}
844 }
845 
846 /*
847  * xendev_enum_all()
848  */
849 void
850 xendev_enum_all(dev_info_t *parent, boolean_t store_unavailable)
851 {
852 	int i;
853 	i_xd_cfg_t *xdcp;
854 	boolean_t dom0 = DOMAIN_IS_INITDOMAIN(xen_info);
855 	boolean_t domU = !dom0;
856 
857 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
858 
859 		if (dom0 && !(xdcp->flags & XD_DOM_ZERO))
860 			continue;
861 
862 		if (domU && !(xdcp->flags & XD_DOM_GUEST))
863 			continue;
864 
865 		/*
866 		 * Dom0 relies on watchpoints to create non-soft
867 		 * devices - don't attempt to iterate over the store.
868 		 */
869 		if (dom0 && (xdcp->xsdev != NULL))
870 			continue;
871 
872 		/*
873 		 * If the store is not yet available, don't attempt to
874 		 * iterate.
875 		 */
876 		if (store_unavailable && (xdcp->xsdev != NULL))
877 			continue;
878 
879 		xendev_enum_class(parent, xdcp->devclass);
880 	}
881 }
882 
883 xendev_devclass_t
884 xendev_nodename_to_devclass(char *nodename)
885 {
886 	int i;
887 	i_xd_cfg_t *xdcp;
888 
889 	/*
890 	 * This relies on the convention that variants of a base
891 	 * driver share the same prefix and that there are no drivers
892 	 * which share a common prefix with the name of any other base
893 	 * drivers.
894 	 *
895 	 * So for a base driver 'xnb' (which is the name listed in
896 	 * xdci) the variants all begin with the string 'xnb' (in fact
897 	 * they are 'xnbe', 'xnbo' and 'xnbu') and there are no other
898 	 * base drivers which have the prefix 'xnb'.
899 	 */
900 	ASSERT(nodename != NULL);
901 	for (i = 0, xdcp = xdci; i < NXDC; i++, xdcp++) {
902 		if (((xdcp->node_fe != NULL) &&
903 		    (strncmp(nodename, xdcp->node_fe,
904 		    strlen(xdcp->node_fe)) == 0)) ||
905 		    ((xdcp->node_be != NULL) &&
906 		    (strncmp(nodename, xdcp->node_be,
907 		    strlen(xdcp->node_be)) == 0)))
908 
909 			return (xdcp->devclass);
910 	}
911 	return (XEN_INVAL);
912 }
913 
914 int
915 xendev_devclass_ipl(xendev_devclass_t devclass)
916 {
917 	i_xd_cfg_t *xdcp;
918 
919 	xdcp = i_xvdi_devclass2cfg(devclass);
920 	ASSERT(xdcp != NULL);
921 
922 	return (xdcp->xd_ipl);
923 }
924 
925 /*
926  * Determine if a devinfo instance exists of a particular device
927  * class, domain and xenstore virtual device number.
928  */
929 dev_info_t *
930 xvdi_find_dev(dev_info_t *parent, xendev_devclass_t devclass,
931     domid_t dom, int vdev)
932 {
933 	dev_info_t *dip;
934 
935 	ASSERT(DEVI_BUSY_OWNED(parent));
936 
937 	switch (devclass) {
938 	case XEN_CONSOLE:
939 	case XEN_XENBUS:
940 	case XEN_DOMCAPS:
941 	case XEN_BALLOON:
942 	case XEN_EVTCHN:
943 	case XEN_PRIVCMD:
944 		/* Console and soft devices have no vdev. */
945 		vdev = -1;
946 		break;
947 	default:
948 		break;
949 	}
950 
951 	for (dip = ddi_get_child(parent); dip != NULL;
952 	    dip = ddi_get_next_sibling(dip)) {
953 		int *vdevnump, *domidp, *devclsp, vdevnum;
954 		uint_t ndomid, nvdevnum, ndevcls;
955 		xendev_devclass_t devcls;
956 		domid_t domid;
957 		struct xendev_ppd *pdp = ddi_get_parent_data(dip);
958 
959 		if (pdp == NULL) {
960 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
961 			    DDI_PROP_DONTPASS, "domain", &domidp, &ndomid) !=
962 			    DDI_PROP_SUCCESS)
963 				continue;
964 			ASSERT(ndomid == 1);
965 			domid = (domid_t)*domidp;
966 			ddi_prop_free(domidp);
967 
968 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
969 			    DDI_PROP_DONTPASS, "vdev", &vdevnump, &nvdevnum) !=
970 			    DDI_PROP_SUCCESS)
971 				continue;
972 			ASSERT(nvdevnum == 1);
973 			vdevnum = *vdevnump;
974 			ddi_prop_free(vdevnump);
975 
976 			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
977 			    DDI_PROP_DONTPASS, "devclass", &devclsp,
978 			    &ndevcls) != DDI_PROP_SUCCESS)
979 				continue;
980 			ASSERT(ndevcls == 1);
981 			devcls = (xendev_devclass_t)*devclsp;
982 			ddi_prop_free(devclsp);
983 		} else {
984 			domid = pdp->xd_domain;
985 			vdevnum = pdp->xd_vdevnum;
986 			devcls = pdp->xd_devclass;
987 		}
988 
989 		if ((domid == dom) && (vdevnum == vdev) && (devcls == devclass))
990 			return (dip);
991 	}
992 	return (NULL);
993 }
994 
995 int
996 xvdi_get_evtchn(dev_info_t *xdip)
997 {
998 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
999 
1000 	ASSERT(pdp != NULL);
1001 	return (pdp->xd_evtchn);
1002 }
1003 
1004 int
1005 xvdi_get_vdevnum(dev_info_t *xdip)
1006 {
1007 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1008 
1009 	ASSERT(pdp != NULL);
1010 	return (pdp->xd_vdevnum);
1011 }
1012 
1013 char *
1014 xvdi_get_xsname(dev_info_t *xdip)
1015 {
1016 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1017 
1018 	ASSERT(pdp != NULL);
1019 	return ((char *)(pdp->xd_xsdev.nodename));
1020 }
1021 
1022 char *
1023 xvdi_get_oename(dev_info_t *xdip)
1024 {
1025 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1026 
1027 	ASSERT(pdp != NULL);
1028 	if (pdp->xd_devclass == XEN_CONSOLE)
1029 		return (NULL);
1030 	return ((char *)(pdp->xd_xsdev.otherend));
1031 }
1032 
1033 struct xenbus_device *
1034 xvdi_get_xsd(dev_info_t *xdip)
1035 {
1036 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1037 
1038 	ASSERT(pdp != NULL);
1039 	return (&pdp->xd_xsdev);
1040 }
1041 
1042 domid_t
1043 xvdi_get_oeid(dev_info_t *xdip)
1044 {
1045 	struct xendev_ppd *pdp = ddi_get_parent_data(xdip);
1046 
1047 	ASSERT(pdp != NULL);
1048 	if (pdp->xd_devclass == XEN_CONSOLE)
1049 		return ((domid_t)-1);
1050 	return ((domid_t)(pdp->xd_xsdev.otherend_id));
1051 }
1052 
1053 void
1054 xvdi_dev_error(dev_info_t *dip, int errno, char *errstr)
1055 {
1056 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1057 
1058 	ASSERT(pdp != NULL);
1059 	xenbus_dev_error(&pdp->xd_xsdev, errno, errstr);
1060 }
1061 
1062 void
1063 xvdi_fatal_error(dev_info_t *dip, int errno, char *errstr)
1064 {
1065 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1066 
1067 	ASSERT(pdp != NULL);
1068 	xenbus_dev_fatal(&pdp->xd_xsdev, errno, errstr);
1069 }
1070 
/*
 * Taskq handler run when the other end's xenbus state changes.
 * Posts the new state to the driver if it registered an XS_OE_STATE
 * event handler; otherwise applies a default close/remove policy.
 */
static void
i_xvdi_oestate_handler(void *arg)
{
	dev_info_t *dip = arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	XenbusState oestate = pdp->xd_xsdev.otherend_state;
	ddi_eventcookie_t evc;

	mutex_enter(&pdp->xd_lk);

	if (pdp->xd_oe_ehid != NULL) {
		/* send notification to driver */
		if (ddi_get_eventcookie(dip, XS_OE_STATE,
		    &evc) == DDI_SUCCESS) {
			/* drop xd_lk across the callback to avoid deadlock */
			mutex_exit(&pdp->xd_lk);
			(void) ndi_post_event(dip, dip, evc, &oestate);
			mutex_enter(&pdp->xd_lk);
		}
	} else {
		/*
		 * take default action, if driver hasn't registered its
		 * event handler yet
		 */
		if (oestate == XenbusStateClosing) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
		} else if (oestate == XenbusStateClosed) {
			(void) xvdi_switch_state(dip, XBT_NULL,
			    XenbusStateClosed);
			(void) xvdi_post_event(dip, XEN_HP_REMOVE);
		}
	}

	mutex_exit(&pdp->xd_lk);

	/*
	 * We'll try to remove the devinfo node of this device if the
	 * other end has closed.
	 */
	if (oestate == XenbusStateClosed)
		(void) ddi_taskq_dispatch(DEVI(ddi_get_parent(dip))->devi_taskq,
		    xendev_offline_device, dip, DDI_SLEEP);
}
1114 
/*
 * Taskq handler run (via i_xvdi_hpstate_cb()) when this backend
 * device's "hotplug-status" xenstore node changes.  Reads the node
 * and posts the mapped xendev_hotplug_state_t to the driver's
 * XS_HP_STATE event handler, if one is registered.
 */
static void
i_xvdi_hpstate_handler(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
	ddi_eventcookie_t evc;
	char *hp_status;
	unsigned int hpl;

	mutex_enter(&pdp->xd_lk);
	if ((ddi_get_eventcookie(dip, XS_HP_STATE, &evc) == DDI_SUCCESS) &&
	    (xenbus_read(XBT_NULL, pdp->xd_hp_watch.node, "",
	    (void *)&hp_status, &hpl) == 0)) {

		/* any value other than "connected" maps to Unrecognized */
		xendev_hotplug_state_t new_state = Unrecognized;

		if (strcmp(hp_status, "connected") == 0)
			new_state = Connected;

		/* drop the lock before calling back into the driver */
		mutex_exit(&pdp->xd_lk);

		(void) ndi_post_event(dip, dip, evc, &new_state);
		kmem_free(hp_status, hpl);
		return;
	}
	mutex_exit(&pdp->xd_lk);
}
1142 
1143 void
1144 xvdi_notify_oe(dev_info_t *dip)
1145 {
1146 	struct xendev_ppd *pdp;
1147 
1148 	pdp = ddi_get_parent_data(dip);
1149 	ASSERT(pdp->xd_evtchn != INVALID_EVTCHN);
1150 	ec_notify_via_evtchn(pdp->xd_evtchn);
1151 }
1152 
1153 static void
1154 i_xvdi_bepath_cb(struct xenbus_watch *w, const char **vec, unsigned int len)
1155 {
1156 	dev_info_t *dip = (dev_info_t *)w->dev;
1157 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1158 	char *be = NULL;
1159 	unsigned int bel;
1160 
1161 	ASSERT(len > XS_WATCH_PATH);
1162 	ASSERT(vec[XS_WATCH_PATH] != NULL);
1163 
1164 	/*
1165 	 * If the backend is not the same as that we already stored,
1166 	 * re-set our watch for its' state.
1167 	 */
1168 	if ((xenbus_read(XBT_NULL, "", vec[XS_WATCH_PATH], (void *)be, &bel)
1169 	    == 0) && (strcmp(be, pdp->xd_xsdev.otherend) != 0))
1170 		(void) i_xvdi_add_watch_oestate(dip);
1171 
1172 	if (be != NULL) {
1173 		ASSERT(bel > 0);
1174 		kmem_free(be, bel);
1175 	}
1176 }
1177 
/*
 * Begin watching the other end's XenbusState for this device.
 * Creates (on first use) the single-threaded taskq that delivers
 * state-change events, then registers the xenbus watch via
 * talk_to_otherend().  Caller must hold xd_lk.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.nodename != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering other end state change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * xenstore change under the path that it is watching. If two
	 * changes happen consecutively in a very short amount of
	 * time, it is likely that the driver will see only the last
	 * one.
	 */
	if (pdp->xd_oe_taskq == NULL)
		if ((pdp->xd_oe_taskq = ddi_taskq_create(dip,
		    "xendev_oe_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	/*
	 * Watch for changes to the XenbusState of otherend.
	 */
	pdp->xd_xsdev.otherend_state = XenbusStateUnknown;
	pdp->xd_xsdev.otherend_changed = i_xvdi_oestate_cb;

	if (talk_to_otherend(&pdp->xd_xsdev) != 0) {
		/* undo any partial setup (watch, taskq) on failure */
		i_xvdi_rem_watch_oestate(dip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1218 
/*
 * Tear down the other-end state watch: unregister the xenbus watch,
 * destroy the delivery taskq (after draining any running handler),
 * and release the cached path strings.  Caller must hold xd_lk; the
 * lock is dropped temporarily around the blocking unregister/destroy
 * calls.
 */
static void
i_xvdi_rem_watch_oestate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	struct xenbus_device *dev;

	pdp = ddi_get_parent_data(dip);
	ASSERT(pdp != NULL);
	ASSERT(mutex_owned(&pdp->xd_lk));

	dev = &pdp->xd_xsdev;

	/* Unwatch for changes to XenbusState of otherend */
	if (dev->otherend_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&dev->otherend_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* make sure no event handler is running */
	if (pdp->xd_oe_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_oe_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_oe_taskq = NULL;
	}

	/* clean up */
	dev->otherend_state = XenbusStateUnknown;
	dev->otherend_id = (domid_t)-1;
	if (dev->otherend_watch.node != NULL)
		kmem_free((void *)dev->otherend_watch.node,
		    strlen(dev->otherend_watch.node) + 1);
	dev->otherend_watch.node = NULL;
	if (dev->otherend != NULL)
		kmem_free((void *)dev->otherend, strlen(dev->otherend) + 1);
	dev->otherend = NULL;
}
1257 
/*
 * Begin watching the "hotplug-status" xenstore node of this backend
 * device.  Creates (on first use) the single-threaded taskq that
 * delivers hotplug events, then registers the watch.  Backend
 * devices only; caller must hold xd_lk.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
i_xvdi_add_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/*
	 * Create taskq for delivering hotplug status change event to
	 * this device later.
	 *
	 * Set nthreads to 1 to make sure that events can be delivered
	 * in order.
	 *
	 * Note: It is _not_ guaranteed that driver can see every
	 * hotplug status change under the path that it is
	 * watching. If two changes happen consecutively in a very
	 * short amount of time, it is likely that the driver only
	 * sees the last one.
	 */
	if (pdp->xd_hp_taskq == NULL)
		if ((pdp->xd_hp_taskq = ddi_taskq_create(dip,
		    "xendev_hp_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL)
			return (DDI_FAILURE);

	if (pdp->xd_hp_watch.node == NULL) {
		size_t len;
		char *path;

		ASSERT(pdp->xd_xsdev.nodename != NULL);

		/* watch "<nodename>/hotplug-status" */
		len = strlen(pdp->xd_xsdev.nodename) +
		    strlen("/hotplug-status") + 1;
		path = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(path, len, "%s/hotplug-status",
		    pdp->xd_xsdev.nodename);

		pdp->xd_hp_watch.node = path;
		pdp->xd_hp_watch.callback = i_xvdi_hpstate_cb;
		pdp->xd_hp_watch.dev = (struct xenbus_device *)dip; /* yuck! */
		if (register_xenbus_watch(&pdp->xd_hp_watch) != 0) {
			i_xvdi_rem_watch_hpstate(dip);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
1308 
/*
 * Tear down the "hotplug-status" watch for this backend device:
 * unregister the watch, drain/destroy the delivery taskq, and free
 * the watched path.  Caller must hold xd_lk; the lock is dropped
 * temporarily around the blocking unregister/destroy calls.
 */
static void
i_xvdi_rem_watch_hpstate(dev_info_t *dip)
{
	struct xendev_ppd *pdp;
	pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 0);
	ASSERT(mutex_owned(&pdp->xd_lk));

	/* Unwatch for changes to "hotplug-status" node for backend device. */
	if (pdp->xd_hp_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_hp_watch);
		mutex_enter(&pdp->xd_lk);
	}

	/* Make sure no event handler is running. */
	if (pdp->xd_hp_taskq != NULL) {
		mutex_exit(&pdp->xd_lk);
		ddi_taskq_destroy(pdp->xd_hp_taskq);
		mutex_enter(&pdp->xd_lk);
		pdp->xd_hp_taskq = NULL;
	}

	/* Clean up. */
	if (pdp->xd_hp_watch.node != NULL) {
		kmem_free((void *)pdp->xd_hp_watch.node,
		    strlen(pdp->xd_hp_watch.node) + 1);
		pdp->xd_hp_watch.node = NULL;
	}
}
1341 
1342 static int
1343 i_xvdi_add_watches(dev_info_t *dip)
1344 {
1345 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1346 
1347 	ASSERT(pdp != NULL);
1348 
1349 	mutex_enter(&pdp->xd_lk);
1350 
1351 	if (i_xvdi_add_watch_oestate(dip) != DDI_SUCCESS) {
1352 		mutex_exit(&pdp->xd_lk);
1353 		return (DDI_FAILURE);
1354 	}
1355 
1356 	if (pdp->xd_xsdev.frontend == 1) {
1357 		/*
1358 		 * Frontend devices must watch for the backend path
1359 		 * changing.
1360 		 */
1361 		if (i_xvdi_add_watch_bepath(dip) != DDI_SUCCESS)
1362 			goto unwatch_and_fail;
1363 	} else {
1364 		/*
1365 		 * Backend devices must watch for hotplug events.
1366 		 */
1367 		if (i_xvdi_add_watch_hpstate(dip) != DDI_SUCCESS)
1368 			goto unwatch_and_fail;
1369 	}
1370 
1371 	mutex_exit(&pdp->xd_lk);
1372 
1373 	return (DDI_SUCCESS);
1374 
1375 unwatch_and_fail:
1376 	i_xvdi_rem_watch_oestate(dip);
1377 	mutex_exit(&pdp->xd_lk);
1378 
1379 	return (DDI_FAILURE);
1380 }
1381 
1382 static void
1383 i_xvdi_rem_watches(dev_info_t *dip)
1384 {
1385 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1386 
1387 	ASSERT(pdp != NULL);
1388 
1389 	mutex_enter(&pdp->xd_lk);
1390 
1391 	i_xvdi_rem_watch_oestate(dip);
1392 
1393 	if (pdp->xd_xsdev.frontend == 1)
1394 		i_xvdi_rem_watch_bepath(dip);
1395 	else
1396 		i_xvdi_rem_watch_hpstate(dip);
1397 
1398 	mutex_exit(&pdp->xd_lk);
1399 }
1400 
1401 static int
1402 i_xvdi_add_watch_bepath(dev_info_t *dip)
1403 {
1404 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1405 
1406 	ASSERT(pdp != NULL);
1407 	ASSERT(pdp->xd_xsdev.frontend == 1);
1408 
1409 	/*
1410 	 * Frontend devices need to watch for the backend path changing.
1411 	 */
1412 	if (pdp->xd_bepath_watch.node == NULL) {
1413 		size_t len;
1414 		char *path;
1415 
1416 		ASSERT(pdp->xd_xsdev.nodename != NULL);
1417 
1418 		len = strlen(pdp->xd_xsdev.nodename) + strlen("/backend") + 1;
1419 		path = kmem_alloc(len, KM_SLEEP);
1420 		(void) snprintf(path, len, "%s/backend",
1421 		    pdp->xd_xsdev.nodename);
1422 
1423 		pdp->xd_bepath_watch.node = path;
1424 		pdp->xd_bepath_watch.callback = i_xvdi_bepath_cb;
1425 		pdp->xd_bepath_watch.dev = (struct xenbus_device *)dip;
1426 		if (register_xenbus_watch(&pdp->xd_bepath_watch) != 0) {
1427 			kmem_free(path, len);
1428 			pdp->xd_bepath_watch.node = NULL;
1429 			return (DDI_FAILURE);
1430 		}
1431 	}
1432 
1433 	return (DDI_SUCCESS);
1434 }
1435 
/*
 * Stop watching the "backend" node of this frontend device and free
 * the watched path.  Caller must hold xd_lk; the lock is dropped
 * around the blocking unregister call.
 */
static void
i_xvdi_rem_watch_bepath(dev_info_t *dip)
{
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	ASSERT(pdp != NULL);
	ASSERT(pdp->xd_xsdev.frontend == 1);
	ASSERT(mutex_owned(&pdp->xd_lk));

	if (pdp->xd_bepath_watch.node != NULL) {
		mutex_exit(&pdp->xd_lk);
		unregister_xenbus_watch(&pdp->xd_bepath_watch);
		mutex_enter(&pdp->xd_lk);

		kmem_free((void *)(pdp->xd_bepath_watch.node),
		    strlen(pdp->xd_bepath_watch.node) + 1);
		pdp->xd_bepath_watch.node = NULL;
	}
}
1455 
1456 int
1457 xvdi_switch_state(dev_info_t *dip, xenbus_transaction_t xbt,
1458     XenbusState newState)
1459 {
1460 	int rv;
1461 	struct xendev_ppd *pdp;
1462 
1463 	pdp = ddi_get_parent_data(dip);
1464 	ASSERT(pdp != NULL);
1465 
1466 	XVDI_DPRINTF(XVDI_DBG_STATE,
1467 	    "xvdi_switch_state: dip 0x%p moves to %d",
1468 	    (void *)dip, newState);
1469 
1470 	rv = xenbus_switch_state(&pdp->xd_xsdev, xbt, newState);
1471 	if (rv > 0)
1472 		cmn_err(CE_WARN, "xvdi_switch_state: change state failed");
1473 
1474 	return (rv);
1475 }
1476 
1477 /*
1478  * Notify hotplug script running in userland
1479  */
1480 int
1481 xvdi_post_event(dev_info_t *dip, xendev_hotplug_cmd_t hpc)
1482 {
1483 	struct xendev_ppd *pdp;
1484 	nvlist_t *attr_list = NULL;
1485 	i_xd_cfg_t *xdcp;
1486 	sysevent_id_t eid;
1487 	int err;
1488 	char devname[256]; /* XXPV dme: ? */
1489 
1490 	pdp = ddi_get_parent_data(dip);
1491 	ASSERT(pdp != NULL);
1492 
1493 	xdcp = i_xvdi_devclass2cfg(pdp->xd_devclass);
1494 	ASSERT(xdcp != NULL);
1495 
1496 	(void) snprintf(devname, sizeof (devname) - 1, "%s%d",
1497 	    ddi_driver_name(dip),  ddi_get_instance(dip));
1498 
1499 	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME, KM_NOSLEEP);
1500 	if (err != DDI_SUCCESS)
1501 		goto failure;
1502 
1503 	err = nvlist_add_int32(attr_list, "domain", pdp->xd_domain);
1504 	if (err != DDI_SUCCESS)
1505 		goto failure;
1506 	err = nvlist_add_int32(attr_list, "vdev", pdp->xd_vdevnum);
1507 	if (err != DDI_SUCCESS)
1508 		goto failure;
1509 	err = nvlist_add_string(attr_list, "devclass", xdcp->xsdev);
1510 	if (err != DDI_SUCCESS)
1511 		goto failure;
1512 	err = nvlist_add_string(attr_list, "device", devname);
1513 	if (err != DDI_SUCCESS)
1514 		goto failure;
1515 	err = nvlist_add_string(attr_list, "fob",
1516 	    ((pdp->xd_xsdev.frontend == 1) ? "frontend" : "backend"));
1517 	if (err != DDI_SUCCESS)
1518 		goto failure;
1519 
1520 	switch (hpc) {
1521 	case XEN_HP_ADD:
1522 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1523 		    "add", attr_list, &eid, DDI_NOSLEEP);
1524 		break;
1525 	case XEN_HP_REMOVE:
1526 		err = ddi_log_sysevent(dip, DDI_VENDOR_SUNW, "EC_xendev",
1527 		    "remove", attr_list, &eid, DDI_NOSLEEP);
1528 		break;
1529 	default:
1530 		err = DDI_FAILURE;
1531 		goto failure;
1532 	}
1533 
1534 failure:
1535 	if (attr_list != NULL)
1536 		nvlist_free(attr_list);
1537 
1538 	return (err);
1539 }
1540 
/*
 * Watch callback fired when a device-class path changes in the
 * store.  Duplicates the changed path and hands it to
 * i_xvdi_probe_path_handler() on the xpvd nexus taskq; the handler
 * frees the copy.
 */
/* ARGSUSED */
static void
i_xvdi_probe_path_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	char *path;

	/*
	 * Lazily locate the xpvd nexus.  NOTE(review): if the lookup
	 * fails, xendev_dip stays NULL and the DEVI() dereference
	 * below would panic -- presumably xpvd is always attached by
	 * the time watches fire; confirm.
	 */
	if (xendev_dip == NULL)
		xendev_dip = ddi_find_devinfo("xpvd", -1, 0);

	path = i_ddi_strdup((char *)vec[XS_WATCH_PATH], KM_SLEEP);

	(void) ddi_taskq_dispatch(DEVI(xendev_dip)->devi_taskq,
	    i_xvdi_probe_path_handler, (void *)path, DDI_SLEEP);
}
1556 
1557 static void
1558 i_xvdi_watch_device(char *path)
1559 {
1560 	struct xenbus_watch *w;
1561 
1562 	ASSERT(path != NULL);
1563 
1564 	w = kmem_zalloc(sizeof (*w), KM_SLEEP);
1565 	w->node = path;
1566 	w->callback = &i_xvdi_probe_path_cb;
1567 	w->dev = NULL;
1568 
1569 	if (register_xenbus_watch(w) != 0) {
1570 		cmn_err(CE_WARN, "i_xvdi_watch_device: "
1571 		    "cannot set watch on %s", path);
1572 		kmem_free(w, sizeof (*w));
1573 		return;
1574 	}
1575 }
1576 
1577 void
1578 xvdi_watch_devices(int newstate)
1579 {
1580 	int devclass;
1581 
1582 	/*
1583 	 * Watch for devices being created in the store.
1584 	 */
1585 	if (newstate == XENSTORE_DOWN)
1586 		return;
1587 	for (devclass = 0; devclass < NXDC; devclass++) {
1588 		if (xdci[devclass].xs_path_fe != NULL)
1589 			i_xvdi_watch_device(xdci[devclass].xs_path_fe);
1590 		if (xdci[devclass].xs_path_be != NULL)
1591 			i_xvdi_watch_device(xdci[devclass].xs_path_be);
1592 	}
1593 }
1594 
1595 /*
1596  * Iterate over the store looking for backend devices to create.
1597  */
1598 static void
1599 i_xvdi_enum_be(dev_info_t *parent, i_xd_cfg_t *xdcp)
1600 {
1601 	char **domains;
1602 	unsigned int ndomains;
1603 	int ldomains, i;
1604 
1605 	if ((domains = xenbus_directory(XBT_NULL, xdcp->xs_path_be, "",
1606 	    &ndomains)) == NULL)
1607 		return;
1608 
1609 	for (i = 0, ldomains = 0; i < ndomains; i++) {
1610 		ldomains += strlen(domains[i]) + 1 + sizeof (char *);
1611 
1612 		i_xvdi_enum_worker(parent, xdcp, domains[i]);
1613 	}
1614 	kmem_free(domains, ldomains);
1615 }
1616 
1617 /*
1618  * Iterate over the store looking for frontend devices to create.
1619  */
1620 static void
1621 i_xvdi_enum_fe(dev_info_t *parent, i_xd_cfg_t *xdcp)
1622 {
1623 	i_xvdi_enum_worker(parent, xdcp, NULL);
1624 }
1625 
1626 static void
1627 i_xvdi_enum_worker(dev_info_t *parent, i_xd_cfg_t *xdcp,
1628     char *domain)
1629 {
1630 	char *path, *domain_path, *ep;
1631 	char **devices;
1632 	unsigned int ndevices;
1633 	int ldevices, j, circ;
1634 	domid_t dom;
1635 
1636 	if (domain == NULL) {
1637 		dom = DOMID_SELF;
1638 		path = xdcp->xs_path_fe;
1639 		domain_path = "";
1640 	} else {
1641 		(void) ddi_strtol(domain, &ep, 0, (long *)&dom);
1642 		path = xdcp->xs_path_be;
1643 		domain_path = domain;
1644 	}
1645 
1646 	if ((devices = xenbus_directory(XBT_NULL, path, domain_path,
1647 	    &ndevices)) == NULL)
1648 		return;
1649 
1650 	for (j = 0, ldevices = 0; j < ndevices; j++) {
1651 		int vdev;
1652 
1653 		ldevices += strlen(devices[j]) + 1 + sizeof (char *);
1654 		(void) ddi_strtol(devices[j], &ep, 0, (long *)&vdev);
1655 
1656 		ndi_devi_enter(parent, &circ);
1657 
1658 		if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev)
1659 		    == NULL)
1660 			(void) xvdi_create_dev(parent, xdcp->devclass,
1661 			    dom, vdev);
1662 
1663 		ndi_devi_exit(parent, circ);
1664 	}
1665 	kmem_free(devices, ldevices);
1666 }
1667 
1668 /*
1669  * Leaf drivers should call this in their detach() routine during suspend.
1670  */
1671 void
1672 xvdi_suspend(dev_info_t *dip)
1673 {
1674 	i_xvdi_rem_watches(dip);
1675 }
1676 
1677 /*
1678  * Leaf drivers should call this in their attach() routine during resume.
1679  */
1680 int
1681 xvdi_resume(dev_info_t *dip)
1682 {
1683 	return (i_xvdi_add_watches(dip));
1684 }
1685 
1686 /*
1687  * Add event handler for the leaf driver
1688  * to handle event triggered by the change in xenstore
1689  */
1690 int
1691 xvdi_add_event_handler(dev_info_t *dip, char *name,
1692     void (*evthandler)(dev_info_t *, ddi_eventcookie_t, void *, void *))
1693 {
1694 	ddi_eventcookie_t ecv;
1695 	struct xendev_ppd *pdp = ddi_get_parent_data(dip);
1696 	ddi_callback_id_t *cbid;
1697 
1698 	ASSERT(pdp != NULL);
1699 
1700 	mutex_enter(&pdp->xd_lk);
1701 
1702 	if (strcmp(name, XS_OE_STATE) == 0) {
1703 		ASSERT(pdp->xd_xsdev.otherend != NULL);
1704 
1705 		cbid = &pdp->xd_oe_ehid;
1706 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1707 		if (pdp->xd_xsdev.frontend == 1) {
1708 			mutex_exit(&pdp->xd_lk);
1709 			return (DDI_FAILURE);
1710 		}
1711 
1712 		ASSERT(pdp->xd_hp_watch.node != NULL);
1713 
1714 		cbid = &pdp->xd_hp_ehid;
1715 	} else {
1716 		/* Unsupported watch. */
1717 		mutex_exit(&pdp->xd_lk);
1718 		return (DDI_FAILURE);
1719 	}
1720 
1721 	/*
1722 	 * No event handler provided, take default action to handle
1723 	 * event.
1724 	 */
1725 	if (evthandler == NULL) {
1726 		mutex_exit(&pdp->xd_lk);
1727 		return (DDI_SUCCESS);
1728 	}
1729 
1730 	ASSERT(*cbid == NULL);
1731 
1732 	if (ddi_get_eventcookie(dip, name, &ecv) != DDI_SUCCESS) {
1733 		cmn_err(CE_WARN, "failed to find %s cookie for %s@%s",
1734 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1735 		mutex_exit(&pdp->xd_lk);
1736 		return (DDI_FAILURE);
1737 	}
1738 	if (ddi_add_event_handler(dip, ecv, evthandler, NULL, cbid)
1739 	    != DDI_SUCCESS) {
1740 		cmn_err(CE_WARN, "failed to add %s event handler for %s@%s",
1741 		    name, ddi_get_name(dip), ddi_get_name_addr(dip));
1742 		*cbid = NULL;
1743 		mutex_exit(&pdp->xd_lk);
1744 		return (DDI_FAILURE);
1745 	}
1746 
1747 	mutex_exit(&pdp->xd_lk);
1748 
1749 	return (DDI_SUCCESS);
1750 }
1751 
1752 /*
1753  * Remove event handler for the leaf driver and unwatch xenstore
1754  * so, driver will not be notified when xenstore entry changed later
1755  */
1756 void
1757 xvdi_remove_event_handler(dev_info_t *dip, char *name)
1758 {
1759 	struct xendev_ppd *pdp;
1760 	boolean_t rem_oe = B_FALSE, rem_hp = B_FALSE;
1761 	ddi_callback_id_t oeid = NULL, hpid = NULL;
1762 
1763 	pdp = ddi_get_parent_data(dip);
1764 	ASSERT(pdp != NULL);
1765 
1766 	if (name == NULL) {
1767 		rem_oe = B_TRUE;
1768 		rem_hp = B_TRUE;
1769 	} else if (strcmp(name, XS_OE_STATE) == 0) {
1770 		rem_oe = B_TRUE;
1771 	} else if (strcmp(name, XS_HP_STATE) == 0) {
1772 		rem_hp = B_TRUE;
1773 	} else {
1774 		cmn_err(CE_WARN, "event %s not supported, cannot remove", name);
1775 		return;
1776 	}
1777 
1778 	mutex_enter(&pdp->xd_lk);
1779 
1780 	if (rem_oe && (pdp->xd_oe_ehid != NULL)) {
1781 		oeid = pdp->xd_oe_ehid;
1782 		pdp->xd_oe_ehid = NULL;
1783 	}
1784 
1785 	if (rem_hp && (pdp->xd_hp_ehid != NULL)) {
1786 		hpid = pdp->xd_hp_ehid;
1787 		pdp->xd_hp_ehid = NULL;
1788 	}
1789 
1790 	mutex_exit(&pdp->xd_lk);
1791 
1792 	if (oeid != NULL)
1793 		(void) ddi_remove_event_handler(oeid);
1794 	if (hpid != NULL)
1795 		(void) ddi_remove_event_handler(hpid);
1796 }
1797 
1798 
1799 /*
1800  * common ring interfaces
1801  */
1802 
#define	FRONT_RING(_ringp)	(&(_ringp)->xr_sring.fr)
#define	BACK_RING(_ringp)	(&(_ringp)->xr_sring.br)
/*
 * Fixed: this previously expanded to RING_SIZE(FRONT_RING(ringp)),
 * referencing the caller's variable "ringp" rather than the macro
 * parameter "_ringp" -- it only worked because every caller happened
 * to name its argument "ringp".
 */
#define	GET_RING_SIZE(_ringp)	RING_SIZE(FRONT_RING(_ringp))
#define	GET_RING_ENTRY_FE(_ringp, _idx)		\
	(FRONT_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
#define	GET_RING_ENTRY_BE(_ringp, _idx)		\
	(BACK_RING(_ringp)->sring->ring +	\
	(_ringp)->xr_entry_size * ((_idx) & (GET_RING_SIZE(_ringp) - 1)))
1812 
1813 unsigned int
1814 xvdi_ring_avail_slots(xendev_ring_t *ringp)
1815 {
1816 	comif_ring_fe_t *frp;
1817 	comif_ring_be_t *brp;
1818 
1819 	if (ringp->xr_frontend) {
1820 		frp = FRONT_RING(ringp);
1821 		return (GET_RING_SIZE(ringp) -
1822 		    (frp->req_prod_pvt - frp->rsp_cons));
1823 	} else {
1824 		brp = BACK_RING(ringp);
1825 		return (GET_RING_SIZE(ringp) -
1826 		    (brp->rsp_prod_pvt - brp->req_cons));
1827 	}
1828 }
1829 
/*
 * Backend only: return non-zero if the frontend has published
 * requests we have not yet consumed.  The second clause refuses to
 * run the consumer more than a full ring ahead of our response
 * producer.
 */
int
xvdi_ring_has_unconsumed_requests(xendev_ring_t *ringp)
{
	comif_ring_be_t *brp;

	ASSERT(!ringp->xr_frontend);
	brp = BACK_RING(ringp);
	return ((brp->req_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &brp->sring->req_prod)) &&
	    ((brp->req_cons - brp->rsp_prod_pvt) != RING_SIZE(brp)));
}
1841 
/*
 * Frontend only: return non-zero if requests we produced are still
 * outstanding, i.e. the backend's response producer has not caught
 * up with our private request producer.
 */
int
xvdi_ring_has_incomp_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->req_prod_pvt !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
1852 
/*
 * Frontend only: return non-zero if the backend has published
 * responses we have not yet consumed.
 */
int
xvdi_ring_has_unconsumed_responses(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;

	ASSERT(ringp->xr_frontend);
	frp = FRONT_RING(ringp);
	return (frp->rsp_cons !=
	    ddi_get32(ringp->xr_acc_hdl, &frp->sring->rsp_prod));
}
1863 
/* NOTE: req_event will be increased as needed */
/*
 * Return the next request slot, or NULL if none is available.
 * Frontends claim a free slot for a new request (advancing
 * req_prod_pvt); backends consume a posted request (advancing
 * req_cons), re-checking once after raising req_event so a request
 * produced concurrently is not missed (the
 * RING_FINAL_CHECK_FOR_REQUESTS() pattern).
 */
void *
xvdi_ring_get_request(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (ringp->xr_frontend) {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		if (!RING_FULL(frp))
			return (GET_RING_ENTRY_FE(ringp, frp->req_prod_pvt++));
		else
			return (NULL);
	} else {
		/* for backend ring */
		brp = BACK_RING(ringp);
		/* RING_FINAL_CHECK_FOR_REQUESTS() */
		if (xvdi_ring_has_unconsumed_requests(ringp))
			return (GET_RING_ENTRY_BE(ringp, brp->req_cons++));
		else {
			/* ask to be notified at the next request, then
			 * re-check to close the race with the producer */
			ddi_put32(ringp->xr_acc_hdl, &brp->sring->req_event,
			    brp->req_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_requests(ringp))
				return (GET_RING_ENTRY_BE(ringp,
				    brp->req_cons++));
			else
				return (NULL);
		}
	}
}
1896 
/*
 * Frontend only: publish privately produced requests to the shared
 * ring (the RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() pattern).  Returns
 * non-zero if the backend may have stopped checking past req_event
 * and should be notified via the event channel.
 */
int
xvdi_ring_push_request(xendev_ring_t *ringp)
{
	RING_IDX old, new, reqevt;
	comif_ring_fe_t *frp;

	/* only frontend should be able to push request */
	ASSERT(ringp->xr_frontend);

	/* RING_PUSH_REQUEST_AND_CHECK_NOTIFY() */
	frp = FRONT_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_prod);
	new = frp->req_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &frp->sring->req_prod, new);
	/* make the new producer index visible before reading req_event */
	membar_enter();
	reqevt = ddi_get32(ringp->xr_acc_hdl, &frp->sring->req_event);
	return ((RING_IDX)(new - reqevt) < (RING_IDX)(new - old));
}
1915 
/* NOTE: rsp_event will be increased as needed */
/*
 * Return the next response slot, or NULL if none is available.
 * Backends claim a free slot for a new response (advancing
 * rsp_prod_pvt); frontends consume a posted response (advancing
 * rsp_cons), re-checking once after raising rsp_event so a response
 * produced concurrently is not missed (the
 * RING_FINAL_CHECK_FOR_RESPONSES() pattern).
 */
void *
xvdi_ring_get_response(xendev_ring_t *ringp)
{
	comif_ring_fe_t *frp;
	comif_ring_be_t *brp;

	if (!ringp->xr_frontend) {
		/* for backend ring */
		brp = BACK_RING(ringp);
		return (GET_RING_ENTRY_BE(ringp, brp->rsp_prod_pvt++));
	} else {
		/* for frontend ring */
		frp = FRONT_RING(ringp);
		/* RING_FINAL_CHECK_FOR_RESPONSES() */
		if (xvdi_ring_has_unconsumed_responses(ringp))
			return (GET_RING_ENTRY_FE(ringp, frp->rsp_cons++));
		else {
			/* ask to be notified at the next response, then
			 * re-check to close the race with the producer */
			ddi_put32(ringp->xr_acc_hdl, &frp->sring->rsp_event,
			    frp->rsp_cons + 1);
			membar_enter();
			if (xvdi_ring_has_unconsumed_responses(ringp))
				return (GET_RING_ENTRY_FE(ringp,
				    frp->rsp_cons++));
			else
				return (NULL);
		}
	}
}
1945 
/*
 * Backend only: publish privately produced responses to the shared
 * ring (the RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() pattern).
 * Returns non-zero if the frontend may have stopped checking past
 * rsp_event and should be notified via the event channel.
 */
int
xvdi_ring_push_response(xendev_ring_t *ringp)
{
	RING_IDX old, new, rspevt;
	comif_ring_be_t *brp;

	/* only backend should be able to push response */
	ASSERT(!ringp->xr_frontend);

	/* RING_PUSH_RESPONSE_AND_CHECK_NOTIFY() */
	brp = BACK_RING(ringp);
	old = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_prod);
	new = brp->rsp_prod_pvt;
	ddi_put32(ringp->xr_acc_hdl, &brp->sring->rsp_prod, new);
	/* make the new producer index visible before reading rsp_event */
	membar_enter();
	rspevt = ddi_get32(ringp->xr_acc_hdl, &brp->sring->rsp_event);
	return ((RING_IDX)(new - rspevt) < (RING_IDX)(new - old));
}
1964 
1965 static void
1966 xvdi_ring_init_sring(xendev_ring_t *ringp)
1967 {
1968 	ddi_acc_handle_t acchdl;
1969 	comif_sring_t *xsrp;
1970 	int i;
1971 
1972 	xsrp = (comif_sring_t *)ringp->xr_vaddr;
1973 	acchdl = ringp->xr_acc_hdl;
1974 
1975 	/* shared ring initialization */
1976 	ddi_put32(acchdl, &xsrp->req_prod, 0);
1977 	ddi_put32(acchdl, &xsrp->rsp_prod, 0);
1978 	ddi_put32(acchdl, &xsrp->req_event, 1);
1979 	ddi_put32(acchdl, &xsrp->rsp_event, 1);
1980 	for (i = 0; i < sizeof (xsrp->pad); i++)
1981 		ddi_put8(acchdl, xsrp->pad + i, 0);
1982 }
1983 
1984 static void
1985 xvdi_ring_init_front_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
1986 {
1987 	comif_ring_fe_t *xfrp;
1988 
1989 	xfrp = &ringp->xr_sring.fr;
1990 	xfrp->req_prod_pvt = 0;
1991 	xfrp->rsp_cons = 0;
1992 	xfrp->nr_ents = nentry;
1993 	xfrp->sring = (comif_sring_t *)ringp->xr_vaddr;
1994 
1995 	ringp->xr_frontend = 1;
1996 	ringp->xr_entry_size = entrysize;
1997 }
1998 
1999 #ifndef XPV_HVM_DRIVER
2000 static void
2001 xvdi_ring_init_back_ring(xendev_ring_t *ringp, size_t nentry, size_t entrysize)
2002 {
2003 	comif_ring_be_t *xbrp;
2004 
2005 	xbrp = &ringp->xr_sring.br;
2006 	xbrp->rsp_prod_pvt = 0;
2007 	xbrp->req_cons = 0;
2008 	xbrp->nr_ents = nentry;
2009 	xbrp->sring = (comif_sring_t *)ringp->xr_vaddr;
2010 
2011 	ringp->xr_frontend = 0;
2012 	ringp->xr_entry_size = entrysize;
2013 }
2014 #endif /* XPV_HVM_DRIVER */
2015 
/*
 * Taskq handler: attempt to take a device offline and remove its
 * devinfo node once the other end has closed.
 */
static void
xendev_offline_device(void *arg)
{
	dev_info_t *dip = (dev_info_t *)arg;
	char devname[MAXNAMELEN] = {0};

	/*
	 * This is currently the only chance to delete a devinfo node, which
	 * is _not_ always successful.
	 */
	(void) ddi_deviname(dip, devname);
	/* skip the leading '/' that ddi_deviname() produces */
	(void) devfs_clean(ddi_get_parent(dip), devname + 1, DV_CLEAN_FORCE);
	(void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
}
2030 
/*
 * xenbus callback fired when the other end's XenbusState changes.
 * Records the new state and hands delivery off to
 * i_xvdi_oestate_handler() on the device's taskq.
 */
static void
i_xvdi_oestate_cb(struct xenbus_device *dev, XenbusState oestate)
{
	dev_info_t *dip = (dev_info_t *)dev->data;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	/*
	 * Don't trigger two consecutive ndi_devi_offline on the same
	 * dip.
	 */
	if ((oestate == XenbusStateClosed) &&
	    (dev->otherend_state == XenbusStateClosed))
		return;

	dev->otherend_state = oestate;
	(void) ddi_taskq_dispatch(pdp->xd_oe_taskq,
	    i_xvdi_oestate_handler, (void *)dip, DDI_SLEEP);
}
2049 
/*
 * xenbus watch callback fired when this backend's "hotplug-status"
 * node changes; hands delivery off to i_xvdi_hpstate_handler() on
 * the device's taskq.
 */
/*ARGSUSED*/
static void
i_xvdi_hpstate_cb(struct xenbus_watch *w, const char **vec,
    unsigned int len)
{
	dev_info_t *dip = (dev_info_t *)w->dev;
	struct xendev_ppd *pdp = ddi_get_parent_data(dip);

	(void) ddi_taskq_dispatch(pdp->xd_hp_taskq,
	    i_xvdi_hpstate_handler, (void *)dip, DDI_SLEEP);
}
2061 
2062 static void
2063 i_xvdi_probe_path_handler(void *arg)
2064 {
2065 	dev_info_t *parent;
2066 	char *path = arg, *p = NULL;
2067 	int i, vdev, circ;
2068 	i_xd_cfg_t *xdcp;
2069 	boolean_t frontend;
2070 	domid_t dom;
2071 
2072 	for (i = 0, xdcp = &xdci[0]; i < NXDC; i++, xdcp++) {
2073 
2074 		if ((xdcp->xs_path_fe != NULL) &&
2075 		    (strncmp(path, xdcp->xs_path_fe, strlen(xdcp->xs_path_fe))
2076 		    == 0)) {
2077 
2078 			frontend = B_TRUE;
2079 			p = path + strlen(xdcp->xs_path_fe);
2080 			break;
2081 		}
2082 
2083 		if ((xdcp->xs_path_be != NULL) &&
2084 		    (strncmp(path, xdcp->xs_path_be, strlen(xdcp->xs_path_be))
2085 		    == 0)) {
2086 
2087 			frontend = B_FALSE;
2088 			p = path + strlen(xdcp->xs_path_be);
2089 			break;
2090 		}
2091 
2092 	}
2093 
2094 	if (p == NULL) {
2095 		cmn_err(CE_WARN, "i_xvdi_probe_path_handler: "
2096 		    "unexpected path prefix in %s", path);
2097 		goto done;
2098 	}
2099 
2100 	if (frontend) {
2101 		dom = DOMID_SELF;
2102 		if (sscanf(p, "/%d/", &vdev) != 1) {
2103 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2104 			    "i_xvdi_probe_path_handler: "
2105 			    "cannot parse frontend path %s",
2106 			    path);
2107 			goto done;
2108 		}
2109 	} else {
2110 		if (sscanf(p, "/%d/%d/", &dom, &vdev) != 2) {
2111 			XVDI_DPRINTF(XVDI_DBG_PROBE,
2112 			    "i_xvdi_probe_path_handler: "
2113 			    "cannot parse backend path %s",
2114 			    path);
2115 			goto done;
2116 		}
2117 	}
2118 
2119 	parent = xendev_dip;
2120 	ASSERT(parent != NULL);
2121 
2122 	ndi_devi_enter(parent, &circ);
2123 
2124 	if (xvdi_find_dev(parent, xdcp->devclass, dom, vdev) == NULL) {
2125 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2126 		    "i_xvdi_probe_path_handler: create for %s", path);
2127 		(void) xvdi_create_dev(parent, xdcp->devclass, dom, vdev);
2128 	} else {
2129 		XVDI_DPRINTF(XVDI_DBG_PROBE,
2130 		    "i_xvdi_probe_path_handler: %s already exists", path);
2131 	}
2132 
2133 	ndi_devi_exit(parent, circ);
2134 
2135 done:
2136 	kmem_free(path, strlen(path) + 1);
2137 }
2138