1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Layered driver support.
27  */
28 
29 #include <sys/atomic.h>
30 #include <sys/types.h>
31 #include <sys/t_lock.h>
32 #include <sys/param.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/buf.h>
37 #include <sys/cred.h>
38 #include <sys/uio.h>
39 #include <sys/vnode.h>
40 #include <sys/fs/snode.h>
41 #include <sys/open.h>
42 #include <sys/kmem.h>
43 #include <sys/file.h>
44 #include <sys/bootconf.h>
45 #include <sys/pathname.h>
46 #include <sys/bitmap.h>
47 #include <sys/stat.h>
48 #include <sys/dditypes.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/sunndi.h>
53 #include <sys/esunddi.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunldi.h>
56 #include <sys/sunldi_impl.h>
57 #include <sys/errno.h>
58 #include <sys/debug.h>
59 #include <sys/modctl.h>
60 #include <sys/var.h>
61 #include <vm/seg_vn.h>
62 
63 #include <sys/stropts.h>
64 #include <sys/strsubr.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
67 #include <sys/kstr.h>
68 
69 /*
70  * Device contract related
71  */
72 #include <sys/contract_impl.h>
73 #include <sys/contract/device_impl.h>
74 
75 /*
76  * Define macros to manipulate snode, vnode, and open device flags
77  */
78 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
79 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
80 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
81 
82 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
83 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
84 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
85 
86 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
87 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
88 
89 /*
90  * Define macros for accessing layered driver hash structures
91  */
92 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
93 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
94 
95 /*
96  * Define layered handle flags used in the lh_type field
97  */
98 #define	LH_STREAM	(0x1)	/* handle to a streams device */
99 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
100 
101 /*
102  * Define macro for devid property lookups
103  */
104 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
105 				DDI_PROP_TYPE_STRING | DDI_PROP_CANSLEEP)
106 
107 /*
108  * Dummy string for NDI events
109  */
110 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
111 
112 static void ldi_ev_lock(void);
113 static void ldi_ev_unlock(void);
114 
115 #ifdef	LDI_OBSOLETE_EVENT
116 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
117 #endif
118 
119 
120 /*
121  * globals
122  */
123 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
124 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
125 
126 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
127 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
128 static size_t			ldi_handle_hash_count;
129 
130 static struct ldi_ev_callback_list ldi_ev_callback_list;
131 
132 static uint32_t ldi_ev_id_pool = 0;
133 
134 struct ldi_ev_cookie {
135 	char *ck_evname;
136 	uint_t ck_sync;
137 	uint_t ck_ctype;
138 };
139 
140 static struct ldi_ev_cookie ldi_ev_cookies[] = {
141 	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
142 	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
143 	{ LDI_EV_DEVICE_REMOVE, 0, 0},
144 	{ NULL}			/* must terminate list */
145 };
146 
147 void
148 ldi_init(void)
149 {
150 	int i;
151 
152 	ldi_handle_hash_count = 0;
153 	for (i = 0; i < LH_HASH_SZ; i++) {
154 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 		ldi_handle_hash[i] = NULL;
156 	}
157 	for (i = 0; i < LI_HASH_SZ; i++) {
158 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 		ldi_ident_hash[i] = NULL;
160 	}
161 
162 	/*
163 	 * Initialize the LDI event subsystem
164 	 */
165 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 	ldi_ev_callback_list.le_busy = 0;
168 	ldi_ev_callback_list.le_thread = NULL;
169 	list_create(&ldi_ev_callback_list.le_head,
170 	    sizeof (ldi_ev_callback_impl_t),
171 	    offsetof(ldi_ev_callback_impl_t, lec_list));
172 }
173 
174 /*
175  * LDI ident manipulation functions
176  */
177 static uint_t
178 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 {
180 	if (dip != NULL) {
181 		uintptr_t k = (uintptr_t)dip;
182 		k >>= (int)highbit(sizeof (struct dev_info));
183 		return ((uint_t)k);
184 	} else if (dev != DDI_DEV_T_NONE) {
185 		return (modid + getminor(dev) + getmajor(dev));
186 	} else {
187 		return (modid);
188 	}
189 }
190 
191 static struct ldi_ident **
192 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 {
194 	struct ldi_ident	**lipp = NULL;
195 	uint_t			index = LI_HASH(modid, dip, dev);
196 
197 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198 
199 	for (lipp = &(ldi_ident_hash[index]);
200 	    (*lipp != NULL);
201 	    lipp = &((*lipp)->li_next)) {
202 		if (((*lipp)->li_modid == modid) &&
203 		    ((*lipp)->li_major == major) &&
204 		    ((*lipp)->li_dip == dip) &&
205 		    ((*lipp)->li_dev == dev))
206 			break;
207 	}
208 
209 	ASSERT(lipp != NULL);
210 	return (lipp);
211 }
212 
213 static struct ldi_ident *
214 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
215 {
216 	struct ldi_ident	*lip, **lipp;
217 	modid_t			modid;
218 	uint_t			index;
219 
220 	ASSERT(mod_name != NULL);
221 
222 	/* get the module id */
223 	modid = mod_name_to_modid(mod_name);
224 	ASSERT(modid != -1);
225 
226 	/* allocate a new ident in case we need it */
227 	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
228 
229 	/* search the hash for a matching ident */
230 	index = LI_HASH(modid, dip, dev);
231 	mutex_enter(&ldi_ident_hash_lock[index]);
232 	lipp = ident_find_ref_nolock(modid, dip, dev, major);
233 
234 	if (*lipp != NULL) {
235 		/* we found an ident in the hash */
236 		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
237 		(*lipp)->li_ref++;
238 		mutex_exit(&ldi_ident_hash_lock[index]);
239 		kmem_free(lip, sizeof (struct ldi_ident));
240 		return (*lipp);
241 	}
242 
243 	/* initialize the new ident */
244 	lip->li_next = NULL;
245 	lip->li_ref = 1;
246 	lip->li_modid = modid;
247 	lip->li_major = major;
248 	lip->li_dip = dip;
249 	lip->li_dev = dev;
250 	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
251 
252 	/* add it to the ident hash */
253 	lip->li_next = ldi_ident_hash[index];
254 	ldi_ident_hash[index] = lip;
255 
256 	mutex_exit(&ldi_ident_hash_lock[index]);
257 	return (lip);
258 }
259 
260 static void
261 ident_hold(struct ldi_ident *lip)
262 {
263 	uint_t			index;
264 
265 	ASSERT(lip != NULL);
266 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
267 	mutex_enter(&ldi_ident_hash_lock[index]);
268 	ASSERT(lip->li_ref > 0);
269 	lip->li_ref++;
270 	mutex_exit(&ldi_ident_hash_lock[index]);
271 }
272 
273 static void
274 ident_release(struct ldi_ident *lip)
275 {
276 	struct ldi_ident	**lipp;
277 	uint_t			index;
278 
279 	ASSERT(lip != NULL);
280 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
281 	mutex_enter(&ldi_ident_hash_lock[index]);
282 
283 	ASSERT(lip->li_ref > 0);
284 	if (--lip->li_ref > 0) {
285 		/* there are more references to this ident */
286 		mutex_exit(&ldi_ident_hash_lock[index]);
287 		return;
288 	}
289 
290 	/* this was the last reference/open for this ident.  free it. */
291 	lipp = ident_find_ref_nolock(
292 	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
293 
294 	ASSERT((lipp != NULL) && (*lipp != NULL));
295 	*lipp = lip->li_next;
296 	mutex_exit(&ldi_ident_hash_lock[index]);
297 	kmem_free(lip, sizeof (struct ldi_ident));
298 }
299 
300 /*
301  * LDI handle manipulation functions
302  */
303 static uint_t
304 handle_hash_func(void *vp)
305 {
306 	uintptr_t k = (uintptr_t)vp;
307 	k >>= (int)highbit(sizeof (vnode_t));
308 	return ((uint_t)k);
309 }
310 
311 static struct ldi_handle **
312 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
313 {
314 	struct ldi_handle	**lhpp = NULL;
315 	uint_t			index = LH_HASH(vp);
316 
317 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
318 
319 	for (lhpp = &(ldi_handle_hash[index]);
320 	    (*lhpp != NULL);
321 	    lhpp = &((*lhpp)->lh_next)) {
322 		if (((*lhpp)->lh_ident == ident) &&
323 		    ((*lhpp)->lh_vp == vp))
324 			break;
325 	}
326 
327 	ASSERT(lhpp != NULL);
328 	return (lhpp);
329 }
330 
331 static struct ldi_handle *
332 handle_find(vnode_t *vp, struct ldi_ident *ident)
333 {
334 	struct ldi_handle	**lhpp;
335 	int			index = LH_HASH(vp);
336 
337 	mutex_enter(&ldi_handle_hash_lock[index]);
338 	lhpp = handle_find_ref_nolock(vp, ident);
339 	mutex_exit(&ldi_handle_hash_lock[index]);
340 	ASSERT(lhpp != NULL);
341 	return (*lhpp);
342 }
343 
344 static struct ldi_handle *
345 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
346 {
347 	struct ldi_handle	*lhp, **lhpp;
348 	uint_t			index;
349 
350 	ASSERT((vp != NULL) && (ident != NULL));
351 
352 	/* allocate a new handle in case we need it */
353 	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
354 
355 	/* search the hash for a matching handle */
356 	index = LH_HASH(vp);
357 	mutex_enter(&ldi_handle_hash_lock[index]);
358 	lhpp = handle_find_ref_nolock(vp, ident);
359 
360 	if (*lhpp != NULL) {
361 		/* we found a handle in the hash */
362 		(*lhpp)->lh_ref++;
363 		mutex_exit(&ldi_handle_hash_lock[index]);
364 
365 		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
366 		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
367 		    (void *)*lhpp, (void *)ident, (void *)vp,
368 		    mod_major_to_name(getmajor(vp->v_rdev)),
369 		    getminor(vp->v_rdev)));
370 
371 		kmem_free(lhp, sizeof (struct ldi_handle));
372 		return (*lhpp);
373 	}
374 
375 	/* initialize the new handle */
376 	lhp->lh_ref = 1;
377 	lhp->lh_vp = vp;
378 	lhp->lh_ident = ident;
379 #ifdef	LDI_OBSOLETE_EVENT
380 	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
381 #endif
382 
383 	/* set the device type for this handle */
384 	lhp->lh_type = 0;
385 	if (vp->v_stream) {
386 		ASSERT(vp->v_type == VCHR);
387 		lhp->lh_type |= LH_STREAM;
388 	} else {
389 		lhp->lh_type |= LH_CBDEV;
390 	}
391 
392 	/* get holds on other objects */
393 	ident_hold(ident);
394 	ASSERT(vp->v_count >= 1);
395 	VN_HOLD(vp);
396 
397 	/* add it to the handle hash */
398 	lhp->lh_next = ldi_handle_hash[index];
399 	ldi_handle_hash[index] = lhp;
400 	atomic_add_long(&ldi_handle_hash_count, 1);
401 
402 	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
403 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
404 	    (void *)lhp, (void *)ident, (void *)vp,
405 	    mod_major_to_name(getmajor(vp->v_rdev)),
406 	    getminor(vp->v_rdev)));
407 
408 	mutex_exit(&ldi_handle_hash_lock[index]);
409 	return (lhp);
410 }
411 
412 static void
413 handle_release(struct ldi_handle *lhp)
414 {
415 	struct ldi_handle	**lhpp;
416 	uint_t			index;
417 
418 	ASSERT(lhp != NULL);
419 
420 	index = LH_HASH(lhp->lh_vp);
421 	mutex_enter(&ldi_handle_hash_lock[index]);
422 
423 	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
424 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
425 	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
426 	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
427 	    getminor(lhp->lh_vp->v_rdev)));
428 
429 	ASSERT(lhp->lh_ref > 0);
430 	if (--lhp->lh_ref > 0) {
431 		/* there are more references to this handle */
432 		mutex_exit(&ldi_handle_hash_lock[index]);
433 		return;
434 	}
435 
436 	/* this was the last reference/open for this handle.  free it. */
437 	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
438 	ASSERT((lhpp != NULL) && (*lhpp != NULL));
439 	*lhpp = lhp->lh_next;
440 	atomic_add_long(&ldi_handle_hash_count, -1);
441 	mutex_exit(&ldi_handle_hash_lock[index]);
442 
443 	VN_RELE(lhp->lh_vp);
444 	ident_release(lhp->lh_ident);
445 #ifdef	LDI_OBSOLETE_EVENT
446 	mutex_destroy(lhp->lh_lock);
447 #endif
448 	kmem_free(lhp, sizeof (struct ldi_handle));
449 }
450 
451 #ifdef	LDI_OBSOLETE_EVENT
452 /*
453  * LDI event manipulation functions
454  */
455 static void
456 handle_event_add(ldi_event_t *lep)
457 {
458 	struct ldi_handle *lhp = lep->le_lhp;
459 
460 	ASSERT(lhp != NULL);
461 
462 	mutex_enter(lhp->lh_lock);
463 	if (lhp->lh_events == NULL) {
464 		lhp->lh_events = lep;
465 		mutex_exit(lhp->lh_lock);
466 		return;
467 	}
468 
469 	lep->le_next = lhp->lh_events;
470 	lhp->lh_events->le_prev = lep;
471 	lhp->lh_events = lep;
472 	mutex_exit(lhp->lh_lock);
473 }
474 
475 static void
476 handle_event_remove(ldi_event_t *lep)
477 {
478 	struct ldi_handle *lhp = lep->le_lhp;
479 
480 	ASSERT(lhp != NULL);
481 
482 	mutex_enter(lhp->lh_lock);
483 	if (lep->le_prev)
484 		lep->le_prev->le_next = lep->le_next;
485 	if (lep->le_next)
486 		lep->le_next->le_prev = lep->le_prev;
487 	if (lhp->lh_events == lep)
488 		lhp->lh_events = lep->le_next;
489 	mutex_exit(lhp->lh_lock);
490 
491 }
492 
493 static void
494 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
495     void *arg, void *bus_impldata)
496 {
497 	ldi_event_t *lep = (ldi_event_t *)arg;
498 
499 	ASSERT(lep != NULL);
500 
501 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
502 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
503 	    (void *)dip, (void *)event_cookie, (void *)lep));
504 
505 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
506 }
507 #endif
508 
509 /*
510  * LDI open helper functions
511  */
512 
513 /* get a vnode to a device by dev_t and otyp */
514 static int
515 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
516 {
517 	dev_info_t		*dip;
518 	vnode_t			*vp;
519 
520 	/* sanity check required input parameters */
521 	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
522 		return (EINVAL);
523 
524 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
525 		return (ENODEV);
526 
527 	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
528 	spec_assoc_vp_with_devi(vp, dip);
529 	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
530 
531 	*vpp = vp;
532 	return (0);
533 }
534 
535 /* get a vnode to a device by pathname */
536 int
537 ldi_vp_from_name(char *path, vnode_t **vpp)
538 {
539 	vnode_t			*vp = NULL;
540 	int			ret;
541 
542 	/* sanity check required input parameters */
543 	if ((path == NULL) || (vpp == NULL))
544 		return (EINVAL);
545 
546 	if (modrootloaded) {
547 		cred_t *saved_cred = curthread->t_cred;
548 
549 		/* we don't want lookupname to fail because of credentials */
550 		curthread->t_cred = kcred;
551 
552 		/*
553 		 * all lookups should be done in the global zone.  but
554 		 * lookupnameat() won't actually do this if an absolute
555 		 * path is passed in.  since the ldi interfaces require an
556 		 * absolute path we pass lookupnameat() a pointer to
557 		 * the character after the leading '/' and tell it to
558 		 * start searching at the current system root directory.
559 		 */
560 		ASSERT(*path == '/');
561 		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
562 		    &vp, rootdir);
563 
564 		/* restore this thread's credentials */
565 		curthread->t_cred = saved_cred;
566 
567 		if (ret == 0) {
568 			if (!vn_matchops(vp, spec_getvnodeops()) ||
569 			    !VTYP_VALID(vp->v_type)) {
570 				VN_RELE(vp);
571 				return (ENXIO);
572 			}
573 		}
574 	}
575 
576 	if (vp == NULL) {
577 		dev_info_t	*dip;
578 		dev_t		dev;
579 		int		spec_type;
580 
581 		/*
582 		 * Root is not mounted, the minor node is not specified,
583 		 * or an OBP path has been specified.
584 		 */
585 
586 		/*
587 		 * Determine if path can be pruned to produce an
588 		 * OBP or devfs path for resolve_pathname.
589 		 */
590 		if (strncmp(path, "/devices/", 9) == 0)
591 			path += strlen("/devices");
592 
593 		/*
594 		 * if no minor node was specified the DEFAULT minor node
595 		 * will be returned.  if there is no DEFAULT minor node
596 		 * one will be fabricated of type S_IFCHR with the minor
597 		 * number equal to the instance number.
598 		 */
599 		ret = resolve_pathname(path, &dip, &dev, &spec_type);
600 		if (ret != 0)
601 			return (ENODEV);
602 
603 		ASSERT(STYP_VALID(spec_type));
604 		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
605 		spec_assoc_vp_with_devi(vp, dip);
606 		ddi_release_devi(dip);
607 	}
608 
609 	*vpp = vp;
610 	return (0);
611 }
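
/*
 * Illustrative sketch (not part of the original source): a kernel caller
 * that already has a device pathname can resolve it to a held vnode with
 * ldi_vp_from_name() and must release that hold with VN_RELE() when it is
 * done.  The path below is a made-up example.
 *
 *	vnode_t	*vp;
 *
 *	if (ldi_vp_from_name("/devices/pseudo/foo@0:ctl", &vp) == 0) {
 *		... hand vp to ldi_open_by_vp() or simply ...
 *		VN_RELE(vp);
 *	}
 */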
612 
613 static int
614 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
615 {
616 	char		*devidstr;
617 	ddi_prop_t	*propp;
618 
619 	/* encode the devid as a string for property comparison */
620 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
621 		return (0);
622 
623 	/*
624 	 * Search for the devid.  For speed and ease in locking this
625 	 * code directly uses the property implementation.  See
626 	 * ddi_common_devid_to_devlist() for a comment as to why.
627 	 */
628 	mutex_enter(&(DEVI(dip)->devi_lock));
629 
630 	/* check if there is a DDI_DEV_T_NONE devid property */
631 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
632 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
633 	if (propp != NULL) {
634 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
635 			/* a DDI_DEV_T_NONE devid exists and matches */
636 			mutex_exit(&(DEVI(dip)->devi_lock));
637 			ddi_devid_str_free(devidstr);
638 			return (1);
639 		} else {
640 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
641 			mutex_exit(&(DEVI(dip)->devi_lock));
642 			ddi_devid_str_free(devidstr);
643 			return (0);
644 		}
645 	}
646 
647 	/* check if there is a devt specific devid property */
648 	propp = i_ddi_prop_search(dev,
649 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
650 	if (propp != NULL) {
651 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
652 			/* a devt specific devid exists and matches */
653 			mutex_exit(&(DEVI(dip)->devi_lock));
654 			ddi_devid_str_free(devidstr);
655 			return (1);
656 		} else {
657 			/* a devt specific devid exists and doesn't match */
658 			mutex_exit(&(DEVI(dip)->devi_lock));
659 			ddi_devid_str_free(devidstr);
660 			return (0);
661 		}
662 	}
663 
664 	/* we didn't find any devids associated with the device */
665 	mutex_exit(&(DEVI(dip)->devi_lock));
666 	ddi_devid_str_free(devidstr);
667 	return (0);
668 }
669 
670 /* get a handle to a device by devid and minor name */
671 int
672 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
673 {
674 	dev_info_t		*dip;
675 	vnode_t			*vp;
676 	int			ret, i, ndevs, styp;
677 	dev_t			dev, *devs;
678 
679 	/* sanity check required input parameters */
680 	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
681 		return (EINVAL);
682 
683 	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
684 	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
685 		return (ENODEV);
686 
687 	for (i = 0; i < ndevs; i++) {
688 		dev = devs[i];
689 
690 		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
691 			continue;
692 
693 		/*
694 		 * now we have to verify that the devid of the disk
695 		 * still matches what was requested.
696 		 *
697 		 * we have to do this because the devid could have
698 		 * changed between the call to ddi_lyr_devid_to_devlist()
699 		 * and e_ddi_hold_devi_by_dev().  this is because when
700 		 * ddi_lyr_devid_to_devlist() returns a list of devts
701 		 * there is no kind of hold on those devts so a device
702 		 * could have been replaced out from under us in the
703 		 * interim.
704 		 */
705 		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
706 		    NULL, &styp) == DDI_SUCCESS) &&
707 		    ldi_devid_match(devid, dip, dev))
708 			break;
709 
710 		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
711 	}
712 
713 	ddi_lyr_free_devlist(devs, ndevs);
714 
715 	if (i == ndevs)
716 		return (ENODEV);
717 
718 	ASSERT(STYP_VALID(styp));
719 	vp = makespecvp(dev, STYP_TO_VTYP(styp));
720 	spec_assoc_vp_with_devi(vp, dip);
721 	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */
722 
723 	*vpp = vp;
724 	return (0);
725 }
726 
727 /* given a vnode, open a device */
728 static int
729 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
730     ldi_handle_t *lhp, struct ldi_ident *li)
731 {
732 	struct ldi_handle	*nlhp;
733 	vnode_t			*vp;
734 	int			err;
735 
736 	ASSERT((vpp != NULL) && (*vpp != NULL));
737 	ASSERT((lhp != NULL) && (li != NULL));
738 
739 	vp = *vpp;
740 	/* if the vnode passed in is not a device, then bail */
741 	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
742 		return (ENXIO);
743 
744 	/*
745 	 * the caller may have specified a node that
746 	 * doesn't have cb_ops defined.  the ldi doesn't yet
747 	 * support opening devices without a valid cb_ops.
748 	 */
749 	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
750 		return (ENXIO);
751 
752 	/* open the device */
753 	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
754 		return (err);
755 
756 	/* possible clone open, make sure that we still have a spec node */
757 	ASSERT(vn_matchops(vp, spec_getvnodeops()));
758 
759 	nlhp = handle_alloc(vp, li);
760 
761 	if (vp != *vpp) {
762 		/*
763 		 * allocating the layered handle took a new hold on the vnode
764 		 * so we can release the hold that was returned by the clone
765 		 * open
766 		 */
767 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
768 		    "ldi clone open", (void *)nlhp));
769 	} else {
770 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
771 		    "ldi open", (void *)nlhp));
772 	}
773 
774 	*vpp = vp;
775 	*lhp = (ldi_handle_t)nlhp;
776 	return (0);
777 }
778 
779 /* Call a driver's prop_op(9E) interface */
780 static int
781 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
782     int flags, char *name, caddr_t valuep, int *lengthp)
783 {
784 	struct dev_ops	*ops = NULL;
785 	int		res;
786 
787 	ASSERT((dip != NULL) && (name != NULL));
788 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
789 	ASSERT(lengthp != NULL);
790 
791 	/*
792 	 * we can only be invoked after a driver has been opened and
793 	 * someone has a layered handle to it, so there had better be
794 	 * a valid ops vector.
795 	 */
796 	ops = DEVI(dip)->devi_ops;
797 	ASSERT(ops && ops->devo_cb_ops);
798 
799 	/*
800 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
801 	 * nulldev or even NULL.
802 	 */
803 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
804 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
805 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
806 		return (DDI_PROP_NOT_FOUND);
807 	}
808 
809 	/* check if this is actually DDI_DEV_T_ANY query */
810 	if (flags & LDI_DEV_T_ANY) {
811 		flags &= ~LDI_DEV_T_ANY;
812 		dev = DDI_DEV_T_ANY;
813 	}
814 
815 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
816 	return (res);
817 }
818 
819 static void
820 i_ldi_prop_op_free(struct prop_driver_data *pdd)
821 {
822 	kmem_free(pdd, pdd->pdd_size);
823 }
824 
825 static caddr_t
826 i_ldi_prop_op_alloc(int prop_len)
827 {
828 	struct prop_driver_data	*pdd;
829 	int			pdd_size;
830 
831 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
832 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
833 	pdd->pdd_size = pdd_size;
834 	pdd->pdd_prop_free = i_ldi_prop_op_free;
835 	return ((caddr_t)&pdd[1]);
836 }
837 
838 /*
839  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
840  * by the typed ldi property lookup interfaces.
841  */
842 static int
843 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
844     caddr_t *datap, int *lengthp, int elem_size)
845 {
846 	caddr_t	prop_val;
847 	int	prop_len, res;
848 
849 	ASSERT((dip != NULL) && (name != NULL));
850 	ASSERT((datap != NULL) && (lengthp != NULL));
851 
852 	/*
853 	 * first call the driver's prop_op() interface to allow it
854 	 * to override default property values.
855 	 */
856 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
857 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
858 	if (res != DDI_PROP_SUCCESS)
859 		return (DDI_PROP_NOT_FOUND);
860 
861 	/* sanity check the property length */
862 	if (prop_len == 0) {
863 		/*
864 		 * the ddi typed interfaces don't allow a driver to
865 		 * create properties with a length of 0.  so we should
866 		 * prevent drivers from returning 0 length dynamic
867 		 * properties for typed property lookups.
868 		 */
869 		return (DDI_PROP_NOT_FOUND);
870 	}
871 
872 	/* sanity check the property length against the element size */
873 	if (elem_size && ((prop_len % elem_size) != 0))
874 		return (DDI_PROP_NOT_FOUND);
875 
876 	/*
877 	 * got it.  now allocate a prop_driver_data struct so that the
878 	 * user can free the property via ddi_prop_free().
879 	 */
880 	prop_val = i_ldi_prop_op_alloc(prop_len);
881 
882 	/* lookup the property again, this time get the value */
883 	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
884 	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
885 	if (res != DDI_PROP_SUCCESS) {
886 		ddi_prop_free(prop_val);
887 		return (DDI_PROP_NOT_FOUND);
888 	}
889 
890 	/* sanity check the property length */
891 	if (prop_len == 0) {
892 		ddi_prop_free(prop_val);
893 		return (DDI_PROP_NOT_FOUND);
894 	}
895 
896 	/* sanity check the property length against the element size */
897 	if (elem_size && ((prop_len % elem_size) != 0)) {
898 		ddi_prop_free(prop_val);
899 		return (DDI_PROP_NOT_FOUND);
900 	}
901 
902 	/*
903 	 * return the prop_driver_data struct and, optionally, the length
904 	 * of the data.
905 	 */
906 	*datap = prop_val;
907 	*lengthp = prop_len;
908 
909 	return (DDI_PROP_SUCCESS);
910 }
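
/*
 * Illustrative sketch (not part of the original source): the two-pass
 * prop_op(9E) pattern that i_ldi_prop_op_typed() builds on.  "xx_prop_op"
 * and "my-prop" are hypothetical names.
 *
 *	int	len;
 *	caddr_t	buf;
 *
 *	... first ask only for the length of the property ...
 *	(void) xx_prop_op(dev, dip, PROP_LEN, flags, "my-prop", NULL, &len);
 *
 *	... then allocate a buffer and fetch the value itself ...
 *	buf = kmem_alloc(len, KM_SLEEP);
 *	(void) xx_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF, flags, "my-prop",
 *	    buf, &len);
 */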
911 
912 /*
913  * i_check_string looks at a string property and makes sure it's
914  * a valid null-terminated string
915  */
916 static int
917 i_check_string(char *str, int prop_len)
918 {
919 	int i;
920 
921 	ASSERT(str != NULL);
922 
923 	for (i = 0; i < prop_len; i++) {
924 		if (str[i] == '\0')
925 			return (0);
926 	}
927 	return (1);
928 }
929 
930 /*
931  * i_pack_string_array takes a string array property that is represented
932  * as a concatenation of strings (with the NULL character included for
933  * each string) and converts it into a format that can be returned by
934  * ldi_prop_lookup_string_array.
935  */
936 static int
937 i_pack_string_array(char *str_concat, int prop_len,
938     char ***str_arrayp, int *nelemp)
939 {
940 	int i, nelem, pack_size;
941 	char **str_array, *strptr;
942 
943 	/*
944 	 * first we need to sanity check the input string array.
945 	 * in essence this can be done by making sure that the last
946 	 * character of the array passed in is null (meaning the last
947 	 * string in the array is NULL terminated).
948 	 */
949 	if (str_concat[prop_len - 1] != '\0')
950 		return (1);
951 
952 	/* now let's count the number of strings in the array */
953 	for (nelem = i = 0; i < prop_len; i++)
954 		if (str_concat[i] == '\0')
955 			nelem++;
956 	ASSERT(nelem >= 1);
957 
958 	/* now let's allocate memory for the new packed property */
959 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
960 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
961 
962 	/* let's copy the actual string data into the new property */
963 	strptr = (char *)&(str_array[nelem + 1]);
964 	bcopy(str_concat, strptr, prop_len);
965 
966 	/* now initialize the string array pointers */
967 	for (i = 0; i < nelem; i++) {
968 		str_array[i] = strptr;
969 		strptr += strlen(strptr) + 1;
970 	}
971 	str_array[nelem] = NULL;
972 
973 	/* set the return values */
974 	*str_arrayp = str_array;
975 	*nelemp = nelem;
976 
977 	return (0);
978 }
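
/*
 * Worked example (illustrative only): for the concatenated input
 * "one\0two\0" with prop_len == 8, i_pack_string_array() produces a
 * single allocation laid out as
 *
 *	str_array[0] -> "one"
 *	str_array[1] -> "two"
 *	str_array[2] == NULL
 *	"one\0two\0"	(string data copied right after the pointer array)
 *
 * with *nelemp set to 2.  Since the memory comes from
 * i_ldi_prop_op_alloc(), the consumer ultimately frees it with
 * ddi_prop_free().
 */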
979 
980 
981 /*
982  * LDI Project private device usage interfaces
983  */
984 
985 /*
986  * Get a count of how many devices are currently open by different consumers
987  */
988 int
989 ldi_usage_count()
990 {
991 	return (ldi_handle_hash_count);
992 }
993 
994 static void
995 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
996 {
997 	dev_info_t	*dip;
998 	dev_t		dev;
999 
1000 	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1001 
1002 	/* get the target devt */
1003 	dev = vp->v_rdev;
1004 
1005 	/* try to get the target dip */
1006 	dip = VTOCS(vp)->s_dip;
1007 	if (dip != NULL) {
1008 		e_ddi_hold_devi(dip);
1009 	} else if (dev != DDI_DEV_T_NONE) {
1010 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1011 	}
1012 
1013 	/* set the target information */
1014 	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1015 	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1016 	ldi_usage->tgt_devt = dev;
1017 	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1018 	ldi_usage->tgt_dip = dip;
1019 }
1020 
1021 
1022 static int
1023 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1024     void *arg, int (*callback)(const ldi_usage_t *, void *))
1025 {
1026 	ldi_usage_t	ldi_usage;
1027 	struct devnames	*dnp;
1028 	dev_info_t	*dip;
1029 	major_t		major;
1030 	dev_t		dev;
1031 	int		ret = LDI_USAGE_CONTINUE;
1032 
1033 	/* set the target device information */
1034 	ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1035 
1036 	/* get the source devt */
1037 	dev = lip->li_dev;
1038 
1039 	/* try to get the source dip */
1040 	dip = lip->li_dip;
1041 	if (dip != NULL) {
1042 		e_ddi_hold_devi(dip);
1043 	} else if (dev != DDI_DEV_T_NONE) {
1044 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1045 	}
1046 
1047 	/* set the valid source information */
1048 	ldi_usage.src_modid = lip->li_modid;
1049 	ldi_usage.src_name = lip->li_modname;
1050 	ldi_usage.src_devt = dev;
1051 	ldi_usage.src_dip = dip;
1052 
1053 	/*
1054 	 * if the source ident represents either:
1055 	 *
1056 	 * - a kernel module (and not a device or device driver)
1057 	 * - a device node
1058 	 *
1059 	 * then we currently have all the info we need to report the
1060 	 * usage information so invoke the callback function.
1061 	 */
1062 	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1063 	    (dip != NULL)) {
1064 		ret = callback(&ldi_usage, arg);
1065 		if (dip != NULL)
1066 			ddi_release_devi(dip);
1067 		if (ldi_usage.tgt_dip != NULL)
1068 			ddi_release_devi(ldi_usage.tgt_dip);
1069 		return (ret);
1070 	}
1071 
1072 	/*
1073 	 * now this is kinda gross.
1074 	 *
1075 	 * what we do here is attempt to associate every device instance
1076 	 * of the source driver on the system with the open target driver.
1077 	 * we do this because we don't know which instance of the device
1078 	 * could potentially access the lower device so we assume that all
1079 	 * the instances could access it.
1080 	 *
1081 	 * there are two ways we could have gotten here:
1082 	 *
1083 	 * 1) this layered ident represents one created using only a
1084 	 *    major number or a driver module name.  this means that when
1085 	 *    it was created we could not associate it with a particular
1086 	 *    dev_t or device instance.
1087 	 *
1088 	 *    when could this possibly happen you ask?
1089 	 *
1090 	 *    a perfect example of this is streams persistent links.
1091 	 *    when a persistent streams link is formed we can't associate
1092 	 *    the lower device stream with any particular upper device
1093 	 *    stream or instance.  this is because any particular upper
1094 	 *    device stream could be closed, then another could be
1095 	 *    opened with a different dev_t and device instance, and it
1096 	 *    would still have access to the lower linked stream.
1097 	 *
1098 	 *    since any instance of the upper streams driver could
1099 	 *    potentially access the lower stream whenever it wants,
1100 	 *    we represent that here by associating the opened lower
1101 	 *    device with every existing device instance of the upper
1102 	 *    streams driver.
1103 	 *
1104 	 * 2) This case should really never happen but we'll include it
1105 	 *    for completeness.
1106 	 *
1107 	 *    it's possible that we could have gotten here because we
1108 	 *    have a dev_t for the upper device but we couldn't find a
1109 	 *    dip associated with that dev_t.
1110 	 *
1111 	 *    the only types of devices that have dev_t without an
1112 	 *    associated dip are unbound DLPIv2 network devices.  These
1113 	 *    types of devices exist to be able to attach a stream to any
1114 	 *    instance of a hardware network device.  since these types of
1115 	 *    devices are usually hardware devices they should never
1116 	 *    really have other devices open.
1117 	 */
1118 	if (dev != DDI_DEV_T_NONE)
1119 		major = getmajor(dev);
1120 	else
1121 		major = lip->li_major;
1122 
1123 	ASSERT((major >= 0) && (major < devcnt));
1124 
1125 	dnp = &devnamesp[major];
1126 	LOCK_DEV_OPS(&dnp->dn_lock);
1127 	dip = dnp->dn_head;
1128 	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1129 		e_ddi_hold_devi(dip);
1130 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1131 
1132 		/* set the source dip */
1133 		ldi_usage.src_dip = dip;
1134 
1135 		/* invoke the callback function */
1136 		ret = callback(&ldi_usage, arg);
1137 
1138 		LOCK_DEV_OPS(&dnp->dn_lock);
1139 		ddi_release_devi(dip);
1140 		dip = ddi_get_next(dip);
1141 	}
1142 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1143 
1144 	/* if there was a target dip, release it */
1145 	if (ldi_usage.tgt_dip != NULL)
1146 		ddi_release_devi(ldi_usage.tgt_dip);
1147 
1148 	return (ret);
1149 }
1150 
1151 /*
1152  * ldi_usage_walker() - this walker reports LDI kernel device usage
1153  * information via the callback() callback function.  the LDI keeps track
1154  * of what devices are being accessed in its own internal data structures.
1155  * this function walks those data structures to determine device usage.
1156  */
1157 void
1158 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1159 {
1160 	struct ldi_handle	*lhp;
1161 	struct ldi_ident	*lip;
1162 	vnode_t			*vp;
1163 	int			i;
1164 	int			ret = LDI_USAGE_CONTINUE;
1165 
1166 	for (i = 0; i < LH_HASH_SZ; i++) {
1167 		mutex_enter(&ldi_handle_hash_lock[i]);
1168 
1169 		lhp = ldi_handle_hash[i];
1170 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1171 			lip = lhp->lh_ident;
1172 			vp = lhp->lh_vp;
1173 
1174 			/* invoke the devinfo callback function */
1175 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1176 
1177 			lhp = lhp->lh_next;
1178 		}
1179 		mutex_exit(&ldi_handle_hash_lock[i]);
1180 
1181 		if (ret != LDI_USAGE_CONTINUE)
1182 			break;
1183 	}
1184 }
1185 
1186 /*
1187  * LDI Project private interfaces (streams linking interfaces)
1188  *
1189  * Streams supports a type of built in device layering via linking.
1190  * Certain types of streams drivers can be streams multiplexors.
1191  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1192  * These operations allow other streams devices to be linked under the
1193  * multiplexor.  By definition all streams multiplexors are devices
1194  * so this linking is a type of device layering where the multiplexor
1195  * device is layered on top of the device linked below it.
1196  */
1197 
1198 /*
1199  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1200  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1201  *
1202  * The streams framework keeps track of links via the file_t of the lower
1203  * stream.  The LDI keeps track of devices using a vnode.  In the case
1204  * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1205  * a file_t that the streams framework can use to track the linkage.
1206  */
1207 int
1208 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1209 {
1210 	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
1211 	vnode_t			*vpdown;
1212 	file_t			*fpdown;
1213 	int			err;
1214 
1215 	if (lhp == NULL)
1216 		return (EINVAL);
1217 
1218 	vpdown = lhp->lh_vp;
1219 	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1220 	ASSERT(cmd == _I_PLINK_LH);
1221 
1222 	/*
1223 	 * create a new lower vnode and a file_t that points to it,
1224 	 * streams linking requires a file_t.  falloc() returns with
1225 	 * fpdown locked.
1226 	 */
1227 	VN_HOLD(vpdown);
1228 	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1229 	mutex_exit(&fpdown->f_tlock);
1230 
1231 	/* try to establish the link */
1232 	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1233 
1234 	if (err != 0) {
1235 		/* the link failed, free the file_t and release the vnode */
1236 		mutex_enter(&fpdown->f_tlock);
1237 		unfalloc(fpdown);
1238 		VN_RELE(vpdown);
1239 	}
1240 
1241 	return (err);
1242 }
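
/*
 * Illustrative sketch (not part of the original source): a kernel consumer
 * holding layered handles to both a multiplexor and a lower stream can
 * establish a persistent link through ldi_ioctl(), which translates an
 * in-kernel I_PLINK into _I_PLINK_LH (see ldi_ioctl() below).  lh_mux and
 * lh_lower are hypothetical handles.
 *
 *	int err, rval;
 *
 *	err = ldi_ioctl(lh_mux, I_PLINK, (intptr_t)lh_lower, FKIOCTL,
 *	    kcred, &rval);
 */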
1243 
1244 /*
1245  * ldi_mlink_fp() is invoked for all successful streams linkages created
1246  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1247  * in its internal state so that the devinfo snapshot code has some
1248  * observability into streams device linkage information.
1249  */
1250 void
1251 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1252 {
1253 	vnode_t			*vp = fpdown->f_vnode;
1254 	struct snode		*sp, *csp;
1255 	ldi_ident_t		li;
1256 	major_t			major;
1257 	int			ret;
1258 
1259 	/* if the lower stream is not a device then return */
1260 	if (!vn_matchops(vp, spec_getvnodeops()))
1261 		return;
1262 
1263 	ASSERT(!servicing_interrupt());
1264 
1265 	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1266 	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1267 	    (void *)stp, (void *)fpdown));
1268 
1269 	sp = VTOS(vp);
1270 	csp = VTOS(sp->s_commonvp);
1271 
1272 	/* check if this was a plink via a layered handle */
1273 	if (lhlink) {
1274 		/*
1275 		 * increment the common snode s_count.
1276 		 *
1277 		 * this is done because after the link operation there
1278 		 * are two ways that s_count can be decremented.
1279 		 *
1280 		 * when the layered handle used to create the link is
1281 		 * closed, spec_close() is called and it will decrement
1282 		 * s_count in the common snode.  if we don't increment
1283 		 * s_count here then this could cause spec_close() to
1284 		 * actually close the device while it's still linked
1285 		 * under a multiplexer.
1286 		 *
1287 		 * also, when the lower stream is unlinked, closef() is
1288 		 * called for the file_t associated with this snode.
1289 		 * closef() will call spec_close(), which will decrement
1290 		 * s_count.  if we don't increment s_count here then this
1291 		 * could cause spec_close() to actually close the device
1292 		 * while there may still be valid layered handles
1293 		 * pointing to it.
1294 		 */
1295 		mutex_enter(&csp->s_lock);
1296 		ASSERT(csp->s_count >= 1);
1297 		csp->s_count++;
1298 		mutex_exit(&csp->s_lock);
1299 
1300 		/*
1301 		 * decrement the f_count.
1302 		 * this is done because the layered driver framework does
1303 		 * not actually cache a copy of the file_t allocated to
1304 		 * do the link.  this is done here instead of in ldi_mlink_lh()
1305 		 * because there is a window in ldi_mlink_lh() between where
1306 		 * mlink_file() returns and we would decrement the f_count
1307 		 * when the stream could be unlinked.
1308 		 */
1309 		mutex_enter(&fpdown->f_tlock);
1310 		fpdown->f_count--;
1311 		mutex_exit(&fpdown->f_tlock);
1312 	}
1313 
1314 	/*
1315 	 * NOTE: here we rely on the streams subsystem not allowing
1316 	 * a stream to be multiplexed more than once.  if this
1317 	 * changes, we break.
1318 	 *
1319 	 * mark the snode/stream as multiplexed
1320 	 */
1321 	mutex_enter(&sp->s_lock);
1322 	ASSERT(!(sp->s_flag & SMUXED));
1323 	sp->s_flag |= SMUXED;
1324 	mutex_exit(&sp->s_lock);
1325 
1326 	/* get a layered ident for the upper stream */
1327 	if (type == LINKNORMAL) {
1328 		/*
1329 		 * if the link is not persistent then we can associate
1330 		 * the upper stream with a dev_t.  this is because the
1331 		 * upper stream is associated with a vnode, which is
1332 		 * associated with a dev_t and this binding can't change
1333 		 * during the life of the stream.  since the link isn't
1334 		 * persistent once the stream is destroyed the link is
1335 		 * destroyed.  so the dev_t will be valid for the life
1336 		 * of the link.
1337 		 */
1338 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1339 	} else {
1340 		/*
1341 		 * if the link is persistent we can only associate the
1342 		 * link with a driver (and not a dev_t).  this is
1343 		 * because subsequent opens of the upper device may result
1344 		 * in a different stream (and dev_t) having access to
1345 		 * the lower stream.
1346 		 *
1347 		 * for example, if the upper stream is closed after the
1348 		 * persistent link operation is completed, a subsequent
1349 		 * open of the upper device will create a new stream which
1350 		 * may have a different dev_t and an unlink operation
1351 		 * can be performed using this new upper stream.
1352 		 */
1353 		ASSERT(type == LINKPERSIST);
1354 		major = getmajor(stp->sd_vnode->v_rdev);
1355 		ret = ldi_ident_from_major(major, &li);
1356 	}
1357 
1358 	ASSERT(ret == 0);
1359 	(void) handle_alloc(vp, (struct ldi_ident *)li);
1360 	ldi_ident_release(li);
1361 }
1362 
1363 void
1364 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1365 {
1366 	struct ldi_handle	*lhp;
1367 	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
1368 	struct snode		*sp;
1369 	ldi_ident_t		li;
1370 	major_t			major;
1371 	int			ret;
1372 
1373 	/* if the lower stream is not a device then return */
1374 	if (!vn_matchops(vp, spec_getvnodeops()))
1375 		return;
1376 
1377 	ASSERT(!servicing_interrupt());
1378 	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1379 
1380 	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1381 	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1382 	    (void *)stp, (void *)fpdown));
1383 
1384 	/*
1385 	 * NOTE: here we rely on the streams subsystem not allowing
1386 	 * a stream to be multiplexed more than once.  if this
1387 	 * changes, we break.
1388 	 *
1389 	 * mark the snode/stream as not multiplexed
1390 	 */
1391 	sp = VTOS(vp);
1392 	mutex_enter(&sp->s_lock);
1393 	ASSERT(sp->s_flag & SMUXED);
1394 	sp->s_flag &= ~SMUXED;
1395 	mutex_exit(&sp->s_lock);
1396 
1397 	/*
1398 	 * clear the owner for this snode
1399 	 * see the comment in ldi_mlink_fp() for information about how
1400 	 * the ident is allocated
1401 	 */
1402 	if (type == LINKNORMAL) {
1403 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1404 	} else {
1405 		ASSERT(type == LINKPERSIST);
1406 		major = getmajor(stp->sd_vnode->v_rdev);
1407 		ret = ldi_ident_from_major(major, &li);
1408 	}
1409 
1410 	ASSERT(ret == 0);
1411 	lhp = handle_find(vp, (struct ldi_ident *)li);
1412 	handle_release(lhp);
1413 	ldi_ident_release(li);
1414 }
1415 
1416 /*
1417  * LDI Consolidation private interfaces
1418  */
1419 int
1420 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1421 {
1422 	struct modctl		*modp;
1423 	major_t			major;
1424 	char			*name;
1425 
1426 	if ((modlp == NULL) || (lip == NULL))
1427 		return (EINVAL);
1428 
1429 	ASSERT(!servicing_interrupt());
1430 
1431 	modp = mod_getctl(modlp);
1432 	if (modp == NULL)
1433 		return (EINVAL);
1434 	name = modp->mod_modname;
1435 	if (name == NULL)
1436 		return (EINVAL);
1437 	major = mod_name_to_major(name);
1438 
1439 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1440 
1441 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1442 	    "ldi_ident_from_mod", (void *)*lip, name));
1443 
1444 	return (0);
1445 }
1446 
1447 ldi_ident_t
1448 ldi_ident_from_anon()
1449 {
1450 	ldi_ident_t	lip;
1451 
1452 	ASSERT(!servicing_interrupt());
1453 
1454 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1455 
1456 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1457 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1458 
1459 	return (lip);
1460 }
1461 
1462 
1463 /*
1464  * LDI Public interfaces
1465  */
1466 int
1467 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1468 {
1469 	struct stdata		*stp;
1470 	dev_t			dev;
1471 	char			*name;
1472 
1473 	if ((sq == NULL) || (lip == NULL))
1474 		return (EINVAL);
1475 
1476 	ASSERT(!servicing_interrupt());
1477 
1478 	stp = sq->q_stream;
1479 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1480 		return (EINVAL);
1481 
1482 	dev = stp->sd_vnode->v_rdev;
1483 	name = mod_major_to_name(getmajor(dev));
1484 	if (name == NULL)
1485 		return (EINVAL);
1486 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1487 
1488 	LDI_ALLOCFREE((CE_WARN,
1489 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1490 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1491 	    (void *)stp));
1492 
1493 	return (0);
1494 }
1495 
1496 int
1497 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1498 {
1499 	char			*name;
1500 
1501 	if (lip == NULL)
1502 		return (EINVAL);
1503 
1504 	ASSERT(!servicing_interrupt());
1505 
1506 	name = mod_major_to_name(getmajor(dev));
1507 	if (name == NULL)
1508 		return (EINVAL);
1509 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1510 
1511 	LDI_ALLOCFREE((CE_WARN,
1512 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1513 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1514 
1515 	return (0);
1516 }
1517 
1518 int
1519 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1520 {
1521 	struct dev_info		*devi = (struct dev_info *)dip;
1522 	char			*name;
1523 
1524 	if ((dip == NULL) || (lip == NULL))
1525 		return (EINVAL);
1526 
1527 	ASSERT(!servicing_interrupt());
1528 
1529 	name = mod_major_to_name(devi->devi_major);
1530 	if (name == NULL)
1531 		return (EINVAL);
1532 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1533 
1534 	LDI_ALLOCFREE((CE_WARN,
1535 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1536 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1537 
1538 	return (0);
1539 }
1540 
1541 int
1542 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1543 {
1544 	char			*name;
1545 
1546 	if (lip == NULL)
1547 		return (EINVAL);
1548 
1549 	ASSERT(!servicing_interrupt());
1550 
1551 	name = mod_major_to_name(major);
1552 	if (name == NULL)
1553 		return (EINVAL);
1554 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1555 
1556 	LDI_ALLOCFREE((CE_WARN,
1557 	    "%s: li=0x%p, mod=%s",
1558 	    "ldi_ident_from_major", (void *)*lip, name));
1559 
1560 	return (0);
1561 }
1562 
1563 void
1564 ldi_ident_release(ldi_ident_t li)
1565 {
1566 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1567 	char			*name;
1568 
1569 	if (li == NULL)
1570 		return;
1571 
1572 	ASSERT(!servicing_interrupt());
1573 
1574 	name = ident->li_modname;
1575 
1576 	LDI_ALLOCFREE((CE_WARN,
1577 	    "%s: li=0x%p, mod=%s",
1578 	    "ldi_ident_release", (void *)li, name));
1579 
1580 	ident_release((struct ldi_ident *)li);
1581 }
1582 
1583 /* get a handle to a device by dev_t and otyp */
1584 int
1585 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1586     ldi_handle_t *lhp, ldi_ident_t li)
1587 {
1588 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1589 	int			ret;
1590 	vnode_t			*vp;
1591 
1592 	/* sanity check required input parameters */
1593 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1594 	    (lhp == NULL) || (lip == NULL))
1595 		return (EINVAL);
1596 
1597 	ASSERT(!servicing_interrupt());
1598 
1599 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1600 		return (ret);
1601 
1602 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1603 		*devp = vp->v_rdev;
1604 	}
1605 	VN_RELE(vp);
1606 
1607 	return (ret);
1608 }
1609 
1610 /* get a handle to a device by pathname */
1611 int
1612 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1613     ldi_handle_t *lhp, ldi_ident_t li)
1614 {
1615 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1616 	int			ret;
1617 	vnode_t			*vp;
1618 
1619 	/* sanity check required input parameters */
1620 	if ((pathname == NULL) || (*pathname != '/') ||
1621 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1622 		return (EINVAL);
1623 
1624 	ASSERT(!servicing_interrupt());
1625 
1626 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1627 		return (ret);
1628 
1629 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1630 	VN_RELE(vp);
1631 
1632 	return (ret);
1633 }
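
/*
 * Illustrative sketch (not part of the original source): the usual
 * open/close life cycle for a layered consumer.  The device path is a
 * made-up example.
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *
 *	if (ldi_ident_from_dip(dip, &li) != 0)
 *		return (DDI_FAILURE);
 *	if (ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred, &lh, li)) {
 *		ldi_ident_release(li);
 *		return (DDI_FAILURE);
 *	}
 *	... perform ldi_read()/ldi_ioctl()/ldi_prop_*() calls on lh ...
 *	(void) ldi_close(lh, FREAD, kcred);
 *	ldi_ident_release(li);
 */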
1634 
1635 /* get a handle to a device by devid and minor_name */
1636 int
1637 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1638     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1639 {
1640 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1641 	int			ret;
1642 	vnode_t			*vp;
1643 
1644 	/* sanity check required input parameters */
1645 	if ((minor_name == NULL) || (cr == NULL) ||
1646 	    (lhp == NULL) || (lip == NULL))
1647 		return (EINVAL);
1648 
1649 	ASSERT(!servicing_interrupt());
1650 
1651 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1652 		return (ret);
1653 
1654 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1655 	VN_RELE(vp);
1656 
1657 	return (ret);
1658 }
1659 
1660 int
1661 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1662 {
1663 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1664 	struct ldi_event	*lep;
1665 	int			err = 0;
1666 	int			notify = 0;
1667 	list_t			*listp;
1668 	ldi_ev_callback_impl_t	*lecp;
1669 
1670 	if (lh == NULL)
1671 		return (EINVAL);
1672 
1673 	ASSERT(!servicing_interrupt());
1674 
1675 #ifdef	LDI_OBSOLETE_EVENT
1676 
1677 	/*
1678 	 * Any event handlers should have been unregistered by the
1679 	 * time ldi_close() is called.  If they haven't then it's a
1680 	 * bug.
1681 	 *
1682 	 * In a debug kernel we'll panic to make the problem obvious.
1683 	 */
1684 	ASSERT(handlep->lh_events == NULL);
1685 
1686 	/*
1687 	 * On a production kernel we'll "do the right thing" (unregister
1688 	 * the event handlers) and then complain about having to do the
1689 	 * work ourselves.
1690 	 */
1691 	while ((lep = handlep->lh_events) != NULL) {
1692 		err = 1;
1693 		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1694 	}
1695 	if (err) {
1696 		struct ldi_ident *lip = handlep->lh_ident;
1697 		ASSERT(lip != NULL);
1698 		cmn_err(CE_NOTE, "ldi err: %s "
1699 		    "failed to unregister layered event handlers before "
1700 		    "closing devices", lip->li_modname);
1701 	}
1702 #endif
1703 
1704 	/* do a layered close on the device */
1705 	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1706 
1707 	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1708 
1709 	/*
1710 	 * Search the event callback list for callbacks with this
1711 	 * handle. There are 2 cases
1712 	 * 1. Called in the context of a notify. The handle consumer
1713 	 *    is releasing its hold on the device to allow a reconfiguration
1714 	 *    of the device. Simply NULL out the handle and the notify callback.
1715 	 *    The finalize callback is still available so that the consumer
1716 	 *    knows of the final disposition of the device.
1717 	 * 2. Not called in the context of notify. NULL out the handle as well
1718 	 *    as the notify and finalize callbacks. Since the consumer has
1719 	 *    closed the handle, we assume it is not interested in the
1720 	 *    notify and finalize callbacks.
1721 	 */
1722 	ldi_ev_lock();
1723 
1724 	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1725 		notify = 1;
1726 	listp = &ldi_ev_callback_list.le_head;
1727 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1728 		if (lecp->lec_lhp != handlep)
1729 			continue;
1730 		lecp->lec_lhp = NULL;
1731 		lecp->lec_notify = NULL;
1732 		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1733 		if (!notify) {
1734 			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1735 			lecp->lec_finalize = NULL;
1736 		}
1737 	}
1738 
1739 	if (notify)
1740 		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1741 	ldi_ev_unlock();
1742 
1743 	/*
1744 	 * Free the handle even if the device close failed.  why?
1745 	 *
1746 	 * If the device close failed we can't really make assumptions
1747 	 * about the devices state so we shouldn't allow access to the
1748 	 * device via this handle any more.  If the device consumer wants
1749 	 * to access the device again they should open it again.
1750 	 *
1751 	 * This is the same way file/device close failures are handled
1752 	 * in other places like spec_close() and closeandsetf().
1753 	 */
1754 	handle_release(handlep);
1755 	return (err);
1756 }
1757 
1758 int
1759 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1760 {
1761 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1762 	vnode_t			*vp;
1763 	dev_t			dev;
1764 	int			ret;
1765 
1766 	if (lh == NULL)
1767 		return (EINVAL);
1768 
1769 	vp = handlep->lh_vp;
1770 	dev = vp->v_rdev;
1771 	if (handlep->lh_type & LH_CBDEV) {
1772 		ret = cdev_read(dev, uiop, credp);
1773 	} else if (handlep->lh_type & LH_STREAM) {
1774 		ret = strread(vp, uiop, credp);
1775 	} else {
1776 		return (ENOTSUP);
1777 	}
1778 	return (ret);
1779 }
1780 
1781 int
1782 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1783 {
1784 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1785 	vnode_t			*vp;
1786 	dev_t			dev;
1787 	int			ret;
1788 
1789 	if (lh == NULL)
1790 		return (EINVAL);
1791 
1792 	vp = handlep->lh_vp;
1793 	dev = vp->v_rdev;
1794 	if (handlep->lh_type & LH_CBDEV) {
1795 		ret = cdev_write(dev, uiop, credp);
1796 	} else if (handlep->lh_type & LH_STREAM) {
1797 		ret = strwrite(vp, uiop, credp);
1798 	} else {
1799 		return (ENOTSUP);
1800 	}
1801 	return (ret);
1802 }
1803 
1804 int
1805 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1806 {
1807 	int			otyp;
1808 	uint_t			value;
1809 	int64_t			drv_prop64;
1810 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1811 	uint_t			blksize;
1812 	int			blkshift;
1813 
1814 
1815 	if ((lh == NULL) || (sizep == NULL))
1816 		return (DDI_FAILURE);
1817 
1818 	if (handlep->lh_type & LH_STREAM)
1819 		return (DDI_FAILURE);
1820 
1821 	/*
1822 	 * Determine device type (char or block).
1823 	 * Character devices support Size/size
1824 	 * property value. Block devices may support
1825 	 * Nblocks/nblocks or Size/size property value.
1826 	 */
1827 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1828 		return (DDI_FAILURE);
1829 
1830 	if (otyp == OTYP_BLK) {
1831 		if (ldi_prop_exists(lh,
1832 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1833 
1834 			drv_prop64 = ldi_prop_get_int64(lh,
1835 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1836 			    "Nblocks", 0);
1837 			blksize = ldi_prop_get_int(lh,
1838 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1839 			    "blksize", DEV_BSIZE);
1840 			if (blksize == DEV_BSIZE)
1841 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1842 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1843 				    "device-blksize", DEV_BSIZE);
1844 
1845 			/* blksize must be a power of two */
1846 			ASSERT(BIT_ONLYONESET(blksize));
1847 			blkshift = highbit(blksize) - 1;
1848 
1849 			/*
1850 			 * We don't support Nblocks values that don't have
1851 			 * an accurate uint64_t byte count representation.
1852 			 */
1853 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1854 				return (DDI_FAILURE);
1855 
1856 			*sizep = (uint64_t)
1857 			    (((u_offset_t)drv_prop64) << blkshift);
1858 			return (DDI_SUCCESS);
1859 		}
1860 
1861 		if (ldi_prop_exists(lh,
1862 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1863 
1864 			value = ldi_prop_get_int(lh,
1865 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1866 			    "nblocks", 0);
1867 			blksize = ldi_prop_get_int(lh,
1868 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1869 			    "blksize", DEV_BSIZE);
1870 			if (blksize == DEV_BSIZE)
1871 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1872 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1873 				    "device-blksize", DEV_BSIZE);
1874 
1875 			/* blksize must be a power of two */
1876 			ASSERT(BIT_ONLYONESET(blksize));
1877 			blkshift = highbit(blksize) - 1;
1878 
1879 			/*
1880 			 * We don't support nblocks values that don't have an
1881 			 * accurate uint64_t byte count representation.
1882 			 */
1883 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1884 				return (DDI_FAILURE);
1885 
1886 			*sizep = (uint64_t)
1887 			    (((u_offset_t)value) << blkshift);
1888 			return (DDI_SUCCESS);
1889 		}
1890 	}
1891 
1892 	if (ldi_prop_exists(lh,
1893 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1894 
1895 		drv_prop64 = ldi_prop_get_int64(lh,
1896 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1897 		*sizep = (uint64_t)drv_prop64;
1898 		return (DDI_SUCCESS);
1899 	}
1900 
1901 	if (ldi_prop_exists(lh,
1902 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1903 
1904 		value = ldi_prop_get_int(lh,
1905 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1906 		*sizep = (uint64_t)value;
1907 		return (DDI_SUCCESS);
1908 	}
1909 
1910 	/* unable to determine device size */
1911 	return (DDI_FAILURE);
1912 }
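
/*
 * Illustrative sketch (assumption, not in the original source): a layered
 * consumer querying the size of a device it already holds open.  The helper
 * name and guard macro are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int
sketch_report_size(ldi_handle_t lh)
{
	uint64_t	size;

	if (ldi_get_size(lh, &size) != DDI_SUCCESS)
		return (ENOTSUP);

	cmn_err(CE_CONT, "device is %llu bytes\n", (u_longlong_t)size);
	return (0);
}
#endif	/* LDI_USAGE_SKETCH */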
1913 
1914 int
1915 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1916 	cred_t *cr, int *rvalp)
1917 {
1918 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1919 	vnode_t			*vp;
1920 	dev_t			dev;
1921 	int			ret, copymode, unused;
1922 
1923 	if (lh == NULL)
1924 		return (EINVAL);
1925 
1926 	/*
1927 	 * if the data pointed to by arg is located in the kernel then
1928 	 * make sure the FNATIVE flag is set.
1929 	 */
1930 	if (mode & FKIOCTL)
1931 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1932 
1933 	/*
1934 	 * Some drivers assume that rvalp will always be non-NULL, so in
1935 	 * an attempt to avoid panics if the caller passed in a NULL
1936 	 * value, update rvalp to point to a temporary variable.
1937 	 */
1938 	if (rvalp == NULL)
1939 		rvalp = &unused;
1940 	vp = handlep->lh_vp;
1941 	dev = vp->v_rdev;
1942 	if (handlep->lh_type & LH_CBDEV) {
1943 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1944 	} else if (handlep->lh_type & LH_STREAM) {
1945 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1946 
1947 		/*
1948 		 * If we get an I_PLINK from within the kernel, the arg
1949 		 * is a layered handle pointer instead of a file
1950 		 * descriptor, so we translate this ioctl into a private
1951 		 * one that can handle it.
1952 		 */
1953 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1954 			cmd = _I_PLINK_LH;
1955 
1956 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1957 	} else {
1958 		return (ENOTSUP);
1959 	}
1960 
1961 	return (ret);
1962 }
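
/*
 * Illustrative sketch (assumption): issuing an ioctl from kernel context
 * through an LDI handle.  FKIOCTL tells ldi_ioctl() that "arg" is a kernel
 * address.  The command, buffer and helper name are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int
sketch_kernel_ioctl(ldi_handle_t lh, int cmd, void *kbuf, cred_t *cr)
{
	int	rval;

	return (ldi_ioctl(lh, cmd, (intptr_t)kbuf,
	    FKIOCTL | FREAD | FWRITE, cr, &rval));
}
#endif	/* LDI_USAGE_SKETCH */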
1963 
1964 int
1965 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1966     struct pollhead **phpp)
1967 {
1968 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1969 	vnode_t			*vp;
1970 	dev_t			dev;
1971 	int			ret;
1972 
1973 	if (lh == NULL)
1974 		return (EINVAL);
1975 
1976 	vp = handlep->lh_vp;
1977 	dev = vp->v_rdev;
1978 	if (handlep->lh_type & LH_CBDEV) {
1979 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1980 	} else if (handlep->lh_type & LH_STREAM) {
1981 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1982 	} else {
1983 		return (ENOTSUP);
1984 	}
1985 
1986 	return (ret);
1987 }
1988 
1989 int
1990 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1991 	int flags, char *name, caddr_t valuep, int *length)
1992 {
1993 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1994 	dev_t			dev;
1995 	dev_info_t		*dip;
1996 	int			ret;
1997 	struct snode		*csp;
1998 
1999 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2000 		return (DDI_PROP_INVAL_ARG);
2001 
2002 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2003 		return (DDI_PROP_INVAL_ARG);
2004 
2005 	if (length == NULL)
2006 		return (DDI_PROP_INVAL_ARG);
2007 
2008 	/*
2009 	 * try to find the associated dip,
2010 	 * this places a hold on the driver
2011 	 */
2012 	dev = handlep->lh_vp->v_rdev;
2013 
2014 	csp = VTOCS(handlep->lh_vp);
2015 	mutex_enter(&csp->s_lock);
2016 	if ((dip = csp->s_dip) != NULL)
2017 		e_ddi_hold_devi(dip);
2018 	mutex_exit(&csp->s_lock);
2019 	if (dip == NULL)
2020 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2021 
2022 	if (dip == NULL)
2023 		return (DDI_PROP_NOT_FOUND);
2024 
2025 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2026 	ddi_release_devi(dip);
2027 
2028 	return (ret);
2029 }
2030 
2031 int
2032 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2033 {
2034 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2035 	dev_t			dev;
2036 
2037 	if ((lh == NULL) || (bp == NULL))
2038 		return (EINVAL);
2039 
2040 	/* this entry point is only supported for cb devices */
2041 	dev = handlep->lh_vp->v_rdev;
2042 	if (!(handlep->lh_type & LH_CBDEV))
2043 		return (ENOTSUP);
2044 
2045 	bp->b_edev = dev;
2046 	bp->b_dev = cmpdev(dev);
2047 	return (bdev_strategy(bp));
2048 }
2049 
2050 int
2051 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2052 {
2053 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2054 	dev_t			dev;
2055 
2056 	if (lh == NULL)
2057 		return (EINVAL);
2058 
2059 	/* this entry point is only supported for cb devices */
2060 	dev = handlep->lh_vp->v_rdev;
2061 	if (!(handlep->lh_type & LH_CBDEV))
2062 		return (ENOTSUP);
2063 
2064 	return (bdev_dump(dev, addr, blkno, nblk));
2065 }
2066 
2067 int
2068 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2069     size_t len, size_t *maplen, uint_t model)
2070 {
2071 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2072 	dev_t			dev;
2073 
2074 	if (lh == NULL)
2075 		return (EINVAL);
2076 
2077 	/* this entry point is only supported for cb devices */
2078 	dev = handlep->lh_vp->v_rdev;
2079 	if (!(handlep->lh_type & LH_CBDEV))
2080 		return (ENOTSUP);
2081 
2082 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2083 }
2084 
2085 int
2086 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2087 {
2088 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2089 	dev_t			dev;
2090 	struct cb_ops		*cb;
2091 
2092 	if (lh == NULL)
2093 		return (EINVAL);
2094 
2095 	/* this entry point is only supported for cb devices */
2096 	if (!(handlep->lh_type & LH_CBDEV))
2097 		return (ENOTSUP);
2098 
2099 	/*
2100 	 * Kaio is only supported on block devices.
2101 	 */
2102 	dev = handlep->lh_vp->v_rdev;
2103 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2104 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2105 		return (ENOTSUP);
2106 
2107 	if (cb->cb_aread == NULL)
2108 		return (ENOTSUP);
2109 
2110 	return (cb->cb_aread(dev, aio_reqp, cr));
2111 }
2112 
2113 int
2114 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2115 {
2116 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2117 	struct cb_ops		*cb;
2118 	dev_t			dev;
2119 
2120 	if (lh == NULL)
2121 		return (EINVAL);
2122 
2123 	/* this entry point is only supported for cb devices */
2124 	if (!(handlep->lh_type & LH_CBDEV))
2125 		return (ENOTSUP);
2126 
2127 	/*
2128 	 * Kaio is only supported on block devices.
2129 	 */
2130 	dev = handlep->lh_vp->v_rdev;
2131 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2132 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2133 		return (ENOTSUP);
2134 
2135 	if (cb->cb_awrite == NULL)
2136 		return (ENOTSUP);
2137 
2138 	return (cb->cb_awrite(dev, aio_reqp, cr));
2139 }
2140 
2141 int
2142 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2143 {
2144 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2145 	int			ret;
2146 
2147 	if ((lh == NULL) || (smp == NULL))
2148 		return (EINVAL);
2149 
2150 	if (!(handlep->lh_type & LH_STREAM)) {
2151 		freemsg(smp);
2152 		return (ENOTSUP);
2153 	}
2154 
2155 	/*
2156 	 * If we don't have db_credp, set it. Note that we can not be called
2157 	 * from interrupt context.
2158 	 */
2159 	if (msg_getcred(smp, NULL) == NULL)
2160 		mblk_setcred(smp, CRED(), curproc->p_pid);
2161 
2162 	/* Send message while honoring flow control */
2163 	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2164 	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2165 
2166 	return (ret);
2167 }
2168 
2169 int
2170 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2171 {
2172 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2173 	clock_t			timout; /* milliseconds */
2174 	uchar_t			pri;
2175 	rval_t			rval;
2176 	int			ret, pflag;
2177 
2178 
2179 	if (lh == NULL)
2180 		return (EINVAL);
2181 
2182 	if (!(handlep->lh_type & LH_STREAM))
2183 		return (ENOTSUP);
2184 
2185 	/* Convert from nanoseconds to milliseconds */
2186 	if (timeo != NULL) {
2187 		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2188 		if (timout > INT_MAX)
2189 			return (EINVAL);
2190 	} else
2191 		timout = -1;
2192 
2193 	/* Wait for timeout milliseconds for a message */
2194 	pflag = MSG_ANY;
2195 	pri = 0;
2196 	*rmp = NULL;
2197 	ret = kstrgetmsg(handlep->lh_vp,
2198 	    rmp, NULL, &pri, &pflag, timout, &rval);
2199 	return (ret);
2200 }
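
/*
 * Illustrative sketch (assumption): a simple request/reply exchange over a
 * STREAMS device held via LDI, waiting at most one second for the reply.
 * The helper name and the timeout value are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int
sketch_stream_echo(ldi_handle_t lh, mblk_t *request, mblk_t **replyp)
{
	timestruc_t	to;
	int		ret;

	to.tv_sec = 1;
	to.tv_nsec = 0;

	/* send the request downstream, honoring flow control */
	if ((ret = ldi_putmsg(lh, request)) != 0)
		return (ret);

	return (ldi_getmsg(lh, replyp, &to));
}
#endif	/* LDI_USAGE_SKETCH */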
2201 
2202 int
2203 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2204 {
2205 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2206 
2207 	if ((lh == NULL) || (devp == NULL))
2208 		return (EINVAL);
2209 
2210 	*devp = handlep->lh_vp->v_rdev;
2211 	return (0);
2212 }
2213 
2214 int
2215 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2216 {
2217 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2218 
2219 	if ((lh == NULL) || (otyp == NULL))
2220 		return (EINVAL);
2221 
2222 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2223 	return (0);
2224 }
2225 
2226 int
2227 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2228 {
2229 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2230 	int			ret;
2231 	dev_t			dev;
2232 
2233 	if ((lh == NULL) || (devid == NULL))
2234 		return (EINVAL);
2235 
2236 	dev = handlep->lh_vp->v_rdev;
2237 
2238 	ret = ddi_lyr_get_devid(dev, devid);
2239 	if (ret != DDI_SUCCESS)
2240 		return (ENOTSUP);
2241 
2242 	return (0);
2243 }
2244 
2245 int
2246 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2247 {
2248 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2249 	int			ret, otyp;
2250 	dev_t			dev;
2251 
2252 	if ((lh == NULL) || (minor_name == NULL))
2253 		return (EINVAL);
2254 
2255 	dev = handlep->lh_vp->v_rdev;
2256 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2257 
2258 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2259 	if (ret != DDI_SUCCESS)
2260 		return (ENOTSUP);
2261 
2262 	return (0);
2263 }
2264 
2265 int
2266 ldi_prop_lookup_int_array(ldi_handle_t lh,
2267     uint_t flags, char *name, int **data, uint_t *nelements)
2268 {
2269 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2270 	dev_info_t		*dip;
2271 	dev_t			dev;
2272 	int			res;
2273 	struct snode		*csp;
2274 
2275 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2276 		return (DDI_PROP_INVAL_ARG);
2277 
2278 	dev = handlep->lh_vp->v_rdev;
2279 
2280 	csp = VTOCS(handlep->lh_vp);
2281 	mutex_enter(&csp->s_lock);
2282 	if ((dip = csp->s_dip) != NULL)
2283 		e_ddi_hold_devi(dip);
2284 	mutex_exit(&csp->s_lock);
2285 	if (dip == NULL)
2286 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2287 
2288 	if (dip == NULL) {
2289 		flags |= DDI_UNBND_DLPI2;
2290 	} else if (flags & LDI_DEV_T_ANY) {
2291 		flags &= ~LDI_DEV_T_ANY;
2292 		dev = DDI_DEV_T_ANY;
2293 	}
2294 
2295 	if (dip != NULL) {
2296 		int *prop_val, prop_len;
2297 
2298 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2299 		    (caddr_t *)&prop_val, &prop_len, sizeof (int));
2300 
2301 		/* if we got it then return it */
2302 		if (res == DDI_PROP_SUCCESS) {
2303 			*nelements = prop_len / sizeof (int);
2304 			*data = prop_val;
2305 
2306 			ddi_release_devi(dip);
2307 			return (res);
2308 		}
2309 	}
2310 
2311 	/* call the normal property interfaces */
2312 	res = ddi_prop_lookup_int_array(dev, dip, flags,
2313 	    name, data, nelements);
2314 
2315 	if (dip != NULL)
2316 		ddi_release_devi(dip);
2317 
2318 	return (res);
2319 }
2320 
2321 int
2322 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2323     uint_t flags, char *name, int64_t **data, uint_t *nelements)
2324 {
2325 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2326 	dev_info_t		*dip;
2327 	dev_t			dev;
2328 	int			res;
2329 	struct snode		*csp;
2330 
2331 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2332 		return (DDI_PROP_INVAL_ARG);
2333 
2334 	dev = handlep->lh_vp->v_rdev;
2335 
2336 	csp = VTOCS(handlep->lh_vp);
2337 	mutex_enter(&csp->s_lock);
2338 	if ((dip = csp->s_dip) != NULL)
2339 		e_ddi_hold_devi(dip);
2340 	mutex_exit(&csp->s_lock);
2341 	if (dip == NULL)
2342 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2343 
2344 	if (dip == NULL) {
2345 		flags |= DDI_UNBND_DLPI2;
2346 	} else if (flags & LDI_DEV_T_ANY) {
2347 		flags &= ~LDI_DEV_T_ANY;
2348 		dev = DDI_DEV_T_ANY;
2349 	}
2350 
2351 	if (dip != NULL) {
2352 		int64_t	*prop_val;
2353 		int	prop_len;
2354 
2355 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2356 		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2357 
2358 		/* if we got it then return it */
2359 		if (res == DDI_PROP_SUCCESS) {
2360 			*nelements = prop_len / sizeof (int64_t);
2361 			*data = prop_val;
2362 
2363 			ddi_release_devi(dip);
2364 			return (res);
2365 		}
2366 	}
2367 
2368 	/* call the normal property interfaces */
2369 	res = ddi_prop_lookup_int64_array(dev, dip, flags,
2370 	    name, data, nelements);
2371 
2372 	if (dip != NULL)
2373 		ddi_release_devi(dip);
2374 
2375 	return (res);
2376 }
2377 
2378 int
2379 ldi_prop_lookup_string_array(ldi_handle_t lh,
2380     uint_t flags, char *name, char ***data, uint_t *nelements)
2381 {
2382 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2383 	dev_info_t		*dip;
2384 	dev_t			dev;
2385 	int			res;
2386 	struct snode		*csp;
2387 
2388 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2389 		return (DDI_PROP_INVAL_ARG);
2390 
2391 	dev = handlep->lh_vp->v_rdev;
2392 
2393 	csp = VTOCS(handlep->lh_vp);
2394 	mutex_enter(&csp->s_lock);
2395 	if ((dip = csp->s_dip) != NULL)
2396 		e_ddi_hold_devi(dip);
2397 	mutex_exit(&csp->s_lock);
2398 	if (dip == NULL)
2399 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2400 
2401 	if (dip == NULL) {
2402 		flags |= DDI_UNBND_DLPI2;
2403 	} else if (flags & LDI_DEV_T_ANY) {
2404 		flags &= ~LDI_DEV_T_ANY;
2405 		dev = DDI_DEV_T_ANY;
2406 	}
2407 
2408 	if (dip != NULL) {
2409 		char	*prop_val;
2410 		int	prop_len;
2411 
2412 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2413 		    (caddr_t *)&prop_val, &prop_len, 0);
2414 
2415 		/* if we got it then return it */
2416 		if (res == DDI_PROP_SUCCESS) {
2417 			char	**str_array;
2418 			int	nelem;
2419 
2420 			/*
2421 			 * pack the returned string array into the format
2422 			 * our callers expect
2423 			 */
2424 			if (i_pack_string_array(prop_val, prop_len,
2425 			    &str_array, &nelem) == 0) {
2426 
2427 				*data = str_array;
2428 				*nelements = nelem;
2429 
2430 				ddi_prop_free(prop_val);
2431 				ddi_release_devi(dip);
2432 				return (res);
2433 			}
2434 
2435 			/*
2436 			 * the format of the returned property must have
2437 			 * been bad so throw it out
2438 			 */
2439 			ddi_prop_free(prop_val);
2440 		}
2441 	}
2442 
2443 	/* call the normal property interfaces */
2444 	res = ddi_prop_lookup_string_array(dev, dip, flags,
2445 	    name, data, nelements);
2446 
2447 	if (dip != NULL)
2448 		ddi_release_devi(dip);
2449 
2450 	return (res);
2451 }
2452 
2453 int
2454 ldi_prop_lookup_string(ldi_handle_t lh,
2455     uint_t flags, char *name, char **data)
2456 {
2457 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2458 	dev_info_t		*dip;
2459 	dev_t			dev;
2460 	int			res;
2461 	struct snode		*csp;
2462 
2463 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2464 		return (DDI_PROP_INVAL_ARG);
2465 
2466 	dev = handlep->lh_vp->v_rdev;
2467 
2468 	csp = VTOCS(handlep->lh_vp);
2469 	mutex_enter(&csp->s_lock);
2470 	if ((dip = csp->s_dip) != NULL)
2471 		e_ddi_hold_devi(dip);
2472 	mutex_exit(&csp->s_lock);
2473 	if (dip == NULL)
2474 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2475 
2476 	if (dip == NULL) {
2477 		flags |= DDI_UNBND_DLPI2;
2478 	} else if (flags & LDI_DEV_T_ANY) {
2479 		flags &= ~LDI_DEV_T_ANY;
2480 		dev = DDI_DEV_T_ANY;
2481 	}
2482 
2483 	if (dip != NULL) {
2484 		char	*prop_val;
2485 		int	prop_len;
2486 
2487 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2488 		    (caddr_t *)&prop_val, &prop_len, 0);
2489 
2490 		/* if we got it then return it */
2491 		if (res == DDI_PROP_SUCCESS) {
2492 			/*
2493 			 * sanity check the value returned.
2494 			 */
2495 			if (i_check_string(prop_val, prop_len)) {
2496 				ddi_prop_free(prop_val);
2497 			} else {
2498 				*data = prop_val;
2499 				ddi_release_devi(dip);
2500 				return (res);
2501 			}
2502 		}
2503 	}
2504 
2505 	/* call the normal property interfaces */
2506 	res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2507 
2508 	if (dip != NULL)
2509 		ddi_release_devi(dip);
2510 
2511 #ifdef DEBUG
2512 	if (res == DDI_PROP_SUCCESS) {
2513 		/*
2514 		 * Keep ourselves honest: make sure the framework
2515 		 * returns strings in the same format as we're
2516 		 * demanding from drivers.
2517 		 */
2518 		struct prop_driver_data	*pdd;
2519 		int			pdd_prop_size;
2520 
2521 		pdd = ((struct prop_driver_data *)(*data)) - 1;
2522 		pdd_prop_size = pdd->pdd_size -
2523 		    sizeof (struct prop_driver_data);
2524 		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2525 	}
2526 #endif /* DEBUG */
2527 
2528 	return (res);
2529 }
2530 
2531 int
2532 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2533     uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2534 {
2535 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2536 	dev_info_t		*dip;
2537 	dev_t			dev;
2538 	int			res;
2539 	struct snode		*csp;
2540 
2541 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2542 		return (DDI_PROP_INVAL_ARG);
2543 
2544 	dev = handlep->lh_vp->v_rdev;
2545 
2546 	csp = VTOCS(handlep->lh_vp);
2547 	mutex_enter(&csp->s_lock);
2548 	if ((dip = csp->s_dip) != NULL)
2549 		e_ddi_hold_devi(dip);
2550 	mutex_exit(&csp->s_lock);
2551 	if (dip == NULL)
2552 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2553 
2554 	if (dip == NULL) {
2555 		flags |= DDI_UNBND_DLPI2;
2556 	} else if (flags & LDI_DEV_T_ANY) {
2557 		flags &= ~LDI_DEV_T_ANY;
2558 		dev = DDI_DEV_T_ANY;
2559 	}
2560 
2561 	if (dip != NULL) {
2562 		uchar_t	*prop_val;
2563 		int	prop_len;
2564 
2565 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2566 		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2567 
2568 		/* if we got it then return it */
2569 		if (res == DDI_PROP_SUCCESS) {
2570 			*nelements = prop_len / sizeof (uchar_t);
2571 			*data = prop_val;
2572 
2573 			ddi_release_devi(dip);
2574 			return (res);
2575 		}
2576 	}
2577 
2578 	/* call the normal property interfaces */
2579 	res = ddi_prop_lookup_byte_array(dev, dip, flags,
2580 	    name, data, nelements);
2581 
2582 	if (dip != NULL)
2583 		ddi_release_devi(dip);
2584 
2585 	return (res);
2586 }
2587 
2588 int
2589 ldi_prop_get_int(ldi_handle_t lh,
2590     uint_t flags, char *name, int defvalue)
2591 {
2592 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2593 	dev_info_t		*dip;
2594 	dev_t			dev;
2595 	int			res;
2596 	struct snode		*csp;
2597 
2598 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2599 		return (defvalue);
2600 
2601 	dev = handlep->lh_vp->v_rdev;
2602 
2603 	csp = VTOCS(handlep->lh_vp);
2604 	mutex_enter(&csp->s_lock);
2605 	if ((dip = csp->s_dip) != NULL)
2606 		e_ddi_hold_devi(dip);
2607 	mutex_exit(&csp->s_lock);
2608 	if (dip == NULL)
2609 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2610 
2611 	if (dip == NULL) {
2612 		flags |= DDI_UNBND_DLPI2;
2613 	} else if (flags & LDI_DEV_T_ANY) {
2614 		flags &= ~LDI_DEV_T_ANY;
2615 		dev = DDI_DEV_T_ANY;
2616 	}
2617 
2618 	if (dip != NULL) {
2619 		int	prop_val;
2620 		int	prop_len;
2621 
2622 		/*
2623 		 * first call the driver's prop_op interface to allow
2624 		 * it to override default property values.
2625 		 */
2626 		prop_len = sizeof (int);
2627 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2628 		    flags | DDI_PROP_DYNAMIC, name,
2629 		    (caddr_t)&prop_val, &prop_len);
2630 
2631 		/* if we got it then return it */
2632 		if ((res == DDI_PROP_SUCCESS) &&
2633 		    (prop_len == sizeof (int))) {
2634 			res = prop_val;
2635 			ddi_release_devi(dip);
2636 			return (res);
2637 		}
2638 	}
2639 
2640 	/* call the normal property interfaces */
2641 	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2642 
2643 	if (dip != NULL)
2644 		ddi_release_devi(dip);
2645 
2646 	return (res);
2647 }
2648 
2649 int64_t
2650 ldi_prop_get_int64(ldi_handle_t lh,
2651     uint_t flags, char *name, int64_t defvalue)
2652 {
2653 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2654 	dev_info_t		*dip;
2655 	dev_t			dev;
2656 	int64_t			res;
2657 	struct snode		*csp;
2658 
2659 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2660 		return (defvalue);
2661 
2662 	dev = handlep->lh_vp->v_rdev;
2663 
2664 	csp = VTOCS(handlep->lh_vp);
2665 	mutex_enter(&csp->s_lock);
2666 	if ((dip = csp->s_dip) != NULL)
2667 		e_ddi_hold_devi(dip);
2668 	mutex_exit(&csp->s_lock);
2669 	if (dip == NULL)
2670 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2671 
2672 	if (dip == NULL) {
2673 		flags |= DDI_UNBND_DLPI2;
2674 	} else if (flags & LDI_DEV_T_ANY) {
2675 		flags &= ~LDI_DEV_T_ANY;
2676 		dev = DDI_DEV_T_ANY;
2677 	}
2678 
2679 	if (dip != NULL) {
2680 		int64_t	prop_val;
2681 		int	prop_len;
2682 
2683 		/*
2684 		 * first call the driver's prop_op interface to allow
2685 		 * it to override default property values.
2686 		 */
2687 		prop_len = sizeof (int64_t);
2688 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2689 		    flags | DDI_PROP_DYNAMIC, name,
2690 		    (caddr_t)&prop_val, &prop_len);
2691 
2692 		/* if we got it then return it */
2693 		if ((res == DDI_PROP_SUCCESS) &&
2694 		    (prop_len == sizeof (int64_t))) {
2695 			res = prop_val;
2696 			ddi_release_devi(dip);
2697 			return (res);
2698 		}
2699 	}
2700 
2701 	/* call the normal property interfaces */
2702 	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2703 
2704 	if (dip != NULL)
2705 		ddi_release_devi(dip);
2706 
2707 	return (res);
2708 }
2709 
2710 int
2711 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2712 {
2713 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2714 	dev_info_t		*dip;
2715 	dev_t			dev;
2716 	int			res, prop_len;
2717 	struct snode		*csp;
2718 
2719 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2720 		return (0);
2721 
2722 	dev = handlep->lh_vp->v_rdev;
2723 
2724 	csp = VTOCS(handlep->lh_vp);
2725 	mutex_enter(&csp->s_lock);
2726 	if ((dip = csp->s_dip) != NULL)
2727 		e_ddi_hold_devi(dip);
2728 	mutex_exit(&csp->s_lock);
2729 	if (dip == NULL)
2730 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2731 
2732 	/* if NULL dip, prop does NOT exist */
2733 	if (dip == NULL)
2734 		return (0);
2735 
2736 	if (flags & LDI_DEV_T_ANY) {
2737 		flags &= ~LDI_DEV_T_ANY;
2738 		dev = DDI_DEV_T_ANY;
2739 	}
2740 
2741 	/*
2742 	 * first call the driver's prop_op interface to allow
2743 	 * it to override default property values.
2744 	 */
2745 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
2746 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2747 
2748 	if (res == DDI_PROP_SUCCESS) {
2749 		ddi_release_devi(dip);
2750 		return (1);
2751 	}
2752 
2753 	/* call the normal property interfaces */
2754 	res = ddi_prop_exists(dev, dip, flags, name);
2755 
2756 	ddi_release_devi(dip);
2757 	return (res);
2758 }
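
/*
 * Illustrative sketch (assumption): the usual LDI property lookup pattern,
 * along the lines of what ldi_get_size() above does for "Nblocks" and
 * "blksize".  The property name and helper name are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int
sketch_get_int_prop(ldi_handle_t lh, int defval)
{
	if (!ldi_prop_exists(lh, LDI_DEV_T_ANY | DDI_PROP_DONTPASS,
	    "example-prop"))
		return (defval);

	return (ldi_prop_get_int(lh, LDI_DEV_T_ANY | DDI_PROP_DONTPASS,
	    "example-prop", defval));
}
#endif	/* LDI_USAGE_SKETCH */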
2759 
2760 #ifdef	LDI_OBSOLETE_EVENT
2761 
2762 int
2763 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2764 {
2765 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2766 	dev_info_t		*dip;
2767 	dev_t			dev;
2768 	int			res;
2769 	struct snode		*csp;
2770 
2771 	if ((lh == NULL) || (name == NULL) ||
2772 	    (strlen(name) == 0) || (ecp == NULL)) {
2773 		return (DDI_FAILURE);
2774 	}
2775 
2776 	ASSERT(!servicing_interrupt());
2777 
2778 	dev = handlep->lh_vp->v_rdev;
2779 
2780 	csp = VTOCS(handlep->lh_vp);
2781 	mutex_enter(&csp->s_lock);
2782 	if ((dip = csp->s_dip) != NULL)
2783 		e_ddi_hold_devi(dip);
2784 	mutex_exit(&csp->s_lock);
2785 	if (dip == NULL)
2786 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2787 
2788 	if (dip == NULL)
2789 		return (DDI_FAILURE);
2790 
2791 	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2792 	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2793 	    name, (void *)dip, (void *)ecp));
2794 
2795 	res = ddi_get_eventcookie(dip, name, ecp);
2796 
2797 	ddi_release_devi(dip);
2798 	return (res);
2799 }
2800 
2801 int
2802 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2803     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2804     void *arg, ldi_callback_id_t *id)
2805 {
2806 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2807 	struct ldi_event	*lep;
2808 	dev_info_t		*dip;
2809 	dev_t			dev;
2810 	int			res;
2811 	struct snode		*csp;
2812 
2813 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2814 		return (DDI_FAILURE);
2815 
2816 	ASSERT(!servicing_interrupt());
2817 
2818 	dev = handlep->lh_vp->v_rdev;
2819 
2820 	csp = VTOCS(handlep->lh_vp);
2821 	mutex_enter(&csp->s_lock);
2822 	if ((dip = csp->s_dip) != NULL)
2823 		e_ddi_hold_devi(dip);
2824 	mutex_exit(&csp->s_lock);
2825 	if (dip == NULL)
2826 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2827 
2828 	if (dip == NULL)
2829 		return (DDI_FAILURE);
2830 
2831 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2832 	lep->le_lhp = handlep;
2833 	lep->le_arg = arg;
2834 	lep->le_handler = handler;
2835 
2836 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2837 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2838 		LDI_EVENTCB((CE_WARN, "%s: unable to add "
2839 		    "event callback", "ldi_add_event_handler"));
2840 		ddi_release_devi(dip);
2841 		kmem_free(lep, sizeof (struct ldi_event));
2842 		return (res);
2843 	}
2844 
2845 	*id = (ldi_callback_id_t)lep;
2846 
2847 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2848 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2849 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2850 
2851 	handle_event_add(lep);
2852 	ddi_release_devi(dip);
2853 	return (res);
2854 }
2855 
2856 int
2857 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2858 {
2859 	ldi_event_t		*lep = (ldi_event_t *)id;
2860 	int			res;
2861 
2862 	if ((lh == NULL) || (id == NULL))
2863 		return (DDI_FAILURE);
2864 
2865 	ASSERT(!servicing_interrupt());
2866 
2867 	if ((res = ddi_remove_event_handler(lep->le_id))
2868 	    != DDI_SUCCESS) {
2869 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2870 		    "event callback", "ldi_remove_event_handler"));
2871 		return (res);
2872 	}
2873 
2874 	handle_event_remove(lep);
2875 	kmem_free(lep, sizeof (struct ldi_event));
2876 	return (res);
2877 }
2878 
2879 #endif
2880 
2881 /*
2882  * Here are some definitions of terms used in the following LDI events
2883  * code:
2884  *
2885  * "LDI events" AKA "native events": These are events defined by the
2886  * "new" LDI event framework. These events are serviced by the LDI event
2887  * framework itself and thus are native to it.
2888  *
2889  * "LDI contract events": These are contract events that correspond to the
2890  * LDI events. This mapping of LDI events to contract events is defined by
2891  * the ldi_ev_cookies[] array above.
2892  *
2893  * NDI events: These are events which are serviced by the NDI event subsystem.
2894  * The LDI subsystem just provides a thin wrapper around the NDI event
2895  * interfaces. These events are therefore *not* native events.
2896  */
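
/*
 * Illustrative sketch (assumption, not copied from this file): the lookups
 * below key off three fields of the ldi_ev_cookies[] entries defined above,
 * with a NULL ck_evname entry terminating the array.  The type name and
 * field layout shown here are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
struct ldi_ev_cookie_sketch {
	char	*ck_evname;	/* LDI event name */
	uint_t	ck_sync;	/* non-zero if the event is negotiable */
	uint_t	ck_ctype;	/* corresponding contract event type */
};
#endif	/* LDI_USAGE_SKETCH */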
2897 
2898 static int
2899 ldi_native_event(const char *evname)
2900 {
2901 	int i;
2902 
2903 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2904 
2905 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2906 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2907 			return (1);
2908 	}
2909 
2910 	return (0);
2911 }
2912 
2913 static uint_t
2914 ldi_ev_sync_event(const char *evname)
2915 {
2916 	int i;
2917 
2918 	ASSERT(ldi_native_event(evname));
2919 
2920 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2921 
2922 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2923 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2924 			return (ldi_ev_cookies[i].ck_sync);
2925 	}
2926 
2927 	/*
2928 	 * This should never happen until non-contract based
2929 	 * LDI events are introduced. If that happens, we will
2930 	 * use a "special" token to indicate that there are no
2931 	 * contracts corresponding to this LDI event.
2932 	 */
2933 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2934 
2935 	return (0);
2936 }
2937 
2938 static uint_t
2939 ldi_contract_event(const char *evname)
2940 {
2941 	int i;
2942 
2943 	ASSERT(ldi_native_event(evname));
2944 
2945 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2946 
2947 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2948 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2949 			return (ldi_ev_cookies[i].ck_ctype);
2950 	}
2951 
2952 	/*
2953 	 * This should never happen until non-contract based
2954 	 * LDI events are introduced. If that happens, we will
2955 	 * use a "special" token to indicate that there are no
2956 	 * contracts corresponding to this LDI event.
2957 	 */
2958 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2959 
2960 	return (0);
2961 }
2962 
2963 char *
2964 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2965 {
2966 	int i;
2967 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2968 
2969 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2970 		if (&ldi_ev_cookies[i] == cookie_impl) {
2971 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2972 			    ldi_ev_cookies[i].ck_evname));
2973 			return (ldi_ev_cookies[i].ck_evname);
2974 		}
2975 	}
2976 
2977 	/*
2978 	 * Not an LDI native event. Must be NDI event service.
2979 	 * Just return a generic string.
2980 	 */
2981 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2982 	return (NDI_EVENT_SERVICE);
2983 }
2984 
2985 static int
2986 ldi_native_cookie(ldi_ev_cookie_t cookie)
2987 {
2988 	int i;
2989 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2990 
2991 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2992 		if (&ldi_ev_cookies[i] == cookie_impl) {
2993 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2994 			return (1);
2995 		}
2996 	}
2997 
2998 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
2999 	return (0);
3000 }
3001 
3002 static ldi_ev_cookie_t
3003 ldi_get_native_cookie(const char *evname)
3004 {
3005 	int i;
3006 
3007 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3008 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3009 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3010 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3011 		}
3012 	}
3013 
3014 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3015 	return (NULL);
3016 }
3017 
3018 /*
3019  * ldi_ev_lock() needs to be recursive, since layered drivers may call
3020  * other LDI interfaces (such as ldi_close()) from within the context of
3021  * a notify callback. Since the notify callback is called with
3022  * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock(), the lock
3023  * needs to be recursive.
3024  */
3025 static void
3026 ldi_ev_lock(void)
3027 {
3028 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3029 
3030 	mutex_enter(&ldi_ev_callback_list.le_lock);
3031 	if (ldi_ev_callback_list.le_thread == curthread) {
3032 		ASSERT(ldi_ev_callback_list.le_busy >= 1);
3033 		ldi_ev_callback_list.le_busy++;
3034 	} else {
3035 		while (ldi_ev_callback_list.le_busy)
3036 			cv_wait(&ldi_ev_callback_list.le_cv,
3037 			    &ldi_ev_callback_list.le_lock);
3038 		ASSERT(ldi_ev_callback_list.le_thread == NULL);
3039 		ldi_ev_callback_list.le_busy = 1;
3040 		ldi_ev_callback_list.le_thread = curthread;
3041 	}
3042 	mutex_exit(&ldi_ev_callback_list.le_lock);
3043 
3044 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3045 }
3046 
3047 static void
3048 ldi_ev_unlock(void)
3049 {
3050 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3051 	mutex_enter(&ldi_ev_callback_list.le_lock);
3052 	ASSERT(ldi_ev_callback_list.le_thread == curthread);
3053 	ASSERT(ldi_ev_callback_list.le_busy >= 1);
3054 
3055 	ldi_ev_callback_list.le_busy--;
3056 	if (ldi_ev_callback_list.le_busy == 0) {
3057 		ldi_ev_callback_list.le_thread = NULL;
3058 		cv_signal(&ldi_ev_callback_list.le_cv);
3059 	}
3060 	mutex_exit(&ldi_ev_callback_list.le_lock);
3061 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3062 }
3063 
3064 int
3065 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3066 {
3067 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
3068 	dev_info_t		*dip;
3069 	dev_t			dev;
3070 	int			res;
3071 	struct snode		*csp;
3072 	ddi_eventcookie_t	ddi_cookie;
3073 	ldi_ev_cookie_t		tcookie;
3074 
3075 	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3076 	    evname ? evname : "<NULL>"));
3077 
3078 	if (lh == NULL || evname == NULL ||
3079 	    strlen(evname) == 0 || cookiep == NULL) {
3080 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3081 		return (LDI_EV_FAILURE);
3082 	}
3083 
3084 	*cookiep = NULL;
3085 
3086 	/*
3087 	 * First check if it is an LDI native event
3088 	 */
3089 	tcookie = ldi_get_native_cookie(evname);
3090 	if (tcookie) {
3091 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3092 		*cookiep = tcookie;
3093 		return (LDI_EV_SUCCESS);
3094 	}
3095 
3096 	/*
3097 	 * Not an LDI native event. Try NDI event services.
3098 	 */
3099 
3100 	dev = handlep->lh_vp->v_rdev;
3101 
3102 	csp = VTOCS(handlep->lh_vp);
3103 	mutex_enter(&csp->s_lock);
3104 	if ((dip = csp->s_dip) != NULL)
3105 		e_ddi_hold_devi(dip);
3106 	mutex_exit(&csp->s_lock);
3107 	if (dip == NULL)
3108 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3109 
3110 	if (dip == NULL) {
3111 		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3112 		    "handle: %p", (void *)handlep);
3113 		return (LDI_EV_FAILURE);
3114 	}
3115 
3116 	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3117 	    (void *)dip, evname));
3118 
3119 	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3120 
3121 	ddi_release_devi(dip);
3122 
3123 	if (res == DDI_SUCCESS) {
3124 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3125 		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
3126 		return (LDI_EV_SUCCESS);
3127 	} else {
3128 		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3129 		return (LDI_EV_FAILURE);
3130 	}
3131 }
3132 
3133 /*ARGSUSED*/
3134 static void
3135 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3136     void *arg, void *ev_data)
3137 {
3138 	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3139 
3140 	ASSERT(lecp != NULL);
3141 	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3142 	ASSERT(lecp->lec_lhp);
3143 	ASSERT(lecp->lec_notify == NULL);
3144 	ASSERT(lecp->lec_finalize);
3145 
3146 	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3147 	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3148 	    (void *)lecp->lec_arg, (void *)ev_data));
3149 
3150 	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3151 	    lecp->lec_arg, ev_data);
3152 }
3153 
3154 int
3155 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3156     ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3157 {
3158 	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
3159 	ldi_ev_callback_impl_t	*lecp;
3160 	dev_t			dev;
3161 	struct snode		*csp;
3162 	dev_info_t		*dip;
3163 	int			ddi_event;
3164 
3165 	ASSERT(!servicing_interrupt());
3166 
3167 	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3168 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3169 		return (LDI_EV_FAILURE);
3170 	}
3171 
3172 	if (callb->cb_vers != LDI_EV_CB_VERS) {
3173 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3174 		return (LDI_EV_FAILURE);
3175 	}
3176 
3177 	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3178 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3179 		return (LDI_EV_FAILURE);
3180 	}
3181 
3182 	*id = 0;
3183 
3184 	dev = lhp->lh_vp->v_rdev;
3185 	csp = VTOCS(lhp->lh_vp);
3186 	mutex_enter(&csp->s_lock);
3187 	if ((dip = csp->s_dip) != NULL)
3188 		e_ddi_hold_devi(dip);
3189 	mutex_exit(&csp->s_lock);
3190 	if (dip == NULL)
3191 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3192 
3193 	if (dip == NULL) {
3194 		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3195 		    "LDI handle: %p", (void *)lhp);
3196 		return (LDI_EV_FAILURE);
3197 	}
3198 
3199 	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3200 
3201 	ddi_event = 0;
3202 	if (!ldi_native_cookie(cookie)) {
3203 		if (callb->cb_notify || callb->cb_finalize == NULL) {
3204 			/*
3205 			 * NDI event services only accept finalize
3206 			 */
3207 			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3208 			    "Only finalize"
3209 			    " callback supported with this cookie",
3210 			    "ldi_ev_register_callbacks",
3211 			    lhp->lh_ident->li_modname);
3212 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3213 			ddi_release_devi(dip);
3214 			return (LDI_EV_FAILURE);
3215 		}
3216 
3217 		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3218 		    i_ldi_ev_callback, (void *)lecp,
3219 		    (ddi_callback_id_t *)&lecp->lec_id)
3220 		    != DDI_SUCCESS) {
3221 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3222 			ddi_release_devi(dip);
3223 			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3224 			    "ddi_add_event_handler failed"));
3225 			return (LDI_EV_FAILURE);
3226 		}
3227 		ddi_event = 1;
3228 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3229 		    "ddi_add_event_handler success"));
3230 	}
3231 
3232 
3233 
3234 	ldi_ev_lock();
3235 
3236 	/*
3237 	 * Add the notify/finalize callback to the LDI's list of callbacks.
3238 	 */
3239 	lecp->lec_lhp = lhp;
3240 	lecp->lec_dev = lhp->lh_vp->v_rdev;
3241 	lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3242 	lecp->lec_notify = callb->cb_notify;
3243 	lecp->lec_finalize = callb->cb_finalize;
3244 	lecp->lec_arg = arg;
3245 	lecp->lec_cookie = cookie;
3246 	if (!ddi_event)
3247 		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3248 	else
3249 		ASSERT(lecp->lec_id);
3250 	lecp->lec_dip = dip;
3251 	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3252 
3253 	*id = (ldi_callback_id_t)lecp->lec_id;
3254 
3255 	ldi_ev_unlock();
3256 
3257 	ddi_release_devi(dip);
3258 
3259 	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3260 	    "notify/finalize"));
3261 
3262 	return (LDI_EV_SUCCESS);
3263 }
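
/*
 * Illustrative sketch (assumption): how a layered driver might register for
 * an LDI event using ldi_ev_get_cookie() and ldi_ev_register_callbacks().
 * The callback names, the event name (LDI_EV_OFFLINE) and the guard macro
 * are used for illustration only.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int sketch_notify(ldi_handle_t, ldi_ev_cookie_t, void *, void *);
static void sketch_finalize(ldi_handle_t, ldi_ev_cookie_t, int, void *,
    void *);

static int
sketch_register(ldi_handle_t lh, void *state, ldi_callback_id_t *idp)
{
	ldi_ev_cookie_t		cookie;
	ldi_ev_callback_t	cb;

	if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) != LDI_EV_SUCCESS)
		return (LDI_EV_FAILURE);

	cb.cb_vers = LDI_EV_CB_VERS;
	cb.cb_notify = sketch_notify;
	cb.cb_finalize = sketch_finalize;

	return (ldi_ev_register_callbacks(lh, cookie, &cb, state, idp));
}
#endif	/* LDI_USAGE_SKETCH */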
3264 
3265 static int
3266 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3267     dev_t dev, int spec_type)
3268 {
3269 	ASSERT(lecp);
3270 	ASSERT(dip);
3271 	ASSERT(dev != DDI_DEV_T_NONE);
3272 	ASSERT(dev != NODEV);
3273 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3274 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3275 	ASSERT(lecp->lec_dip);
3276 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3277 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3278 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3279 	ASSERT(lecp->lec_dev != NODEV);
3280 
3281 	if (dip != lecp->lec_dip)
3282 		return (0);
3283 
3284 	if (dev != DDI_DEV_T_ANY) {
3285 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3286 			return (0);
3287 	}
3288 
3289 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3290 
3291 	return (1);
3292 }
3293 
3294 /*
3295  * LDI framework function to post a "notify" event to all layered drivers
3296  * that have registered for that event
3297  *
3298  * Returns:
3299  *		LDI_EV_SUCCESS - registered callbacks allow event
3300  *		LDI_EV_FAILURE - registered callbacks block event
3301  *		LDI_EV_NONE    - No matching LDI callbacks
3302  *
3303  * This function is *not* to be called by layered drivers. It is for I/O
3304  * framework code in Solaris, such as the I/O retire code and DR code
3305  * to call while servicing a device event such as offline or degraded.
3306  */
3307 int
3308 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3309     void *ev_data)
3310 {
3311 	ldi_ev_callback_impl_t *lecp;
3312 	list_t	*listp;
3313 	int	ret;
3314 	char	*lec_event;
3315 
3316 	ASSERT(dip);
3317 	ASSERT(dev != DDI_DEV_T_NONE);
3318 	ASSERT(dev != NODEV);
3319 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3320 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3321 	ASSERT(event);
3322 	ASSERT(ldi_native_event(event));
3323 	ASSERT(ldi_ev_sync_event(event));
3324 
3325 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3326 	    (void *)dip, event));
3327 
3328 	ret = LDI_EV_NONE;
3329 	ldi_ev_lock();
3330 	listp = &ldi_ev_callback_list.le_head;
3331 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3332 
3333 		/* Check if matching device */
3334 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3335 			continue;
3336 
3337 		if (lecp->lec_lhp == NULL) {
3338 			/*
3339 			 * Consumer has unregistered the handle and so
3340 			 * is no longer interested in notify events.
3341 			 */
3342 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3343 			    "handle, skipping"));
3344 			continue;
3345 		}
3346 
3347 		if (lecp->lec_notify == NULL) {
3348 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3349 			    "callback. skipping"));
3350 			continue;	/* not interested in notify */
3351 		}
3352 
3353 		/*
3354 		 * Check if matching event
3355 		 */
3356 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3357 		if (strcmp(event, lec_event) != 0) {
3358 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3359 			    " event {%s,%s}. skipping", event, lec_event));
3360 			continue;
3361 		}
3362 
3363 		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3364 		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3365 		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3366 			ret = LDI_EV_FAILURE;
3367 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3368 			    " FAILURE"));
3369 			break;
3370 		}
3371 
3372 		/* We have a matching callback that allows the event to occur */
3373 		ret = LDI_EV_SUCCESS;
3374 
3375 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3376 	}
3377 
3378 	if (ret != LDI_EV_FAILURE)
3379 		goto out;
3380 
3381 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3382 
3383 	/*
3384 	 * Undo notifies already sent
3385 	 */
3386 	lecp = list_prev(listp, lecp);
3387 	for (; lecp; lecp = list_prev(listp, lecp)) {
3388 
3389 		/*
3390 		 * Check if matching device
3391 		 */
3392 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3393 			continue;
3394 
3395 
3396 		if (lecp->lec_finalize == NULL) {
3397 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3398 			    "skipping"));
3399 			continue;	/* not interested in finalize */
3400 		}
3401 
3402 		/*
3403 		 * It is possible that, in response to a notify event, a
3404 		 * layered driver closed its LDI handle, so it is OK to
3405 		 * have a NULL LDI handle for finalize. The layered
3406 		 * driver is expected to maintain state in its "arg"
3407 		 * parameter to keep track of the closed device.
3408 		 */
3409 
3410 		/* Check if matching event */
3411 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3412 		if (strcmp(event, lec_event) != 0) {
3413 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3414 			    "event: %s,%s, skipping", event, lec_event));
3415 			continue;
3416 		}
3417 
3418 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3419 
3420 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3421 		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3422 
3423 		/*
3424 		 * If this is an LDI native event and the handle was closed
3425 		 * in the notify context, NULL out the finalize callback: we
3426 		 * have already made the one finalize call allowed here.
3427 		 */
3428 		if (lecp->lec_lhp == NULL &&
3429 		    ldi_native_cookie(lecp->lec_cookie)) {
3430 			LDI_EVDBG((CE_NOTE,
3431 			    "ldi_invoke_notify(): NULL-ing finalize after "
3432 			    "calling 1 finalize following ldi_close"));
3433 			lecp->lec_finalize = NULL;
3434 		}
3435 	}
3436 
3437 out:
3438 	ldi_ev_unlock();
3439 
3440 	if (ret == LDI_EV_NONE) {
3441 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3442 		    "LDI callbacks"));
3443 	}
3444 
3445 	return (ret);
3446 }
3447 
3448 /*
3449  * Framework function to be called from a layered driver to propagate
3450  * LDI "notify" events to exported minors.
3451  *
3452  * This function is a public interface exported by the LDI framework
3453  * for use by layered drivers to propagate device events up the software
3454  * stack.
3455  */
3456 int
3457 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3458     ldi_ev_cookie_t cookie, void *ev_data)
3459 {
3460 	char		*evname = ldi_ev_get_type(cookie);
3461 	uint_t		ct_evtype;
3462 	dev_t		dev;
3463 	major_t		major;
3464 	int		retc;
3465 	int		retl;
3466 
3467 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3468 	ASSERT(dip);
3469 	ASSERT(ldi_native_cookie(cookie));
3470 
3471 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3472 	    evname, (void *)dip));
3473 
3474 	if (!ldi_ev_sync_event(evname)) {
3475 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3476 		    "negotiatable event", evname);
3477 		return (LDI_EV_SUCCESS);
3478 	}
3479 
3480 	major = ddi_driver_major(dip);
3481 	if (major == DDI_MAJOR_T_NONE) {
3482 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3483 		(void) ddi_pathname(dip, path);
3484 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3485 		    "for device %s", path);
3486 		kmem_free(path, MAXPATHLEN);
3487 		return (LDI_EV_FAILURE);
3488 	}
3489 	dev = makedevice(major, minor);
3490 
3491 	/*
3492 	 * Generate negotiation contract events on contracts (if any) associated
3493 	 * with this minor.
3494 	 */
3495 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3496 	ct_evtype = ldi_contract_event(evname);
3497 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3498 	if (retc == CT_NACK) {
3499 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3500 		return (LDI_EV_FAILURE);
3501 	}
3502 
3503 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3504 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3505 	if (retl == LDI_EV_FAILURE) {
3506 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3507 		    "returned FAILURE. Calling contract negend"));
3508 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3509 		return (LDI_EV_FAILURE);
3510 	}
3511 
3512 	/*
3513 	 * The very fact that we are here indicates that there is an
3514 	 * LDI callback (and hence a constraint) for the retire of the
3515 	 * HW device. So we just return success even if there are no
3516 	 * contracts or LDI callbacks against the minors layered on top
3517 	 * of the HW minors.
3518 	 */
3519 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3520 	return (LDI_EV_SUCCESS);
3521 }
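
/*
 * Illustrative sketch (assumption): a layered driver's notify callback
 * propagating the event to the minor it exports via ldi_ev_notify() before
 * agreeing to it.  The devinfo pointer carried in "arg", the minor number
 * and the spec_type are hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static int
sketch_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
    void *ev_data)
{
	dev_info_t	*self = arg;	/* this driver's own devinfo node */

	if (ldi_ev_notify(self, 0, S_IFCHR, cookie, ev_data) !=
	    LDI_EV_SUCCESS)
		return (LDI_EV_FAILURE);	/* a consumer blocked it */

	return (LDI_EV_SUCCESS);
}
#endif	/* LDI_USAGE_SKETCH */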
3522 
3523 /*
3524  * LDI framework function to invoke "finalize" callbacks for all layered
3525  * drivers that have registered callbacks for that event.
3526  *
3527  * This function is *not* to be called by layered drivers. It is for I/O
3528  * framework code in Solaris, such as the I/O retire code and DR code
3529  * to call while servicing a device event such as offline or degraded.
3530  */
3531 void
3532 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3533     int ldi_result, void *ev_data)
3534 {
3535 	ldi_ev_callback_impl_t *lecp;
3536 	list_t	*listp;
3537 	char	*lec_event;
3538 	int	found = 0;
3539 
3540 	ASSERT(dip);
3541 	ASSERT(dev != DDI_DEV_T_NONE);
3542 	ASSERT(dev != NODEV);
3543 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3544 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3545 	ASSERT(event);
3546 	ASSERT(ldi_native_event(event));
3547 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3548 
3549 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3550 	    " event=%s", (void *)dip, ldi_result, event));
3551 
3552 	ldi_ev_lock();
3553 	listp = &ldi_ev_callback_list.le_head;
3554 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3555 
3556 		if (lecp->lec_finalize == NULL) {
3557 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3558 			    "finalize. Skipping"));
3559 			continue;	/* Not interested in finalize */
3560 		}
3561 
3562 		/*
3563 		 * Check if matching device
3564 		 */
3565 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3566 			continue;
3567 
3568 		/*
3569 		 * It is valid for the LDI handle to be NULL during finalize.
3570 		 * The layered driver may have done an LDI close in the notify
3571 		 * callback.
3572 		 */
3573 
3574 		/*
3575 		 * Check if matching event
3576 		 */
3577 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3578 		if (strcmp(event, lec_event) != 0) {
3579 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3580 			    "matching event {%s,%s}. Skipping",
3581 			    event, lec_event));
3582 			continue;
3583 		}
3584 
3585 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3586 
3587 		found = 1;
3588 
3589 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3590 		    ldi_result, lecp->lec_arg, ev_data);
3591 
3592 		/*
3593 		 * If this is an LDI native event and the handle was closed
3594 		 * in the notify context, NULL out the finalize callback: we
3595 		 * have already made the one finalize call allowed here.
3596 		 */
3597 		if (lecp->lec_lhp == NULL &&
3598 		    ldi_native_cookie(lecp->lec_cookie)) {
3599 			LDI_EVDBG((CE_NOTE,
3600 			    "ldi_invoke_finalize(): NULLing finalize after "
3601 			    "calling 1 finalize following ldi_close"));
3602 			lecp->lec_finalize = NULL;
3603 		}
3604 	}
3605 	ldi_ev_unlock();
3606 
3607 	if (found)
3608 		return;
3609 
3610 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3611 }
3612 
3613 /*
3614  * Framework function to be called from a layered driver to propagate
3615  * LDI "finalize" events to exported minors.
3616  *
3617  * This function is a public interface exported by the LDI framework
3618  * for use by layered drivers to propagate device events up the software
3619  * stack.
3620  */
3621 void
3622 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3623     ldi_ev_cookie_t cookie, void *ev_data)
3624 {
3625 	dev_t dev;
3626 	major_t major;
3627 	char *evname;
3628 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3629 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3630 	uint_t ct_evtype;
3631 
3632 	ASSERT(dip);
3633 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3634 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3635 	ASSERT(ldi_native_cookie(cookie));
3636 
3637 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3638 
3639 	major = ddi_driver_major(dip);
3640 	if (major == DDI_MAJOR_T_NONE) {
3641 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3642 		(void) ddi_pathname(dip, path);
3643 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3644 		    "for device %s", path);
3645 		kmem_free(path, MAXPATHLEN);
3646 		return;
3647 	}
3648 	dev = makedevice(major, minor);
3649 
3650 	evname = ldi_ev_get_type(cookie);
3651 
3652 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3653 	ct_evtype = ldi_contract_event(evname);
3654 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3655 
3656 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3657 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3658 }
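
/*
 * Illustrative sketch (assumption): the matching finalize callback, passing
 * the result on to the exported minor with ldi_ev_finalize().  As with the
 * notify sketch above, the devinfo pointer, minor and spec_type are
 * hypothetical.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static void
sketch_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int ldi_result,
    void *arg, void *ev_data)
{
	dev_info_t	*self = arg;	/* this driver's own devinfo node */

	ldi_ev_finalize(self, 0, S_IFCHR, ldi_result, cookie, ev_data);
}
#endif	/* LDI_USAGE_SKETCH */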
3659 
3660 int
3661 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3662 {
3663 	ldi_ev_callback_impl_t	*lecp;
3664 	ldi_ev_callback_impl_t	*next;
3665 	ldi_ev_callback_impl_t	*found;
3666 	list_t			*listp;
3667 
3668 	ASSERT(!servicing_interrupt());
3669 
3670 	if (id == 0) {
3671 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3672 		return (LDI_EV_FAILURE);
3673 	}
3674 
3675 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3676 	    (void *)id));
3677 
3678 	ldi_ev_lock();
3679 
3680 	listp = &ldi_ev_callback_list.le_head;
3681 	next = found = NULL;
3682 	for (lecp = list_head(listp); lecp; lecp = next) {
3683 		next = list_next(listp, lecp);
3684 		if (lecp->lec_id == id) {
3685 			ASSERT(found == NULL);
3686 			list_remove(listp, lecp);
3687 			found = lecp;
3688 		}
3689 	}
3690 	ldi_ev_unlock();
3691 
3692 	if (found == NULL) {
3693 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3694 		    (void *)id);
3695 		return (LDI_EV_SUCCESS);
3696 	}
3697 
3698 	if (!ldi_native_cookie(found->lec_cookie)) {
3699 		ASSERT(found->lec_notify == NULL);
3700 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3701 		    != DDI_SUCCESS) {
3702 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3703 			    "for id (%p)", (void *)id);
3704 			ldi_ev_lock();
3705 			list_insert_tail(listp, found);
3706 			ldi_ev_unlock();
3707 			return (LDI_EV_FAILURE);
3708 		}
3709 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3710 		    "service removal succeeded"));
3711 	} else {
3712 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3713 		    "LDI native callbacks"));
3714 	}
3715 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3716 
3717 	return (LDI_EV_SUCCESS);
3718 }
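
/*
 * Illustrative sketch (assumption): tearing down the registration made in
 * the registration sketch above, typically from a detach or close path.
 */
#ifdef	LDI_USAGE_SKETCH	/* hypothetical guard, never defined */
static void
sketch_unregister(ldi_callback_id_t id)
{
	if (id != 0)
		(void) ldi_ev_remove_callbacks(id);
}
#endif	/* LDI_USAGE_SKETCH */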
3719