xref: /titanic_51/usr/src/uts/common/os/driver_lyr.c (revision 1d7f3fadeebf3754e3f042d91e7a4439755dc598)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Layered driver support.
30  */
31 
32 #include <sys/atomic.h>
33 #include <sys/types.h>
34 #include <sys/t_lock.h>
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/systm.h>
38 #include <sys/sysmacros.h>
39 #include <sys/buf.h>
40 #include <sys/cred.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/fs/snode.h>
44 #include <sys/open.h>
45 #include <sys/kmem.h>
46 #include <sys/file.h>
47 #include <sys/bootconf.h>
48 #include <sys/pathname.h>
49 #include <sys/bitmap.h>
50 #include <sys/stat.h>
51 #include <sys/dditypes.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/sunndi.h>
56 #include <sys/esunddi.h>
57 #include <sys/autoconf.h>
58 #include <sys/sunldi.h>
59 #include <sys/sunldi_impl.h>
60 #include <sys/errno.h>
61 #include <sys/debug.h>
62 #include <sys/modctl.h>
63 #include <sys/var.h>
64 #include <vm/seg_vn.h>
65 
66 #include <sys/stropts.h>
67 #include <sys/strsubr.h>
68 #include <sys/socket.h>
69 #include <sys/socketvar.h>
70 #include <sys/kstr.h>
71 
72 /*
73  * Device contract related
74  */
75 #include <sys/contract_impl.h>
76 #include <sys/contract/device_impl.h>
77 
78 /*
79  * Define macros to manipulate snode, vnode, and open device flags
80  */
81 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
82 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
83 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
84 
85 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
86 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
87 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
88 
89 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
90 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
91 
92 /*
93  * Define macros for accessing layered driver hash structures
94  */
95 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
96 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
97 
98 /*
99  * Define layered handle flags used in the lh_type field
100  */
101 #define	LH_STREAM	(0x1)	/* handle to a streams device */
102 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
103 
104 /*
105  * Define macro for devid property lookups
106  */
107 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
108 				DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
109 
110 /*
111  * Dummy string for NDI events
112  */
113 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
114 
115 static void ldi_ev_lock(void);
116 static void ldi_ev_unlock(void);
117 
118 #ifdef	LDI_OBSOLETE_EVENT
119 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
120 #endif
121 
122 
123 /*
124  * globals
125  */
126 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
127 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
128 
129 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
130 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
131 static size_t			ldi_handle_hash_count;
132 
133 static struct ldi_ev_callback_list ldi_ev_callback_list;
134 
135 static uint32_t ldi_ev_id_pool = 0;
136 
137 struct ldi_ev_cookie {
138 	char *ck_evname;
139 	uint_t ck_sync;
140 	uint_t ck_ctype;
141 };
142 
143 static struct ldi_ev_cookie ldi_ev_cookies[] = {
144 	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
145 	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
146 	{ NULL}			/* must terminate list */
147 };
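
/*
 * Note on the table above: each cookie maps an LDI event name to its
 * device contract event type.  LDI_EV_OFFLINE is marked synchronous
 * (ck_sync != 0) and maps to CT_DEV_EV_OFFLINE, while LDI_EV_DEGRADE
 * is asynchronous and maps to CT_DEV_EV_DEGRADED.
 */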
148 
149 void
150 ldi_init(void)
151 {
152 	int i;
153 
154 	ldi_handle_hash_count = 0;
155 	for (i = 0; i < LH_HASH_SZ; i++) {
156 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
157 		ldi_handle_hash[i] = NULL;
158 	}
159 	for (i = 0; i < LI_HASH_SZ; i++) {
160 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
161 		ldi_ident_hash[i] = NULL;
162 	}
163 
164 	/*
165 	 * Initialize the LDI event subsystem
166 	 */
167 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
168 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
169 	ldi_ev_callback_list.le_busy = 0;
170 	ldi_ev_callback_list.le_thread = NULL;
171 	list_create(&ldi_ev_callback_list.le_head,
172 	    sizeof (ldi_ev_callback_impl_t),
173 	    offsetof(ldi_ev_callback_impl_t, lec_list));
174 }
175 
176 /*
177  * LDI ident manipulation functions
178  */
179 static uint_t
180 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
181 {
182 	if (dip != NULL) {
183 		uintptr_t k = (uintptr_t)dip;
184 		k >>= (int)highbit(sizeof (struct dev_info));
185 		return ((uint_t)k);
186 	} else if (dev != DDI_DEV_T_NONE) {
187 		return (modid + getminor(dev) + getmajor(dev));
188 	} else {
189 		return (modid);
190 	}
191 }
192 
193 static struct ldi_ident **
194 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
195 {
196 	struct ldi_ident	**lipp = NULL;
197 	uint_t			index = LI_HASH(modid, dip, dev);
198 
199 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
200 
201 	for (lipp = &(ldi_ident_hash[index]);
202 	    (*lipp != NULL);
203 	    lipp = &((*lipp)->li_next)) {
204 		if (((*lipp)->li_modid == modid) &&
205 		    ((*lipp)->li_major == major) &&
206 		    ((*lipp)->li_dip == dip) &&
207 		    ((*lipp)->li_dev == dev))
208 			break;
209 	}
210 
211 	ASSERT(lipp != NULL);
212 	return (lipp);
213 }
214 
215 static struct ldi_ident *
216 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
217 {
218 	struct ldi_ident	*lip, **lipp;
219 	modid_t			modid;
220 	uint_t			index;
221 
222 	ASSERT(mod_name != NULL);
223 
224 	/* get the module id */
225 	modid = mod_name_to_modid(mod_name);
226 	ASSERT(modid != -1);
227 
228 	/* allocate a new ident in case we need it */
229 	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
230 
231 	/* search the hash for a matching ident */
232 	index = LI_HASH(modid, dip, dev);
233 	mutex_enter(&ldi_ident_hash_lock[index]);
234 	lipp = ident_find_ref_nolock(modid, dip, dev, major);
235 
236 	if (*lipp != NULL) {
237 		/* we found an ident in the hash */
238 		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
239 		(*lipp)->li_ref++;
240 		mutex_exit(&ldi_ident_hash_lock[index]);
241 		kmem_free(lip, sizeof (struct ldi_ident));
242 		return (*lipp);
243 	}
244 
245 	/* initialize the new ident */
246 	lip->li_next = NULL;
247 	lip->li_ref = 1;
248 	lip->li_modid = modid;
249 	lip->li_major = major;
250 	lip->li_dip = dip;
251 	lip->li_dev = dev;
252 	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
253 
254 	/* add it to the ident hash */
255 	lip->li_next = ldi_ident_hash[index];
256 	ldi_ident_hash[index] = lip;
257 
258 	mutex_exit(&ldi_ident_hash_lock[index]);
259 	return (lip);
260 }
261 
262 static void
263 ident_hold(struct ldi_ident *lip)
264 {
265 	uint_t			index;
266 
267 	ASSERT(lip != NULL);
268 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
269 	mutex_enter(&ldi_ident_hash_lock[index]);
270 	ASSERT(lip->li_ref > 0);
271 	lip->li_ref++;
272 	mutex_exit(&ldi_ident_hash_lock[index]);
273 }
274 
275 static void
276 ident_release(struct ldi_ident *lip)
277 {
278 	struct ldi_ident	**lipp;
279 	uint_t			index;
280 
281 	ASSERT(lip != NULL);
282 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
283 	mutex_enter(&ldi_ident_hash_lock[index]);
284 
285 	ASSERT(lip->li_ref > 0);
286 	if (--lip->li_ref > 0) {
287 		/* there are more references to this ident */
288 		mutex_exit(&ldi_ident_hash_lock[index]);
289 		return;
290 	}
291 
292 	/* this was the last reference/open for this ident.  free it. */
293 	lipp = ident_find_ref_nolock(
294 	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
295 
296 	ASSERT((lipp != NULL) && (*lipp != NULL));
297 	*lipp = lip->li_next;
298 	mutex_exit(&ldi_ident_hash_lock[index]);
299 	kmem_free(lip, sizeof (struct ldi_ident));
300 }
301 
302 /*
303  * LDI handle manipulation functions
304  */
305 static uint_t
306 handle_hash_func(void *vp)
307 {
308 	uintptr_t k = (uintptr_t)vp;
309 	k >>= (int)highbit(sizeof (vnode_t));
310 	return ((uint_t)k);
311 }
312 
313 static struct ldi_handle **
314 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
315 {
316 	struct ldi_handle	**lhpp = NULL;
317 	uint_t			index = LH_HASH(vp);
318 
319 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
320 
321 	for (lhpp = &(ldi_handle_hash[index]);
322 	    (*lhpp != NULL);
323 	    lhpp = &((*lhpp)->lh_next)) {
324 		if (((*lhpp)->lh_ident == ident) &&
325 		    ((*lhpp)->lh_vp == vp))
326 			break;
327 	}
328 
329 	ASSERT(lhpp != NULL);
330 	return (lhpp);
331 }
332 
333 static struct ldi_handle *
334 handle_find(vnode_t *vp, struct ldi_ident *ident)
335 {
336 	struct ldi_handle	**lhpp;
337 	int			index = LH_HASH(vp);
338 
339 	mutex_enter(&ldi_handle_hash_lock[index]);
340 	lhpp = handle_find_ref_nolock(vp, ident);
341 	mutex_exit(&ldi_handle_hash_lock[index]);
342 	ASSERT(lhpp != NULL);
343 	return (*lhpp);
344 }
345 
346 static struct ldi_handle *
347 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
348 {
349 	struct ldi_handle	*lhp, **lhpp;
350 	uint_t			index;
351 
352 	ASSERT((vp != NULL) && (ident != NULL));
353 
354 	/* allocate a new handle in case we need it */
355 	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
356 
357 	/* search the hash for a matching handle */
358 	index = LH_HASH(vp);
359 	mutex_enter(&ldi_handle_hash_lock[index]);
360 	lhpp = handle_find_ref_nolock(vp, ident);
361 
362 	if (*lhpp != NULL) {
363 		/* we found a handle in the hash */
364 		(*lhpp)->lh_ref++;
365 		mutex_exit(&ldi_handle_hash_lock[index]);
366 
367 		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
368 		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
369 		    (void *)*lhpp, (void *)ident, (void *)vp,
370 		    mod_major_to_name(getmajor(vp->v_rdev)),
371 		    getminor(vp->v_rdev)));
372 
373 		kmem_free(lhp, sizeof (struct ldi_handle));
374 		return (*lhpp);
375 	}
376 
377 	/* initialize the new handle */
378 	lhp->lh_ref = 1;
379 	lhp->lh_vp = vp;
380 	lhp->lh_ident = ident;
381 #ifdef	LDI_OBSOLETE_EVENT
382 	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
383 #endif
384 
385 	/* set the device type for this handle */
386 	lhp->lh_type = 0;
387 	if (STREAMSTAB(getmajor(vp->v_rdev))) {
388 		ASSERT(vp->v_type == VCHR);
389 		lhp->lh_type |= LH_STREAM;
390 	} else {
391 		lhp->lh_type |= LH_CBDEV;
392 	}
393 
394 	/* get holds on other objects */
395 	ident_hold(ident);
396 	ASSERT(vp->v_count >= 1);
397 	VN_HOLD(vp);
398 
399 	/* add it to the handle hash */
400 	lhp->lh_next = ldi_handle_hash[index];
401 	ldi_handle_hash[index] = lhp;
402 	atomic_add_long(&ldi_handle_hash_count, 1);
403 
404 	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
405 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
406 	    (void *)lhp, (void *)ident, (void *)vp,
407 	    mod_major_to_name(getmajor(vp->v_rdev)),
408 	    getminor(vp->v_rdev)));
409 
410 	mutex_exit(&ldi_handle_hash_lock[index]);
411 	return (lhp);
412 }
413 
414 static void
415 handle_release(struct ldi_handle *lhp)
416 {
417 	struct ldi_handle	**lhpp;
418 	uint_t			index;
419 
420 	ASSERT(lhp != NULL);
421 
422 	index = LH_HASH(lhp->lh_vp);
423 	mutex_enter(&ldi_handle_hash_lock[index]);
424 
425 	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
426 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
427 	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
428 	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
429 	    getminor(lhp->lh_vp->v_rdev)));
430 
431 	ASSERT(lhp->lh_ref > 0);
432 	if (--lhp->lh_ref > 0) {
433 		/* there are more references to this handle */
434 		mutex_exit(&ldi_handle_hash_lock[index]);
435 		return;
436 	}
437 
438 	/* this was the last reference/open for this handle.  free it. */
439 	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
440 	ASSERT((lhpp != NULL) && (*lhpp != NULL));
441 	*lhpp = lhp->lh_next;
442 	atomic_add_long(&ldi_handle_hash_count, -1);
443 	mutex_exit(&ldi_handle_hash_lock[index]);
444 
445 	VN_RELE(lhp->lh_vp);
446 	ident_release(lhp->lh_ident);
447 #ifdef	LDI_OBSOLETE_EVENT
448 	mutex_destroy(lhp->lh_lock);
449 #endif
450 	kmem_free(lhp, sizeof (struct ldi_handle));
451 }
452 
453 #ifdef	LDI_OBSOLETE_EVENT
454 /*
455  * LDI event manipulation functions
456  */
457 static void
458 handle_event_add(ldi_event_t *lep)
459 {
460 	struct ldi_handle *lhp = lep->le_lhp;
461 
462 	ASSERT(lhp != NULL);
463 
464 	mutex_enter(lhp->lh_lock);
465 	if (lhp->lh_events == NULL) {
466 		lhp->lh_events = lep;
467 		mutex_exit(lhp->lh_lock);
468 		return;
469 	}
470 
471 	lep->le_next = lhp->lh_events;
472 	lhp->lh_events->le_prev = lep;
473 	lhp->lh_events = lep;
474 	mutex_exit(lhp->lh_lock);
475 }
476 
477 static void
478 handle_event_remove(ldi_event_t *lep)
479 {
480 	struct ldi_handle *lhp = lep->le_lhp;
481 
482 	ASSERT(lhp != NULL);
483 
484 	mutex_enter(lhp->lh_lock);
485 	if (lep->le_prev)
486 		lep->le_prev->le_next = lep->le_next;
487 	if (lep->le_next)
488 		lep->le_next->le_prev = lep->le_prev;
489 	if (lhp->lh_events == lep)
490 		lhp->lh_events = lep->le_next;
491 	mutex_exit(lhp->lh_lock);
492 
493 }
494 
495 static void
496 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
497     void *arg, void *bus_impldata)
498 {
499 	ldi_event_t *lep = (ldi_event_t *)arg;
500 
501 	ASSERT(lep != NULL);
502 
503 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
504 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
505 	    (void *)dip, (void *)event_cookie, (void *)lep));
506 
507 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
508 }
509 #endif
510 
511 /*
512  * LDI open helper functions
513  */
514 
515 /* get a vnode to a device by dev_t and otyp */
516 static int
517 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
518 {
519 	dev_info_t		*dip;
520 	vnode_t			*vp;
521 
522 	/* sanity check required input parameters */
523 	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
524 		return (EINVAL);
525 
526 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
527 		return (ENODEV);
528 
529 	if (STREAMSTAB(getmajor(dev)) && (otyp != OTYP_CHR)) {
530 		ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
531 		return (ENXIO);
532 	}
533 
534 	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
535 	spec_assoc_vp_with_devi(vp, dip);
536 	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
537 
538 	*vpp = vp;
539 	return (0);
540 }
541 
542 /* get a vnode to a device by pathname */
543 static int
544 ldi_vp_from_name(char *path, vnode_t **vpp)
545 {
546 	vnode_t			*vp = NULL;
547 	int			ret;
548 
549 	/* sanity check required input parameters */
550 	if ((path == NULL) || (vpp == NULL))
551 		return (EINVAL);
552 
553 	if (modrootloaded) {
554 		cred_t *saved_cred = curthread->t_cred;
555 
556 		/* we don't want lookupname to fail because of credentials */
557 		curthread->t_cred = kcred;
558 
559 		/*
560 		 * all lookups should be done in the global zone.  but
561 		 * lookupnameat() won't actually do this if an absolute
562 		 * path is passed in.  since the ldi interfaces require an
563 		 * absolute path we pass lookupnameat() a pointer to
564 		 * the character after the leading '/' and tell it to
565 		 * start searching at the current system root directory.
566 		 */
567 		ASSERT(*path == '/');
568 		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
569 		    &vp, rootdir);
570 
571 		/* restore this thread's credentials */
572 		curthread->t_cred = saved_cred;
573 
574 		if (ret == 0) {
575 			if (!vn_matchops(vp, spec_getvnodeops()) ||
576 			    !VTYP_VALID(vp->v_type)) {
577 				VN_RELE(vp);
578 				return (ENXIO);
579 			}
580 		}
581 	}
582 
583 	if (vp == NULL) {
584 		dev_info_t	*dip;
585 		dev_t		dev;
586 		int		spec_type;
587 
588 		/*
589 		 * Root is not mounted, the minor node is not specified,
590 		 * or an OBP path has been specified.
591 		 */
592 
593 		/*
594 		 * Determine if path can be pruned to produce an
595 		 * OBP or devfs path for resolve_pathname.
596 		 */
597 		if (strncmp(path, "/devices/", 9) == 0)
598 			path += strlen("/devices");
599 
600 		/*
601 		 * if no minor node was specified the DEFAULT minor node
602 		 * will be returned.  if there is no DEFAULT minor node
603 		 * one will be fabricated of type S_IFCHR with the minor
604 		 * number equal to the instance number.
605 		 */
606 		ret = resolve_pathname(path, &dip, &dev, &spec_type);
607 		if (ret != 0)
608 			return (ENODEV);
609 
610 		ASSERT(STYP_VALID(spec_type));
611 		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
612 		spec_assoc_vp_with_devi(vp, dip);
613 		ddi_release_devi(dip);
614 	}
615 
616 	*vpp = vp;
617 	return (0);
618 }
619 
620 static int
621 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
622 {
623 	char		*devidstr;
624 	ddi_prop_t	*propp;
625 
626 	/* convert devid as a string property */
627 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
628 		return (0);
629 
630 	/*
631 	 * Search for the devid.  For speed and ease in locking this
632 	 * code directly uses the property implementation.  See
633 	 * ddi_common_devid_to_devlist() for a comment as to why.
634 	 */
635 	mutex_enter(&(DEVI(dip)->devi_lock));
636 
637 	/* check if there is a DDI_DEV_T_NONE devid property */
638 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
639 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
640 	if (propp != NULL) {
641 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
642 			/* a DDI_DEV_T_NONE devid exists and matches */
643 			mutex_exit(&(DEVI(dip)->devi_lock));
644 			ddi_devid_str_free(devidstr);
645 			return (1);
646 		} else {
647 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
648 			mutex_exit(&(DEVI(dip)->devi_lock));
649 			ddi_devid_str_free(devidstr);
650 			return (0);
651 		}
652 	}
653 
654 	/* check if there is a devt specific devid property */
655 	propp = i_ddi_prop_search(dev,
656 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
657 	if (propp != NULL) {
658 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
659 			/* a devt specific devid exists and matches */
660 			mutex_exit(&(DEVI(dip)->devi_lock));
661 			ddi_devid_str_free(devidstr);
662 			return (1);
663 		} else {
664 			/* a devt specific devid exists and doesn't match */
665 			mutex_exit(&(DEVI(dip)->devi_lock));
666 			ddi_devid_str_free(devidstr);
667 			return (0);
668 		}
669 	}
670 
671 	/* we didn't find any devids associated with the device */
672 	mutex_exit(&(DEVI(dip)->devi_lock));
673 	ddi_devid_str_free(devidstr);
674 	return (0);
675 }
676 
677 /* get a handle to a device by devid and minor name */
678 static int
679 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
680 {
681 	dev_info_t		*dip;
682 	vnode_t			*vp;
683 	int			ret, i, ndevs, styp;
684 	dev_t			dev, *devs;
685 
686 	/* sanity check required input parameters */
687 	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
688 		return (EINVAL);
689 
690 	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
691 	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
692 		return (ENODEV);
693 
694 	for (i = 0; i < ndevs; i++) {
695 		dev = devs[i];
696 
697 		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
698 			continue;
699 
700 		/*
701 		 * now we have to verify that the devid of the disk
702 		 * still matches what was requested.
703 		 *
704 		 * we have to do this because the devid could have
705 		 * changed between the call to ddi_lyr_devid_to_devlist()
706 		 * and e_ddi_hold_devi_by_dev().  this is because when
707 		 * ddi_lyr_devid_to_devlist() returns a list of devts
708 		 * there is no kind of hold on those devts so a device
709 		 * could have been replaced out from under us in the
710 		 * interim.
711 		 */
712 		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
713 		    NULL, &styp) == DDI_SUCCESS) &&
714 		    ldi_devid_match(devid, dip, dev))
715 			break;
716 
717 		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
718 	}
719 
720 	ddi_lyr_free_devlist(devs, ndevs);
721 
722 	if (i == ndevs)
723 		return (ENODEV);
724 
725 	ASSERT(STYP_VALID(styp));
726 	vp = makespecvp(dev, STYP_TO_VTYP(styp));
727 	spec_assoc_vp_with_devi(vp, dip);
728 	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */
729 
730 	*vpp = vp;
731 	return (0);
732 }
733 
734 /* given a vnode, open a device */
735 static int
736 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
737     ldi_handle_t *lhp, struct ldi_ident *li)
738 {
739 	struct ldi_handle	*nlhp;
740 	vnode_t			*vp;
741 	int			err;
742 
743 	ASSERT((vpp != NULL) && (*vpp != NULL));
744 	ASSERT((lhp != NULL) && (li != NULL));
745 
746 	vp = *vpp;
747 	/* if the vnode passed in is not a device, then bail */
748 	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
749 		return (ENXIO);
750 
751 	/*
752 	 * the caller may have specified a node that
753 	 * doesn't have cb_ops defined.  the ldi doesn't yet
754 	 * support opening devices without a valid cb_ops.
755 	 */
756 	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
757 		return (ENXIO);
758 
759 	/* open the device */
760 	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
761 		return (err);
762 
763 	/* possible clone open, make sure that we still have a spec node */
764 	ASSERT(vn_matchops(vp, spec_getvnodeops()));
765 
766 	nlhp = handle_alloc(vp, li);
767 
768 	if (vp != *vpp) {
769 		/*
770 		 * allocating the layered handle took a new hold on the vnode
771 		 * so we can release the hold that was returned by the clone
772 		 * open
773 		 */
774 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
775 		    "ldi clone open", (void *)nlhp));
776 	} else {
777 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
778 		    "ldi open", (void *)nlhp));
779 	}
780 
781 	/* Flush back any dirty pages associated with the device. */
782 	if (nlhp->lh_type & LH_CBDEV) {
783 		vnode_t	*cvp = common_specvp(nlhp->lh_vp);
784 		dev_t	dev = cvp->v_rdev;
785 
786 		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred, NULL);
787 		bflush(dev);
788 	}
789 
790 	*vpp = vp;
791 	*lhp = (ldi_handle_t)nlhp;
792 	return (0);
793 }
794 
795 /* Call a driver's prop_op(9E) interface */
796 static int
797 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
798     int flags, char *name, caddr_t valuep, int *lengthp)
799 {
800 	struct dev_ops	*ops = NULL;
801 	int		res;
802 
803 	ASSERT((dip != NULL) && (name != NULL));
804 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
805 	ASSERT(lengthp != NULL);
806 
807 	/*
808 	 * we can only be invoked after a driver has been opened and
809 	 * someone has a layered handle to it, so there had better be
810 	 * a valid ops vector.
811 	 */
812 	ops = DEVI(dip)->devi_ops;
813 	ASSERT(ops && ops->devo_cb_ops);
814 
815 	/*
816 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
817 	 * nulldev or even NULL.
818 	 */
819 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
820 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
821 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
822 		return (DDI_PROP_NOT_FOUND);
823 	}
824 
825 	/* check if this is actually a DDI_DEV_T_ANY query */
826 	if (flags & LDI_DEV_T_ANY) {
827 		flags &= ~LDI_DEV_T_ANY;
828 		dev = DDI_DEV_T_ANY;
829 	}
830 
831 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
832 	return (res);
833 }
834 
835 static void
836 i_ldi_prop_op_free(struct prop_driver_data *pdd)
837 {
838 	kmem_free(pdd, pdd->pdd_size);
839 }
840 
841 static caddr_t
842 i_ldi_prop_op_alloc(int prop_len)
843 {
844 	struct prop_driver_data	*pdd;
845 	int			pdd_size;
846 
847 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
848 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
849 	pdd->pdd_size = pdd_size;
850 	pdd->pdd_prop_free = i_ldi_prop_op_free;
851 	return ((caddr_t)&pdd[1]);
852 }
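
/*
 * The allocation made above places a struct prop_driver_data header
 * immediately in front of the property value handed back to the caller
 * (&pdd[1]).  ddi_prop_free() later backs up from the returned pointer
 * to find that header and invokes pdd_prop_free (i_ldi_prop_op_free)
 * to release the entire buffer in one shot.
 */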
853 
854 /*
855  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
856  * by the typed ldi property lookup interfaces.
857  */
858 static int
859 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
860     caddr_t *datap, int *lengthp, int elem_size)
861 {
862 	caddr_t	prop_val;
863 	int	prop_len, res;
864 
865 	ASSERT((dip != NULL) && (name != NULL));
866 	ASSERT((datap != NULL) && (lengthp != NULL));
867 
868 	/*
869 	 * first call the driver's prop_op() interface to allow it
870 	 * to override default property values.
871 	 */
872 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
873 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
874 	if (res != DDI_PROP_SUCCESS)
875 		return (DDI_PROP_NOT_FOUND);
876 
877 	/* sanity check the property length */
878 	if (prop_len == 0) {
879 		/*
880 		 * the ddi typed interfaces don't allow a driver to
881 		 * create properties with a length of 0.  so we should
882 		 * prevent drivers from returning 0 length dynamic
883 		 * properties for typed property lookups.
884 		 */
885 		return (DDI_PROP_NOT_FOUND);
886 	}
887 
888 	/* sanity check the property length against the element size */
889 	if (elem_size && ((prop_len % elem_size) != 0))
890 		return (DDI_PROP_NOT_FOUND);
891 
892 	/*
893 	 * got it.  now allocate a prop_driver_data struct so that the
894 	 * user can free the property via ddi_prop_free().
895 	 */
896 	prop_val = i_ldi_prop_op_alloc(prop_len);
897 
898 	/* lookup the property again, this time get the value */
899 	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
900 	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
901 	if (res != DDI_PROP_SUCCESS) {
902 		ddi_prop_free(prop_val);
903 		return (DDI_PROP_NOT_FOUND);
904 	}
905 
906 	/* sanity check the property length */
907 	if (prop_len == 0) {
908 		ddi_prop_free(prop_val);
909 		return (DDI_PROP_NOT_FOUND);
910 	}
911 
912 	/* sanity check the property length against the element size */
913 	if (elem_size && ((prop_len % elem_size) != 0)) {
914 		ddi_prop_free(prop_val);
915 		return (DDI_PROP_NOT_FOUND);
916 	}
917 
918 	/*
919 	 * return the prop_driver_data struct and, optionally, the length
920 	 * of the data.
921 	 */
922 	*datap = prop_val;
923 	*lengthp = prop_len;
924 
925 	return (DDI_PROP_SUCCESS);
926 }
927 
928 /*
929  * i_check_string looks at a string property and makes sure it's
930  * a valid null-terminated string
931  */
932 static int
933 i_check_string(char *str, int prop_len)
934 {
935 	int i;
936 
937 	ASSERT(str != NULL);
938 
939 	for (i = 0; i < prop_len; i++) {
940 		if (str[i] == '\0')
941 			return (0);
942 	}
943 	return (1);
944 }
945 
946 /*
947  * i_pack_string_array takes a string array property that is represented
948  * as a concatenation of strings (with the NULL character included for
949  * each string) and converts it into a format that can be returned by
950  * ldi_prop_lookup_string_array.
951  */
952 static int
953 i_pack_string_array(char *str_concat, int prop_len,
954     char ***str_arrayp, int *nelemp)
955 {
956 	int i, nelem, pack_size;
957 	char **str_array, *strptr;
958 
959 	/*
960 	 * first we need to sanity check the input string array.
961 	 * in essence this can be done by making sure that the last
962 	 * character of the array passed in is null.  (meaning the last
963 	 * string in the array is NULL terminated.)
964 	 */
965 	if (str_concat[prop_len - 1] != '\0')
966 		return (1);
967 
968 	/* now let's count the number of strings in the array */
969 	for (nelem = i = 0; i < prop_len; i++)
970 		if (str_concat[i] == '\0')
971 			nelem++;
972 	ASSERT(nelem >= 1);
973 
974 	/* now let's allocate memory for the new packed property */
975 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
976 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
977 
978 	/* let's copy the actual string data into the new property */
979 	strptr = (char *)&(str_array[nelem + 1]);
980 	bcopy(str_concat, strptr, prop_len);
981 
982 	/* now initialize the string array pointers */
983 	for (i = 0; i < nelem; i++) {
984 		str_array[i] = strptr;
985 		strptr += strlen(strptr) + 1;
986 	}
987 	str_array[nelem] = NULL;
988 
989 	/* set the return values */
990 	*str_arrayp = str_array;
991 	*nelemp = nelem;
992 
993 	return (0);
994 }
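
/*
 * Illustrative example of the packed layout produced above: for the
 * concatenated input "one\0two\0" (prop_len == 8) the single allocation
 * returned via *str_arrayp contains, in order:
 *
 *	str_array[0]	-> "one"
 *	str_array[1]	-> "two"
 *	str_array[2]	== NULL
 *	"one\0two\0"	(copy of the original string data)
 */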
995 
996 
997 /*
998  * LDI Project private device usage interfaces
999  */
1000 
1001 /*
1002  * Get a count of how many devices are currently open by different consumers
1003  */
1004 int
1005 ldi_usage_count()
1006 {
1007 	return (ldi_handle_hash_count);
1008 }
1009 
1010 static void
1011 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
1012 {
1013 	dev_info_t	*dip;
1014 	dev_t		dev;
1015 
1016 	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1017 
1018 	/* get the target devt */
1019 	dev = vp->v_rdev;
1020 
1021 	/* try to get the target dip */
1022 	dip = VTOCS(vp)->s_dip;
1023 	if (dip != NULL) {
1024 		e_ddi_hold_devi(dip);
1025 	} else if (dev != DDI_DEV_T_NONE) {
1026 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1027 	}
1028 
1029 	/* set the target information */
1030 	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1031 	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1032 	ldi_usage->tgt_devt = dev;
1033 	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1034 	ldi_usage->tgt_dip = dip;
1035 }
1036 
1037 
1038 static int
1039 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1040     void *arg, int (*callback)(const ldi_usage_t *, void *))
1041 {
1042 	ldi_usage_t	ldi_usage;
1043 	struct devnames	*dnp;
1044 	dev_info_t	*dip;
1045 	major_t		major;
1046 	dev_t		dev;
1047 	int		ret = LDI_USAGE_CONTINUE;
1048 
1049 	/* set the target device information */
1050 	ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1051 
1052 	/* get the source devt */
1053 	dev = lip->li_dev;
1054 
1055 	/* try to get the source dip */
1056 	dip = lip->li_dip;
1057 	if (dip != NULL) {
1058 		e_ddi_hold_devi(dip);
1059 	} else if (dev != DDI_DEV_T_NONE) {
1060 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1061 	}
1062 
1063 	/* set the valid source information */
1064 	ldi_usage.src_modid = lip->li_modid;
1065 	ldi_usage.src_name = lip->li_modname;
1066 	ldi_usage.src_devt = dev;
1067 	ldi_usage.src_dip = dip;
1068 
1069 	/*
1070 	 * if the source ident represents either:
1071 	 *
1072 	 * - a kernel module (and not a device or device driver)
1073 	 * - a device node
1074 	 *
1075 	 * then we currently have all the info we need to report the
1076 	 * usage information so invoke the callback function.
1077 	 */
1078 	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1079 	    (dip != NULL)) {
1080 		ret = callback(&ldi_usage, arg);
1081 		if (dip != NULL)
1082 			ddi_release_devi(dip);
1083 		if (ldi_usage.tgt_dip != NULL)
1084 			ddi_release_devi(ldi_usage.tgt_dip);
1085 		return (ret);
1086 	}
1087 
1088 	/*
1089 	 * now this is kinda gross.
1090 	 *
1091 	 * what we do here is attempt to associate every device instance
1092 	 * of the source driver on the system with the open target driver.
1093 	 * we do this because we don't know which instance of the device
1094 	 * could potentially access the lower device so we assume that all
1095 	 * the instances could access it.
1096 	 *
1097 	 * there are two ways we could have gotten here:
1098 	 *
1099 	 * 1) this layered ident represents one created using only a
1100 	 *    major number or a driver module name.  this means that when
1101 	 *    it was created we could not associate it with a particular
1102 	 *    dev_t or device instance.
1103 	 *
1104 	 *    when could this possibly happen you ask?
1105 	 *
1106 	 *    a perfect example of this is streams persistent links.
1107 	 *    when a persistent streams link is formed we can't associate
1108 	 *    the lower device stream with any particular upper device
1109 	 *    stream or instance.  this is because any particular upper
1110 	 *    device stream could be closed, then another could be
1111 	 *    opened with a different dev_t and device instance, and it
1112 	 *    would still have access to the lower linked stream.
1113 	 *
1114 	 *    since any instance of the upper streams driver could
1115 	 *    potentially access the lower stream whenever it wants,
1116 	 *    we represent that here by associating the opened lower
1117 	 *    device with every existing device instance of the upper
1118 	 *    streams driver.
1119 	 *
1120 	 * 2) This case should really never happen but we'll include it
1121 	 *    for completeness.
1122 	 *
1123 	 *    it's possible that we could have gotten here because we
1124 	 *    have a dev_t for the upper device but we couldn't find a
1125 	 *    dip associated with that dev_t.
1126 	 *
1127 	 *    the only types of devices that have dev_t without an
1128 	 *    associated dip are unbound DLPIv2 network devices.  These
1129 	 *    types of devices exist to be able to attach a stream to any
1130 	 *    instance of a hardware network device.  since these types of
1131 	 *    devices are usually hardware devices they should never
1132 	 *    really have other devices open.
1133 	 */
1134 	if (dev != DDI_DEV_T_NONE)
1135 		major = getmajor(dev);
1136 	else
1137 		major = lip->li_major;
1138 
1139 	ASSERT((major >= 0) && (major < devcnt));
1140 
1141 	dnp = &devnamesp[major];
1142 	LOCK_DEV_OPS(&dnp->dn_lock);
1143 	dip = dnp->dn_head;
1144 	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1145 		e_ddi_hold_devi(dip);
1146 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1147 
1148 		/* set the source dip */
1149 		ldi_usage.src_dip = dip;
1150 
1151 		/* invoke the callback function */
1152 		ret = callback(&ldi_usage, arg);
1153 
1154 		LOCK_DEV_OPS(&dnp->dn_lock);
1155 		ddi_release_devi(dip);
1156 		dip = ddi_get_next(dip);
1157 	}
1158 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1159 
1160 	/* if there was a target dip, release it */
1161 	if (ldi_usage.tgt_dip != NULL)
1162 		ddi_release_devi(ldi_usage.tgt_dip);
1163 
1164 	return (ret);
1165 }
1166 
1167 /*
1168  * ldi_usage_walker() - this walker reports LDI kernel device usage
1169  * information via the callback() callback function.  the LDI keeps track
1170  * of what devices are being accessed in its own internal data structures.
1171  * this function walks those data structures to determine device usage.
1172  */
1173 void
1174 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1175 {
1176 	struct ldi_handle	*lhp;
1177 	struct ldi_ident	*lip;
1178 	vnode_t			*vp;
1179 	int			i;
1180 	int			ret = LDI_USAGE_CONTINUE;
1181 
1182 	for (i = 0; i < LH_HASH_SZ; i++) {
1183 		mutex_enter(&ldi_handle_hash_lock[i]);
1184 
1185 		lhp = ldi_handle_hash[i];
1186 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1187 			lip = lhp->lh_ident;
1188 			vp = lhp->lh_vp;
1189 
1190 			/* invoke the devinfo callback function */
1191 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1192 
1193 			lhp = lhp->lh_next;
1194 		}
1195 		mutex_exit(&ldi_handle_hash_lock[i]);
1196 
1197 		if (ret != LDI_USAGE_CONTINUE)
1198 			break;
1199 	}
1200 }
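
/*
 * A minimal sketch of a walker callback (hypothetical consumer code, not
 * part of this file).  The walk continues for as long as the callback
 * returns LDI_USAGE_CONTINUE; any other return value terminates it.
 *
 *	static int
 *	my_usage_cb(const ldi_usage_t *lup, void *arg)
 *	{
 *		cmn_err(CE_CONT, "%s -> %s\n",
 *		    lup->src_name, lup->tgt_name);
 *		return (LDI_USAGE_CONTINUE);
 *	}
 *
 *	...
 *	ldi_usage_walker(NULL, my_usage_cb);
 */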
1201 
1202 /*
1203  * LDI Project private interfaces (streams linking interfaces)
1204  *
1205  * Streams supports a type of built in device layering via linking.
1206  * Certain types of streams drivers can be streams multiplexors.
1207  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1208  * These operations allow other streams devices to be linked under the
1209  * multiplexor.  By definition all streams multiplexors are devices
1210  * so this linking is a type of device layering where the multiplexor
1211  * device is layered on top of the device linked below it.
1212  */
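
/*
 * For example, the IP multiplexor persistently links (I_PLINK) the DLPI
 * streams of network drivers beneath it when interfaces are plumbed; the
 * linkage tracking below gives the devinfo snapshot code visibility into
 * that kind of layering.
 */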
1213 
1214 /*
1215  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1216  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1217  *
1218  * The streams framework keeps track of links via the file_t of the lower
1219  * stream.  The LDI keeps track of devices using a vnode.  In the case
1220  * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1221  * a file_t that the streams framework can use to track the linkage.
1222  */
1223 int
1224 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1225 {
1226 	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
1227 	vnode_t			*vpdown;
1228 	file_t			*fpdown;
1229 	int			err;
1230 
1231 	if (lhp == NULL)
1232 		return (EINVAL);
1233 
1234 	vpdown = lhp->lh_vp;
1235 	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1236 	ASSERT(cmd == _I_PLINK_LH);
1237 
1238 	/*
1239 	 * create a new lower vnode and a file_t that points to it,
1240 	 * streams linking requires a file_t.  falloc() returns with
1241 	 * fpdown locked.
1242 	 */
1243 	VN_HOLD(vpdown);
1244 	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1245 	mutex_exit(&fpdown->f_tlock);
1246 
1247 	/* try to establish the link */
1248 	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1249 
1250 	if (err != 0) {
1251 		/* the link failed, free the file_t and release the vnode */
1252 		mutex_enter(&fpdown->f_tlock);
1253 		unfalloc(fpdown);
1254 		VN_RELE(vpdown);
1255 	}
1256 
1257 	return (err);
1258 }
1259 
1260 /*
1261  * ldi_mlink_fp() is invoked for all successful streams linkages created
1262  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1263  * in its internal state so that the devinfo snapshot code has some
1264  * observability into streams device linkage information.
1265  */
1266 void
1267 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1268 {
1269 	vnode_t			*vp = fpdown->f_vnode;
1270 	struct snode		*sp, *csp;
1271 	ldi_ident_t		li;
1272 	major_t			major;
1273 	int			ret;
1274 
1275 	/* if the lower stream is not a device then return */
1276 	if (!vn_matchops(vp, spec_getvnodeops()))
1277 		return;
1278 
1279 	ASSERT(!servicing_interrupt());
1280 
1281 	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1282 	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1283 	    (void *)stp, (void *)fpdown));
1284 
1285 	sp = VTOS(vp);
1286 	csp = VTOS(sp->s_commonvp);
1287 
1288 	/* check if this was a plink via a layered handle */
1289 	if (lhlink) {
1290 		/*
1291 		 * increment the common snode s_count.
1292 		 *
1293 		 * this is done because after the link operation there
1294 		 * are two ways that s_count can be decremented.
1295 		 *
1296 		 * when the layered handle used to create the link is
1297 		 * closed, spec_close() is called and it will decrement
1298 		 * s_count in the common snode.  if we don't increment
1299 		 * s_count here then this could cause spec_close() to
1300 		 * actually close the device while it's still linked
1301 		 * under a multiplexer.
1302 		 *
1303 		 * also, when the lower stream is unlinked, closef() is
1304 		 * called for the file_t associated with this snode.
1305 		 * closef() will call spec_close(), which will decrement
1306 		 * s_count.  if we don't increment s_count here then this
1307 		 * could cause spec_close() to actually close the device
1308 		 * while there may still be valid layered handles
1309 		 * pointing to it.
1310 		 */
1311 		mutex_enter(&csp->s_lock);
1312 		ASSERT(csp->s_count >= 1);
1313 		csp->s_count++;
1314 		mutex_exit(&csp->s_lock);
1315 
1316 		/*
1317 		 * decrement the f_count.
1318 		 * this is done because the layered driver framework does
1319 		 * not actually cache a copy of the file_t allocated to
1320 		 * do the link.  this is done here instead of in ldi_mlink_lh()
1321 		 * because there is a window in ldi_mlink_lh() between where
1322 		 * mlink_file() returns and we would decrement the f_count
1323 		 * when the stream could be unlinked.
1324 		 */
1325 		mutex_enter(&fpdown->f_tlock);
1326 		fpdown->f_count--;
1327 		mutex_exit(&fpdown->f_tlock);
1328 	}
1329 
1330 	/*
1331 	 * NOTE: here we rely on the streams subsystem not allowing
1332 	 * a stream to be multiplexed more than once.  if this
1333 	 * changes, we break.
1334 	 *
1335 	 * mark the snode/stream as multiplexed
1336 	 */
1337 	mutex_enter(&sp->s_lock);
1338 	ASSERT(!(sp->s_flag & SMUXED));
1339 	sp->s_flag |= SMUXED;
1340 	mutex_exit(&sp->s_lock);
1341 
1342 	/* get a layered ident for the upper stream */
1343 	if (type == LINKNORMAL) {
1344 		/*
1345 		 * if the link is not persistent then we can associate
1346 		 * the upper stream with a dev_t.  this is because the
1347 		 * upper stream is associated with a vnode, which is
1348 		 * associated with a dev_t and this binding can't change
1349 		 * during the life of the stream.  since the link isn't
1350 		 * persistent once the stream is destroyed the link is
1351 		 * destroyed.  so the dev_t will be valid for the life
1352 		 * of the link.
1353 		 */
1354 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1355 	} else {
1356 		/*
1357 		 * if the link is persistent we can only associate the
1358 		 * link with a driver (and not a dev_t.)  this is
1359 		 * because subsequent opens of the upper device may result
1360 		 * in a different stream (and dev_t) having access to
1361 		 * the lower stream.
1362 		 *
1363 		 * for example, if the upper stream is closed after the
1364 		 * persistent link operation is completed, a subsequent
1365 		 * open of the upper device will create a new stream which
1366 		 * may have a different dev_t and an unlink operation
1367 		 * can be performed using this new upper stream.
1368 		 */
1369 		ASSERT(type == LINKPERSIST);
1370 		major = getmajor(stp->sd_vnode->v_rdev);
1371 		ret = ldi_ident_from_major(major, &li);
1372 	}
1373 
1374 	ASSERT(ret == 0);
1375 	(void) handle_alloc(vp, (struct ldi_ident *)li);
1376 	ldi_ident_release(li);
1377 }
1378 
1379 void
1380 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1381 {
1382 	struct ldi_handle	*lhp;
1383 	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
1384 	struct snode		*sp;
1385 	ldi_ident_t		li;
1386 	major_t			major;
1387 	int			ret;
1388 
1389 	/* if the lower stream is not a device then return */
1390 	if (!vn_matchops(vp, spec_getvnodeops()))
1391 		return;
1392 
1393 	ASSERT(!servicing_interrupt());
1394 	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1395 
1396 	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1397 	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1398 	    (void *)stp, (void *)fpdown));
1399 
1400 	/*
1401 	 * NOTE: here we rely on the streams subsystem not allowing
1402 	 * a stream to be multiplexed more than once.  if this
1403 	 * changes, we break.
1404 	 *
1405 	 * mark the snode/stream as not multiplexed
1406 	 */
1407 	sp = VTOS(vp);
1408 	mutex_enter(&sp->s_lock);
1409 	ASSERT(sp->s_flag & SMUXED);
1410 	sp->s_flag &= ~SMUXED;
1411 	mutex_exit(&sp->s_lock);
1412 
1413 	/*
1414 	 * clear the owner for this snode.
1415 	 * see the comment in ldi_mlink_fp() for information about how
1416 	 * the ident is allocated
1417 	 */
1418 	if (type == LINKNORMAL) {
1419 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1420 	} else {
1421 		ASSERT(type == LINKPERSIST);
1422 		major = getmajor(stp->sd_vnode->v_rdev);
1423 		ret = ldi_ident_from_major(major, &li);
1424 	}
1425 
1426 	ASSERT(ret == 0);
1427 	lhp = handle_find(vp, (struct ldi_ident *)li);
1428 	handle_release(lhp);
1429 	ldi_ident_release(li);
1430 }
1431 
1432 /*
1433  * LDI Consolidation private interfaces
1434  */
1435 int
1436 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1437 {
1438 	struct modctl		*modp;
1439 	major_t			major;
1440 	char			*name;
1441 
1442 	if ((modlp == NULL) || (lip == NULL))
1443 		return (EINVAL);
1444 
1445 	ASSERT(!servicing_interrupt());
1446 
1447 	modp = mod_getctl(modlp);
1448 	if (modp == NULL)
1449 		return (EINVAL);
1450 	name = modp->mod_modname;
1451 	if (name == NULL)
1452 		return (EINVAL);
1453 	major = mod_name_to_major(name);
1454 
1455 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1456 
1457 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1458 	    "ldi_ident_from_mod", (void *)*lip, name));
1459 
1460 	return (0);
1461 }
1462 
1463 ldi_ident_t
1464 ldi_ident_from_anon()
1465 {
1466 	ldi_ident_t	lip;
1467 
1468 	ASSERT(!servicing_interrupt());
1469 
1470 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1471 
1472 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1473 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1474 
1475 	return (lip);
1476 }
1477 
1478 
1479 /*
1480  * LDI Public interfaces
1481  */
1482 int
1483 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1484 {
1485 	struct stdata		*stp;
1486 	dev_t			dev;
1487 	char			*name;
1488 
1489 	if ((sq == NULL) || (lip == NULL))
1490 		return (EINVAL);
1491 
1492 	ASSERT(!servicing_interrupt());
1493 
1494 	stp = sq->q_stream;
1495 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1496 		return (EINVAL);
1497 
1498 	dev = stp->sd_vnode->v_rdev;
1499 	name = mod_major_to_name(getmajor(dev));
1500 	if (name == NULL)
1501 		return (EINVAL);
1502 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1503 
1504 	LDI_ALLOCFREE((CE_WARN,
1505 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1506 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1507 	    (void *)stp));
1508 
1509 	return (0);
1510 }
1511 
1512 int
1513 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1514 {
1515 	char			*name;
1516 
1517 	if (lip == NULL)
1518 		return (EINVAL);
1519 
1520 	ASSERT(!servicing_interrupt());
1521 
1522 	name = mod_major_to_name(getmajor(dev));
1523 	if (name == NULL)
1524 		return (EINVAL);
1525 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1526 
1527 	LDI_ALLOCFREE((CE_WARN,
1528 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1529 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1530 
1531 	return (0);
1532 }
1533 
1534 int
1535 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1536 {
1537 	struct dev_info		*devi = (struct dev_info *)dip;
1538 	char			*name;
1539 
1540 	if ((dip == NULL) || (lip == NULL))
1541 		return (EINVAL);
1542 
1543 	ASSERT(!servicing_interrupt());
1544 
1545 	name = mod_major_to_name(devi->devi_major);
1546 	if (name == NULL)
1547 		return (EINVAL);
1548 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1549 
1550 	LDI_ALLOCFREE((CE_WARN,
1551 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1552 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1553 
1554 	return (0);
1555 }
1556 
1557 int
1558 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1559 {
1560 	char			*name;
1561 
1562 	if (lip == NULL)
1563 		return (EINVAL);
1564 
1565 	ASSERT(!servicing_interrupt());
1566 
1567 	name = mod_major_to_name(major);
1568 	if (name == NULL)
1569 		return (EINVAL);
1570 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1571 
1572 	LDI_ALLOCFREE((CE_WARN,
1573 	    "%s: li=0x%p, mod=%s",
1574 	    "ldi_ident_from_major", (void *)*lip, name));
1575 
1576 	return (0);
1577 }
1578 
1579 void
1580 ldi_ident_release(ldi_ident_t li)
1581 {
1582 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1583 	char			*name;
1584 
1585 	if (li == NULL)
1586 		return;
1587 
1588 	ASSERT(!servicing_interrupt());
1589 
1590 	name = ident->li_modname;
1591 
1592 	LDI_ALLOCFREE((CE_WARN,
1593 	    "%s: li=0x%p, mod=%s",
1594 	    "ldi_ident_release", (void *)li, name));
1595 
1596 	ident_release((struct ldi_ident *)li);
1597 }
1598 
1599 /* get a handle to a device by dev_t and otyp */
1600 int
1601 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1602     ldi_handle_t *lhp, ldi_ident_t li)
1603 {
1604 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1605 	int			ret;
1606 	vnode_t			*vp;
1607 
1608 	/* sanity check required input parameters */
1609 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1610 	    (lhp == NULL) || (lip == NULL))
1611 		return (EINVAL);
1612 
1613 	ASSERT(!servicing_interrupt());
1614 
1615 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1616 		return (ret);
1617 
1618 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1619 		*devp = vp->v_rdev;
1620 	}
1621 	VN_RELE(vp);
1622 
1623 	return (ret);
1624 }
1625 
1626 /* get a handle to a device by pathname */
1627 int
1628 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1629     ldi_handle_t *lhp, ldi_ident_t li)
1630 {
1631 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1632 	int			ret;
1633 	vnode_t			*vp;
1634 
1635 	/* sanity check required input parameters */
1636 	if ((pathname == NULL) || (*pathname != '/') ||
1637 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1638 		return (EINVAL);
1639 
1640 	ASSERT(!servicing_interrupt());
1641 
1642 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1643 		return (ret);
1644 
1645 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1646 	VN_RELE(vp);
1647 
1648 	return (ret);
1649 }
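
/*
 * A minimal usage sketch of the public open/close interfaces, as a layered
 * consumer might use them from attach(9E).  This is hypothetical driver
 * code; the device path and error handling are illustrative only:
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *	uint64_t	size;
 *	int		rv;
 *
 *	if (ldi_ident_from_dip(dip, &li) != 0)
 *		return (DDI_FAILURE);
 *
 *	rv = ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD | FWRITE,
 *	    kcred, &lh, li);
 *	if (rv != 0) {
 *		ldi_ident_release(li);
 *		return (DDI_FAILURE);
 *	}
 *
 *	if (ldi_get_size(lh, &size) == DDI_SUCCESS)
 *		cmn_err(CE_CONT, "target is %llu bytes\n",
 *		    (u_longlong_t)size);
 *
 *	(void) ldi_close(lh, FREAD | FWRITE, kcred);
 *	ldi_ident_release(li);
 */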
1650 
1651 /* get a handle to a device by devid and minor_name */
1652 int
1653 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1654     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1655 {
1656 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1657 	int			ret;
1658 	vnode_t			*vp;
1659 
1660 	/* sanity check required input parameters */
1661 	if ((minor_name == NULL) || (cr == NULL) ||
1662 	    (lhp == NULL) || (lip == NULL))
1663 		return (EINVAL);
1664 
1665 	ASSERT(!servicing_interrupt());
1666 
1667 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1668 		return (ret);
1669 
1670 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1671 	VN_RELE(vp);
1672 
1673 	return (ret);
1674 }
1675 
1676 int
1677 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1678 {
1679 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1680 	struct ldi_event	*lep;
1681 	int			err = 0;
1682 	int			notify = 0;
1683 	list_t			*listp;
1684 	ldi_ev_callback_impl_t	*lecp;
1685 
1686 	if (lh == NULL)
1687 		return (EINVAL);
1688 
1689 	ASSERT(!servicing_interrupt());
1690 
1691 	/* Flush back any dirty pages associated with the device. */
1692 	if (handlep->lh_type & LH_CBDEV) {
1693 		vnode_t	*cvp = common_specvp(handlep->lh_vp);
1694 		dev_t	dev = cvp->v_rdev;
1695 
1696 		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred, NULL);
1697 		bflush(dev);
1698 	}
1699 
1700 #ifdef	LDI_OBSOLETE_EVENT
1701 
1702 	/*
1703 	 * Any event handlers should have been unregistered by the
1704 	 * time ldi_close() is called.  If they haven't then it's a
1705 	 * bug.
1706 	 *
1707 	 * In a debug kernel we'll panic to make the problem obvious.
1708 	 */
1709 	ASSERT(handlep->lh_events == NULL);
1710 
1711 	/*
1712 	 * On a production kernel we'll "do the right thing" (unregister
1713 	 * the event handlers) and then complain about having to do the
1714 	 * work ourselves.
1715 	 */
1716 	while ((lep = handlep->lh_events) != NULL) {
1717 		err = 1;
1718 		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1719 	}
1720 	if (err) {
1721 		struct ldi_ident *lip = handlep->lh_ident;
1722 		ASSERT(lip != NULL);
1723 		cmn_err(CE_NOTE, "ldi err: %s "
1724 		    "failed to unregister layered event handlers before "
1725 		    "closing devices", lip->li_modname);
1726 	}
1727 #endif
1728 
1729 	/* do a layered close on the device */
1730 	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1731 
1732 	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1733 
1734 	/*
1735 	 * Search the event callback list for callbacks with this
1736 	 * handle. There are 2 cases
1737 	 * 1. Called in the context of a notify. The handle consumer
1738 	 *    is releasing its hold on the device to allow a reconfiguration
1739 	 *    of the device. Simply NULL out the handle and the notify callback.
1740 	 *    The finalize callback is still available so that the consumer
1741 	 *    knows of the final disposition of the device.
1742 	 * 2. Not called in the context of notify. NULL out the handle as well
1743 	 *    as the notify and finalize callbacks. Since the consumer has
1744 	 *    closed the handle, we assume it is not interested in the
1745 	 *    notify and finalize callbacks.
1746 	 */
1747 	ldi_ev_lock();
1748 
1749 	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1750 		notify = 1;
1751 	listp = &ldi_ev_callback_list.le_head;
1752 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1753 		if (lecp->lec_lhp != handlep)
1754 			continue;
1755 		lecp->lec_lhp = NULL;
1756 		lecp->lec_notify = NULL;
1757 		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1758 		if (!notify) {
1759 			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1760 			lecp->lec_finalize = NULL;
1761 		}
1762 	}
1763 
1764 	if (notify)
1765 		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1766 	ldi_ev_unlock();
1767 
1768 	/*
1769 	 * Free the handle even if the device close failed.  why?
1770 	 *
1771 	 * If the device close failed we can't really make assumptions
1772 	 * about the device's state so we shouldn't allow access to the
1773 	 * device via this handle any more.  If the device consumer wants
1774 	 * to access the device again they should open it again.
1775 	 *
1776 	 * This is the same way file/device close failures are handled
1777 	 * in other places like spec_close() and closeandsetf().
1778 	 */
1779 	handle_release(handlep);
1780 	return (err);
1781 }
1782 
1783 int
1784 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1785 {
1786 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1787 	vnode_t			*vp;
1788 	dev_t			dev;
1789 	int			ret;
1790 
1791 	if (lh == NULL)
1792 		return (EINVAL);
1793 
1794 	vp = handlep->lh_vp;
1795 	dev = vp->v_rdev;
1796 	if (handlep->lh_type & LH_CBDEV) {
1797 		ret = cdev_read(dev, uiop, credp);
1798 	} else if (handlep->lh_type & LH_STREAM) {
1799 		ret = strread(vp, uiop, credp);
1800 	} else {
1801 		return (ENOTSUP);
1802 	}
1803 	return (ret);
1804 }
1805 
1806 int
1807 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1808 {
1809 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1810 	vnode_t			*vp;
1811 	dev_t			dev;
1812 	int			ret;
1813 
1814 	if (lh == NULL)
1815 		return (EINVAL);
1816 
1817 	vp = handlep->lh_vp;
1818 	dev = vp->v_rdev;
1819 	if (handlep->lh_type & LH_CBDEV) {
1820 		ret = cdev_write(dev, uiop, credp);
1821 	} else if (handlep->lh_type & LH_STREAM) {
1822 		ret = strwrite(vp, uiop, credp);
1823 	} else {
1824 		return (ENOTSUP);
1825 	}
1826 	return (ret);
1827 }
1828 
1829 int
1830 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1831 {
1832 	int			otyp;
1833 	uint_t			value;
1834 	int64_t			drv_prop64;
1835 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1836 	uint_t			blksize;
1837 	int			blkshift;
1838 
1839 
1840 	if ((lh == NULL) || (sizep == NULL))
1841 		return (DDI_FAILURE);
1842 
1843 	if (handlep->lh_type & LH_STREAM)
1844 		return (DDI_FAILURE);
1845 
1846 	/*
1847 	 * Determine device type (char or block).
1848 	 * Character devices support Size/size
1849 	 * property value. Block devices may support
1850 	 * Nblocks/nblocks or Size/size property value.
1851 	 */
1852 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1853 		return (DDI_FAILURE);
1854 
1855 	if (otyp == OTYP_BLK) {
1856 		if (ldi_prop_exists(lh,
1857 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1858 
1859 			drv_prop64 = ldi_prop_get_int64(lh,
1860 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1861 			    "Nblocks", 0);
1862 			blksize = ldi_prop_get_int(lh,
1863 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1864 			    "blksize", DEV_BSIZE);
1865 			if (blksize == DEV_BSIZE)
1866 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1867 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1868 				    "device-blksize", DEV_BSIZE);
1869 
1870 			/* blksize must be a power of two */
1871 			ASSERT(BIT_ONLYONESET(blksize));
1872 			blkshift = highbit(blksize) - 1;
1873 
1874 			/*
1875 			 * We don't support Nblocks values that don't have
1876 			 * an accurate uint64_t byte count representation.
1877 			 */
1878 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1879 				return (DDI_FAILURE);
1880 
1881 			*sizep = (uint64_t)
1882 			    (((u_offset_t)drv_prop64) << blkshift);
1883 			return (DDI_SUCCESS);
1884 		}
1885 
1886 		if (ldi_prop_exists(lh,
1887 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1888 
1889 			value = ldi_prop_get_int(lh,
1890 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1891 			    "nblocks", 0);
1892 			blksize = ldi_prop_get_int(lh,
1893 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1894 			    "blksize", DEV_BSIZE);
1895 			if (blksize == DEV_BSIZE)
1896 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1897 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1898 				    "device-blksize", DEV_BSIZE);
1899 
1900 			/* blksize must be a power of two */
1901 			ASSERT(BIT_ONLYONESET(blksize));
1902 			blkshift = highbit(blksize) - 1;
1903 
1904 			/*
1905 			 * We don't support nblocks values that don't have an
1906 			 * accurate uint64_t byte count representation.
1907 			 */
1908 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1909 				return (DDI_FAILURE);
1910 
1911 			*sizep = (uint64_t)
1912 			    (((u_offset_t)value) << blkshift);
1913 			return (DDI_SUCCESS);
1914 		}
1915 	}
1916 
1917 	if (ldi_prop_exists(lh,
1918 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1919 
1920 		drv_prop64 = ldi_prop_get_int64(lh,
1921 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1922 		*sizep = (uint64_t)drv_prop64;
1923 		return (DDI_SUCCESS);
1924 	}
1925 
1926 	if (ldi_prop_exists(lh,
1927 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1928 
1929 		value = ldi_prop_get_int(lh,
1930 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1931 		*sizep = (uint64_t)value;
1932 		return (DDI_SUCCESS);
1933 	}
1934 
1935 	/* unable to determine device size */
1936 	return (DDI_FAILURE);
1937 }
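
/*
 * Illustrative sketch (not built as part of this file): a layered consumer
 * that has already obtained a handle through one of the ldi_open_*()
 * interfaces might use ldi_get_size() as follows, letting the framework do
 * the Nblocks/nblocks/Size/size property lookups described above.  The
 * xx_ names are hypothetical.
 *
 *	uint64_t	size;
 *
 *	if (ldi_get_size(xx_lh, &size) != DDI_SUCCESS) {
 *		cmn_err(CE_WARN, "xx: unable to determine device size");
 *		return (EIO);
 *	}
 *	xx_statep->xx_dev_size = size;
 */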
1938 
1939 int
1940 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1941 	cred_t *cr, int *rvalp)
1942 {
1943 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1944 	vnode_t			*vp;
1945 	dev_t			dev;
1946 	int			ret, copymode;
1947 
1948 	if (lh == NULL)
1949 		return (EINVAL);
1950 
1951 	/*
1952 	 * if the data pointed to by arg is located in the kernel then
1953 	 * make sure the FNATIVE flag is set.
1954 	 */
1955 	if (mode & FKIOCTL)
1956 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1957 
1958 	vp = handlep->lh_vp;
1959 	dev = vp->v_rdev;
1960 	if (handlep->lh_type & LH_CBDEV) {
1961 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1962 	} else if (handlep->lh_type & LH_STREAM) {
1963 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1964 
1965 		/*
1966 		 * if we get an I_PLINK from within the kernel the
1967 		 * arg is a layered handle pointer instead of
1968 		 * a file descriptor, so we translate this ioctl
1969 		 * into a private one that can handle this.
1970 		 */
1971 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1972 			cmd = _I_PLINK_LH;
1973 
1974 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1975 	} else {
1976 		return (ENOTSUP);
1977 	}
1978 
1979 	return (ret);
1980 }
1981 
1982 int
1983 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1984     struct pollhead **phpp)
1985 {
1986 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1987 	vnode_t			*vp;
1988 	dev_t			dev;
1989 	int			ret;
1990 
1991 	if (lh == NULL)
1992 		return (EINVAL);
1993 
1994 	vp = handlep->lh_vp;
1995 	dev = vp->v_rdev;
1996 	if (handlep->lh_type & LH_CBDEV) {
1997 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1998 	} else if (handlep->lh_type & LH_STREAM) {
1999 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
2000 	} else {
2001 		return (ENOTSUP);
2002 	}
2003 
2004 	return (ret);
2005 }
2006 
2007 int
2008 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
2009 	int flags, char *name, caddr_t valuep, int *length)
2010 {
2011 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2012 	dev_t			dev;
2013 	dev_info_t		*dip;
2014 	int			ret;
2015 	struct snode		*csp;
2016 
2017 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2018 		return (DDI_PROP_INVAL_ARG);
2019 
2020 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2021 		return (DDI_PROP_INVAL_ARG);
2022 
2023 	if (length == NULL)
2024 		return (DDI_PROP_INVAL_ARG);
2025 
2026 	/*
2027 	 * Try to find the associated dip;
2028 	 * this places a hold on the driver.
2029 	 */
2030 	dev = handlep->lh_vp->v_rdev;
2031 
2032 	csp = VTOCS(handlep->lh_vp);
2033 	mutex_enter(&csp->s_lock);
2034 	if ((dip = csp->s_dip) != NULL)
2035 		e_ddi_hold_devi(dip);
2036 	mutex_exit(&csp->s_lock);
2037 	if (dip == NULL)
2038 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2039 
2040 	if (dip == NULL)
2041 		return (DDI_PROP_NOT_FOUND);
2042 
2043 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2044 	ddi_release_devi(dip);
2045 
2046 	return (ret);
2047 }
2048 
2049 int
2050 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2051 {
2052 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2053 	dev_t			dev;
2054 
2055 	if ((lh == NULL) || (bp == NULL))
2056 		return (EINVAL);
2057 
2058 	/* this entry point is only supported for cb devices */
2059 	dev = handlep->lh_vp->v_rdev;
2060 	if (!(handlep->lh_type & LH_CBDEV))
2061 		return (ENOTSUP);
2062 
2063 	bp->b_edev = dev;
2064 	bp->b_dev = cmpdev(dev);
2065 	return (bdev_strategy(bp));
2066 }
2067 
2068 int
2069 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2070 {
2071 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2072 	dev_t			dev;
2073 
2074 	if (lh == NULL)
2075 		return (EINVAL);
2076 
2077 	/* this entry point is only supported for cb devices */
2078 	dev = handlep->lh_vp->v_rdev;
2079 	if (!(handlep->lh_type & LH_CBDEV))
2080 		return (ENOTSUP);
2081 
2082 	return (bdev_dump(dev, addr, blkno, nblk));
2083 }
2084 
2085 int
2086 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2087     size_t len, size_t *maplen, uint_t model)
2088 {
2089 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2090 	dev_t			dev;
2091 
2092 	if (lh == NULL)
2093 		return (EINVAL);
2094 
2095 	/* this entry point is only supported for cb devices */
2096 	dev = handlep->lh_vp->v_rdev;
2097 	if (!(handlep->lh_type & LH_CBDEV))
2098 		return (ENOTSUP);
2099 
2100 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2101 }
2102 
2103 int
2104 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2105 {
2106 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2107 	dev_t			dev;
2108 	struct cb_ops		*cb;
2109 
2110 	if (lh == NULL)
2111 		return (EINVAL);
2112 
2113 	/* this entry point is only supported for cb devices */
2114 	if (!(handlep->lh_type & LH_CBDEV))
2115 		return (ENOTSUP);
2116 
2117 	/*
2118 	 * Kaio is only supported on block devices.
2119 	 */
2120 	dev = handlep->lh_vp->v_rdev;
2121 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2122 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2123 		return (ENOTSUP);
2124 
2125 	if (cb->cb_aread == NULL)
2126 		return (ENOTSUP);
2127 
2128 	return (cb->cb_aread(dev, aio_reqp, cr));
2129 }
2130 
2131 int
2132 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2133 {
2134 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2135 	struct cb_ops		*cb;
2136 	dev_t			dev;
2137 
2138 	if (lh == NULL)
2139 		return (EINVAL);
2140 
2141 	/* this entry point is only supported for cb devices */
2142 	if (!(handlep->lh_type & LH_CBDEV))
2143 		return (ENOTSUP);
2144 
2145 	/*
2146 	 * Kaio is only supported on block devices.
2147 	 */
2148 	dev = handlep->lh_vp->v_rdev;
2149 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2150 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2151 		return (ENOTSUP);
2152 
2153 	if (cb->cb_awrite == NULL)
2154 		return (ENOTSUP);
2155 
2156 	return (cb->cb_awrite(dev, aio_reqp, cr));
2157 }
2158 
2159 int
2160 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2161 {
2162 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2163 	int			ret;
2164 
2165 	if ((lh == NULL) || (smp == NULL))
2166 		return (EINVAL);
2167 
2168 	if (!(handlep->lh_type & LH_STREAM)) {
2169 		freemsg(smp);
2170 		return (ENOTSUP);
2171 	}
2172 
2173 	/* Send message while honoring flow control */
2174 	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2175 	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2176 
2177 	return (ret);
2178 }
2179 
2180 int
2181 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2182 {
2183 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2184 	clock_t			timout; /* milliseconds */
2185 	uchar_t			pri;
2186 	rval_t			rval;
2187 	int			ret, pflag;
2188 
2189 
2190 	if (lh == NULL)
2191 		return (EINVAL);
2192 
2193 	if (!(handlep->lh_type & LH_STREAM))
2194 		return (ENOTSUP);
2195 
2196 	/* Convert from nanoseconds to milliseconds */
2197 	if (timeo != NULL) {
2198 		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2199 		if (timout > INT_MAX)
2200 			return (EINVAL);
2201 	} else
2202 		timout = -1;
2203 
2204 	/* Wait for timout milliseconds for a message */
2205 	pflag = MSG_ANY;
2206 	pri = 0;
2207 	*rmp = NULL;
2208 	ret = kstrgetmsg(handlep->lh_vp,
2209 	    rmp, NULL, &pri, &pflag, timout, &rval);
2210 	return (ret);
2211 }
2212 
2213 int
2214 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2215 {
2216 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2217 
2218 	if ((lh == NULL) || (devp == NULL))
2219 		return (EINVAL);
2220 
2221 	*devp = handlep->lh_vp->v_rdev;
2222 	return (0);
2223 }
2224 
2225 int
2226 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2227 {
2228 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2229 
2230 	if ((lh == NULL) || (otyp == NULL))
2231 		return (EINVAL);
2232 
2233 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2234 	return (0);
2235 }
2236 
2237 int
2238 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2239 {
2240 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2241 	int			ret;
2242 	dev_t			dev;
2243 
2244 	if ((lh == NULL) || (devid == NULL))
2245 		return (EINVAL);
2246 
2247 	dev = handlep->lh_vp->v_rdev;
2248 
2249 	ret = ddi_lyr_get_devid(dev, devid);
2250 	if (ret != DDI_SUCCESS)
2251 		return (ENOTSUP);
2252 
2253 	return (0);
2254 }
2255 
2256 int
2257 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2258 {
2259 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2260 	int			ret, otyp;
2261 	dev_t			dev;
2262 
2263 	if ((lh == NULL) || (minor_name == NULL))
2264 		return (EINVAL);
2265 
2266 	dev = handlep->lh_vp->v_rdev;
2267 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2268 
2269 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2270 	if (ret != DDI_SUCCESS)
2271 		return (ENOTSUP);
2272 
2273 	return (0);
2274 }
2275 
2276 int
2277 ldi_prop_lookup_int_array(ldi_handle_t lh,
2278     uint_t flags, char *name, int **data, uint_t *nelements)
2279 {
2280 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2281 	dev_info_t		*dip;
2282 	dev_t			dev;
2283 	int			res;
2284 	struct snode		*csp;
2285 
2286 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2287 		return (DDI_PROP_INVAL_ARG);
2288 
2289 	dev = handlep->lh_vp->v_rdev;
2290 
2291 	csp = VTOCS(handlep->lh_vp);
2292 	mutex_enter(&csp->s_lock);
2293 	if ((dip = csp->s_dip) != NULL)
2294 		e_ddi_hold_devi(dip);
2295 	mutex_exit(&csp->s_lock);
2296 	if (dip == NULL)
2297 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2298 
2299 	if (dip == NULL) {
2300 		flags |= DDI_UNBND_DLPI2;
2301 	} else if (flags & LDI_DEV_T_ANY) {
2302 		flags &= ~LDI_DEV_T_ANY;
2303 		dev = DDI_DEV_T_ANY;
2304 	}
2305 
2306 	if (dip != NULL) {
2307 		int *prop_val, prop_len;
2308 
2309 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2310 		    (caddr_t *)&prop_val, &prop_len, sizeof (int));
2311 
2312 		/* if we got it then return it */
2313 		if (res == DDI_PROP_SUCCESS) {
2314 			*nelements = prop_len / sizeof (int);
2315 			*data = prop_val;
2316 
2317 			ddi_release_devi(dip);
2318 			return (res);
2319 		}
2320 	}
2321 
2322 	/* call the normal property interfaces */
2323 	res = ddi_prop_lookup_int_array(dev, dip, flags,
2324 	    name, data, nelements);
2325 
2326 	if (dip != NULL)
2327 		ddi_release_devi(dip);
2328 
2329 	return (res);
2330 }
2331 
2332 int
2333 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2334     uint_t flags, char *name, int64_t **data, uint_t *nelements)
2335 {
2336 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2337 	dev_info_t		*dip;
2338 	dev_t			dev;
2339 	int			res;
2340 	struct snode		*csp;
2341 
2342 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2343 		return (DDI_PROP_INVAL_ARG);
2344 
2345 	dev = handlep->lh_vp->v_rdev;
2346 
2347 	csp = VTOCS(handlep->lh_vp);
2348 	mutex_enter(&csp->s_lock);
2349 	if ((dip = csp->s_dip) != NULL)
2350 		e_ddi_hold_devi(dip);
2351 	mutex_exit(&csp->s_lock);
2352 	if (dip == NULL)
2353 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2354 
2355 	if (dip == NULL) {
2356 		flags |= DDI_UNBND_DLPI2;
2357 	} else if (flags & LDI_DEV_T_ANY) {
2358 		flags &= ~LDI_DEV_T_ANY;
2359 		dev = DDI_DEV_T_ANY;
2360 	}
2361 
2362 	if (dip != NULL) {
2363 		int64_t	*prop_val;
2364 		int	prop_len;
2365 
2366 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2367 		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2368 
2369 		/* if we got it then return it */
2370 		if (res == DDI_PROP_SUCCESS) {
2371 			*nelements = prop_len / sizeof (int64_t);
2372 			*data = prop_val;
2373 
2374 			ddi_release_devi(dip);
2375 			return (res);
2376 		}
2377 	}
2378 
2379 	/* call the normal property interfaces */
2380 	res = ddi_prop_lookup_int64_array(dev, dip, flags,
2381 	    name, data, nelements);
2382 
2383 	if (dip != NULL)
2384 		ddi_release_devi(dip);
2385 
2386 	return (res);
2387 }
2388 
2389 int
2390 ldi_prop_lookup_string_array(ldi_handle_t lh,
2391     uint_t flags, char *name, char ***data, uint_t *nelements)
2392 {
2393 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2394 	dev_info_t		*dip;
2395 	dev_t			dev;
2396 	int			res;
2397 	struct snode		*csp;
2398 
2399 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2400 		return (DDI_PROP_INVAL_ARG);
2401 
2402 	dev = handlep->lh_vp->v_rdev;
2403 
2404 	csp = VTOCS(handlep->lh_vp);
2405 	mutex_enter(&csp->s_lock);
2406 	if ((dip = csp->s_dip) != NULL)
2407 		e_ddi_hold_devi(dip);
2408 	mutex_exit(&csp->s_lock);
2409 	if (dip == NULL)
2410 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2411 
2412 	if (dip == NULL) {
2413 		flags |= DDI_UNBND_DLPI2;
2414 	} else if (flags & LDI_DEV_T_ANY) {
2415 		flags &= ~LDI_DEV_T_ANY;
2416 		dev = DDI_DEV_T_ANY;
2417 	}
2418 
2419 	if (dip != NULL) {
2420 		char	*prop_val;
2421 		int	prop_len;
2422 
2423 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2424 		    (caddr_t *)&prop_val, &prop_len, 0);
2425 
2426 		/* if we got it then return it */
2427 		if (res == DDI_PROP_SUCCESS) {
2428 			char	**str_array;
2429 			int	nelem;
2430 
2431 			/*
2432 			 * pack the returned string array into the format
2433 			 * our callers expect
2434 			 */
2435 			if (i_pack_string_array(prop_val, prop_len,
2436 			    &str_array, &nelem) == 0) {
2437 
2438 				*data = str_array;
2439 				*nelements = nelem;
2440 
2441 				ddi_prop_free(prop_val);
2442 				ddi_release_devi(dip);
2443 				return (res);
2444 			}
2445 
2446 			/*
2447 			 * the format of the returned property must have
2448 			 * been bad so throw it out
2449 			 */
2450 			ddi_prop_free(prop_val);
2451 		}
2452 	}
2453 
2454 	/* call the normal property interfaces */
2455 	res = ddi_prop_lookup_string_array(dev, dip, flags,
2456 	    name, data, nelements);
2457 
2458 	if (dip != NULL)
2459 		ddi_release_devi(dip);
2460 
2461 	return (res);
2462 }
2463 
2464 int
2465 ldi_prop_lookup_string(ldi_handle_t lh,
2466     uint_t flags, char *name, char **data)
2467 {
2468 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2469 	dev_info_t		*dip;
2470 	dev_t			dev;
2471 	int			res;
2472 	struct snode		*csp;
2473 
2474 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2475 		return (DDI_PROP_INVAL_ARG);
2476 
2477 	dev = handlep->lh_vp->v_rdev;
2478 
2479 	csp = VTOCS(handlep->lh_vp);
2480 	mutex_enter(&csp->s_lock);
2481 	if ((dip = csp->s_dip) != NULL)
2482 		e_ddi_hold_devi(dip);
2483 	mutex_exit(&csp->s_lock);
2484 	if (dip == NULL)
2485 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2486 
2487 	if (dip == NULL) {
2488 		flags |= DDI_UNBND_DLPI2;
2489 	} else if (flags & LDI_DEV_T_ANY) {
2490 		flags &= ~LDI_DEV_T_ANY;
2491 		dev = DDI_DEV_T_ANY;
2492 	}
2493 
2494 	if (dip != NULL) {
2495 		char	*prop_val;
2496 		int	prop_len;
2497 
2498 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2499 		    (caddr_t *)&prop_val, &prop_len, 0);
2500 
2501 		/* if we got it then return it */
2502 		if (res == DDI_PROP_SUCCESS) {
2503 			/*
2504 			 * Sanity check the value returned.
2505 			 */
2506 			if (i_check_string(prop_val, prop_len)) {
2507 				ddi_prop_free(prop_val);
2508 			} else {
2509 				*data = prop_val;
2510 				ddi_release_devi(dip);
2511 				return (res);
2512 			}
2513 		}
2514 	}
2515 
2516 	/* call the normal property interfaces */
2517 	res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2518 
2519 	if (dip != NULL)
2520 		ddi_release_devi(dip);
2521 
2522 #ifdef DEBUG
2523 	if (res == DDI_PROP_SUCCESS) {
2524 		/*
2525 		 * Keep ourselves honest: make sure the framework
2526 		 * returns strings in the same format as we're
2527 		 * demanding from drivers.
2528 		 */
2529 		struct prop_driver_data	*pdd;
2530 		int			pdd_prop_size;
2531 
2532 		pdd = ((struct prop_driver_data *)(*data)) - 1;
2533 		pdd_prop_size = pdd->pdd_size -
2534 		    sizeof (struct prop_driver_data);
2535 		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2536 	}
2537 #endif /* DEBUG */
2538 
2539 	return (res);
2540 }
2541 
2542 int
2543 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2544     uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2545 {
2546 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2547 	dev_info_t		*dip;
2548 	dev_t			dev;
2549 	int			res;
2550 	struct snode		*csp;
2551 
2552 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2553 		return (DDI_PROP_INVAL_ARG);
2554 
2555 	dev = handlep->lh_vp->v_rdev;
2556 
2557 	csp = VTOCS(handlep->lh_vp);
2558 	mutex_enter(&csp->s_lock);
2559 	if ((dip = csp->s_dip) != NULL)
2560 		e_ddi_hold_devi(dip);
2561 	mutex_exit(&csp->s_lock);
2562 	if (dip == NULL)
2563 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2564 
2565 	if (dip == NULL) {
2566 		flags |= DDI_UNBND_DLPI2;
2567 	} else if (flags & LDI_DEV_T_ANY) {
2568 		flags &= ~LDI_DEV_T_ANY;
2569 		dev = DDI_DEV_T_ANY;
2570 	}
2571 
2572 	if (dip != NULL) {
2573 		uchar_t	*prop_val;
2574 		int	prop_len;
2575 
2576 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2577 		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2578 
2579 		/* if we got it then return it */
2580 		if (res == DDI_PROP_SUCCESS) {
2581 			*nelements = prop_len / sizeof (uchar_t);
2582 			*data = prop_val;
2583 
2584 			ddi_release_devi(dip);
2585 			return (res);
2586 		}
2587 	}
2588 
2589 	/* call the normal property interfaces */
2590 	res = ddi_prop_lookup_byte_array(dev, dip, flags,
2591 	    name, data, nelements);
2592 
2593 	if (dip != NULL)
2594 		ddi_release_devi(dip);
2595 
2596 	return (res);
2597 }
2598 
2599 int
2600 ldi_prop_get_int(ldi_handle_t lh,
2601     uint_t flags, char *name, int defvalue)
2602 {
2603 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2604 	dev_info_t		*dip;
2605 	dev_t			dev;
2606 	int			res;
2607 	struct snode		*csp;
2608 
2609 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2610 		return (defvalue);
2611 
2612 	dev = handlep->lh_vp->v_rdev;
2613 
2614 	csp = VTOCS(handlep->lh_vp);
2615 	mutex_enter(&csp->s_lock);
2616 	if ((dip = csp->s_dip) != NULL)
2617 		e_ddi_hold_devi(dip);
2618 	mutex_exit(&csp->s_lock);
2619 	if (dip == NULL)
2620 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2621 
2622 	if (dip == NULL) {
2623 		flags |= DDI_UNBND_DLPI2;
2624 	} else if (flags & LDI_DEV_T_ANY) {
2625 		flags &= ~LDI_DEV_T_ANY;
2626 		dev = DDI_DEV_T_ANY;
2627 	}
2628 
2629 	if (dip != NULL) {
2630 		int	prop_val;
2631 		int	prop_len;
2632 
2633 		/*
2634 		 * First call the driver's prop_op interface to allow
2635 		 * it to override default property values.
2636 		 */
2637 		prop_len = sizeof (int);
2638 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2639 		    flags | DDI_PROP_DYNAMIC, name,
2640 		    (caddr_t)&prop_val, &prop_len);
2641 
2642 		/* if we got it then return it */
2643 		if ((res == DDI_PROP_SUCCESS) &&
2644 		    (prop_len == sizeof (int))) {
2645 			res = prop_val;
2646 			ddi_release_devi(dip);
2647 			return (res);
2648 		}
2649 	}
2650 
2651 	/* call the normal property interfaces */
2652 	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2653 
2654 	if (dip != NULL)
2655 		ddi_release_devi(dip);
2656 
2657 	return (res);
2658 }
2659 
2660 int64_t
2661 ldi_prop_get_int64(ldi_handle_t lh,
2662     uint_t flags, char *name, int64_t defvalue)
2663 {
2664 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2665 	dev_info_t		*dip;
2666 	dev_t			dev;
2667 	int64_t			res;
2668 	struct snode		*csp;
2669 
2670 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2671 		return (defvalue);
2672 
2673 	dev = handlep->lh_vp->v_rdev;
2674 
2675 	csp = VTOCS(handlep->lh_vp);
2676 	mutex_enter(&csp->s_lock);
2677 	if ((dip = csp->s_dip) != NULL)
2678 		e_ddi_hold_devi(dip);
2679 	mutex_exit(&csp->s_lock);
2680 	if (dip == NULL)
2681 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2682 
2683 	if (dip == NULL) {
2684 		flags |= DDI_UNBND_DLPI2;
2685 	} else if (flags & LDI_DEV_T_ANY) {
2686 		flags &= ~LDI_DEV_T_ANY;
2687 		dev = DDI_DEV_T_ANY;
2688 	}
2689 
2690 	if (dip != NULL) {
2691 		int64_t	prop_val;
2692 		int	prop_len;
2693 
2694 		/*
2695 		 * First call the driver's prop_op interface to allow
2696 		 * it to override default property values.
2697 		 */
2698 		prop_len = sizeof (int64_t);
2699 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2700 		    flags | DDI_PROP_DYNAMIC, name,
2701 		    (caddr_t)&prop_val, &prop_len);
2702 
2703 		/* if we got it then return it */
2704 		if ((res == DDI_PROP_SUCCESS) &&
2705 		    (prop_len == sizeof (int64_t))) {
2706 			res = prop_val;
2707 			ddi_release_devi(dip);
2708 			return (res);
2709 		}
2710 	}
2711 
2712 	/* call the normal property interfaces */
2713 	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2714 
2715 	if (dip != NULL)
2716 		ddi_release_devi(dip);
2717 
2718 	return (res);
2719 }
2720 
2721 int
2722 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2723 {
2724 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2725 	dev_info_t		*dip;
2726 	dev_t			dev;
2727 	int			res, prop_len;
2728 	struct snode		*csp;
2729 
2730 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2731 		return (0);
2732 
2733 	dev = handlep->lh_vp->v_rdev;
2734 
2735 	csp = VTOCS(handlep->lh_vp);
2736 	mutex_enter(&csp->s_lock);
2737 	if ((dip = csp->s_dip) != NULL)
2738 		e_ddi_hold_devi(dip);
2739 	mutex_exit(&csp->s_lock);
2740 	if (dip == NULL)
2741 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2742 
2743 	/* if NULL dip, prop does NOT exist */
2744 	if (dip == NULL)
2745 		return (0);
2746 
2747 	if (flags & LDI_DEV_T_ANY) {
2748 		flags &= ~LDI_DEV_T_ANY;
2749 		dev = DDI_DEV_T_ANY;
2750 	}
2751 
2752 	/*
2753 	 * First call the driver's prop_op interface to allow
2754 	 * it to override default property values.
2755 	 */
2756 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
2757 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2758 
2759 	if (res == DDI_PROP_SUCCESS) {
2760 		ddi_release_devi(dip);
2761 		return (1);
2762 	}
2763 
2764 	/* call the normal property interfaces */
2765 	res = ddi_prop_exists(dev, dip, flags, name);
2766 
2767 	ddi_release_devi(dip);
2768 	return (res);
2769 }
2770 
2771 #ifdef	LDI_OBSOLETE_EVENT
2772 
2773 int
2774 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2775 {
2776 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2777 	dev_info_t		*dip;
2778 	dev_t			dev;
2779 	int			res;
2780 	struct snode		*csp;
2781 
2782 	if ((lh == NULL) || (name == NULL) ||
2783 	    (strlen(name) == 0) || (ecp == NULL)) {
2784 		return (DDI_FAILURE);
2785 	}
2786 
2787 	ASSERT(!servicing_interrupt());
2788 
2789 	dev = handlep->lh_vp->v_rdev;
2790 
2791 	csp = VTOCS(handlep->lh_vp);
2792 	mutex_enter(&csp->s_lock);
2793 	if ((dip = csp->s_dip) != NULL)
2794 		e_ddi_hold_devi(dip);
2795 	mutex_exit(&csp->s_lock);
2796 	if (dip == NULL)
2797 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2798 
2799 	if (dip == NULL)
2800 		return (DDI_FAILURE);
2801 
2802 	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2803 	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2804 	    name, (void *)dip, (void *)ecp));
2805 
2806 	res = ddi_get_eventcookie(dip, name, ecp);
2807 
2808 	ddi_release_devi(dip);
2809 	return (res);
2810 }
2811 
2812 int
2813 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2814     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2815     void *arg, ldi_callback_id_t *id)
2816 {
2817 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2818 	struct ldi_event	*lep;
2819 	dev_info_t		*dip;
2820 	dev_t			dev;
2821 	int			res;
2822 	struct snode		*csp;
2823 
2824 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2825 		return (DDI_FAILURE);
2826 
2827 	ASSERT(!servicing_interrupt());
2828 
2829 	dev = handlep->lh_vp->v_rdev;
2830 
2831 	csp = VTOCS(handlep->lh_vp);
2832 	mutex_enter(&csp->s_lock);
2833 	if ((dip = csp->s_dip) != NULL)
2834 		e_ddi_hold_devi(dip);
2835 	mutex_exit(&csp->s_lock);
2836 	if (dip == NULL)
2837 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2838 
2839 	if (dip == NULL)
2840 		return (DDI_FAILURE);
2841 
2842 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2843 	lep->le_lhp = handlep;
2844 	lep->le_arg = arg;
2845 	lep->le_handler = handler;
2846 
2847 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2848 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2849 		LDI_EVENTCB((CE_WARN, "%s: unable to add "
2850 		    "event callback", "ldi_add_event_handler"));
2851 		ddi_release_devi(dip);
2852 		kmem_free(lep, sizeof (struct ldi_event));
2853 		return (res);
2854 	}
2855 
2856 	*id = (ldi_callback_id_t)lep;
2857 
2858 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2859 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2860 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2861 
2862 	handle_event_add(lep);
2863 	ddi_release_devi(dip);
2864 	return (res);
2865 }
2866 
2867 int
2868 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2869 {
2870 	ldi_event_t		*lep = (ldi_event_t *)id;
2871 	int			res;
2872 
2873 	if ((lh == NULL) || (id == NULL))
2874 		return (DDI_FAILURE);
2875 
2876 	ASSERT(!servicing_interrupt());
2877 
2878 	if ((res = ddi_remove_event_handler(lep->le_id))
2879 	    != DDI_SUCCESS) {
2880 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2881 		    "event callback", "ldi_remove_event_handler"));
2882 		return (res);
2883 	}
2884 
2885 	handle_event_remove(lep);
2886 	kmem_free(lep, sizeof (struct ldi_event));
2887 	return (res);
2888 }
2889 
2890 #endif
2891 
2892 /*
2893  * Here are some definitions of terms used in the following LDI events
2894  * code:
2895  *
2896  * "LDI events" AKA "native events": These are events defined by the
2897  * "new" LDI event framework. These events are serviced by the LDI event
2898  * framework itself and thus are native to it.
2899  *
2900  * "LDI contract events": These are contract events that correspond to the
2901  * LDI events.  This mapping of LDI events to contract events is defined by
2902  * the ldi_ev_cookies[] array above.
2903  *
2904  * "NDI events": These are events which are serviced by the NDI event
2905  * subsystem.  The LDI subsystem just provides a thin wrapper around the NDI
2906  * event interfaces.  These events are therefore *not* native events.
2907  */
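
/*
 * Illustrative sketch (not built as part of this file) of how a layered
 * driver might register for a native LDI event on a handle it holds.  The
 * xx_notify()/xx_finalize() callbacks, the xx_ state names, and the use of
 * the LDI_EV_OFFLINE event name are assumptions for the example; see
 * <sys/sunldi.h> for the actual set of supported events.
 *
 *	ldi_ev_cookie_t		cookie;
 *	ldi_callback_id_t	id;
 *	ldi_ev_callback_t	cb;
 *
 *	if (ldi_ev_get_cookie(xx_lh, LDI_EV_OFFLINE, &cookie) !=
 *	    LDI_EV_SUCCESS)
 *		return (EINVAL);
 *
 *	cb.cb_vers = LDI_EV_CB_VERS;
 *	cb.cb_notify = xx_notify;
 *	cb.cb_finalize = xx_finalize;
 *	if (ldi_ev_register_callbacks(xx_lh, cookie, &cb, xx_statep, &id) !=
 *	    LDI_EV_SUCCESS)
 *		return (EINVAL);
 *
 * The notify callback may veto the event; the finalize callback is told of
 * the final outcome.  The registration is undone with
 * ldi_ev_remove_callbacks(id) before the handle is closed.
 */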
2908 
2909 static int
2910 ldi_native_event(const char *evname)
2911 {
2912 	int i;
2913 
2914 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2915 
2916 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2917 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2918 			return (1);
2919 	}
2920 
2921 	return (0);
2922 }
2923 
2924 static uint_t
2925 ldi_ev_sync_event(const char *evname)
2926 {
2927 	int i;
2928 
2929 	ASSERT(ldi_native_event(evname));
2930 
2931 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2932 
2933 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2934 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2935 			return (ldi_ev_cookies[i].ck_sync);
2936 	}
2937 
2938 	/*
2939 	 * This should never happen until non-contract based
2940 	 * LDI events are introduced. If that happens, we will
2941 	 * use a "special" token to indicate that there are no
2942 	 * contracts corresponding to this LDI event.
2943 	 */
2944 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2945 
2946 	return (0);
2947 }
2948 
2949 static uint_t
2950 ldi_contract_event(const char *evname)
2951 {
2952 	int i;
2953 
2954 	ASSERT(ldi_native_event(evname));
2955 
2956 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2957 
2958 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2959 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2960 			return (ldi_ev_cookies[i].ck_ctype);
2961 	}
2962 
2963 	/*
2964 	 * This should never happen until non-contract based
2965 	 * LDI events are introduced. If that happens, we will
2966 	 * use a "special" token to indicate that there are no
2967 	 * contracts corresponding to this LDI event.
2968 	 */
2969 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2970 
2971 	return (0);
2972 }
2973 
2974 char *
2975 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2976 {
2977 	int i;
2978 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2979 
2980 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2981 		if (&ldi_ev_cookies[i] == cookie_impl) {
2982 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2983 			    ldi_ev_cookies[i].ck_evname));
2984 			return (ldi_ev_cookies[i].ck_evname);
2985 		}
2986 	}
2987 
2988 	/*
2989 	 * Not an LDI native event. Must be NDI event service.
2990 	 * Just return a generic string.
2991 	 */
2992 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2993 	return (NDI_EVENT_SERVICE);
2994 }
2995 
2996 static int
2997 ldi_native_cookie(ldi_ev_cookie_t cookie)
2998 {
2999 	int i;
3000 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
3001 
3002 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3003 		if (&ldi_ev_cookies[i] == cookie_impl) {
3004 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
3005 			return (1);
3006 		}
3007 	}
3008 
3009 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3010 	return (0);
3011 }
3012 
3013 static ldi_ev_cookie_t
3014 ldi_get_native_cookie(const char *evname)
3015 {
3016 	int i;
3017 
3018 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3019 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3020 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3021 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3022 		}
3023 	}
3024 
3025 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3026 	return (NULL);
3027 }
3028 
3029 /*
3030  * ldi_ev_lock() needs to be recursive, since layered drivers may call
3031  * other LDI interfaces (such as ldi_close()) from within the context of
3032  * a notify callback. Since the notify callback is called with the
3033  * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3034  * to be recursive.
3035  */
3036 static void
3037 ldi_ev_lock(void)
3038 {
3039 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3040 
3041 	mutex_enter(&ldi_ev_callback_list.le_lock);
3042 	if (ldi_ev_callback_list.le_thread == curthread) {
3043 		ASSERT(ldi_ev_callback_list.le_busy >= 1);
3044 		ldi_ev_callback_list.le_busy++;
3045 	} else {
3046 		while (ldi_ev_callback_list.le_busy)
3047 			cv_wait(&ldi_ev_callback_list.le_cv,
3048 			    &ldi_ev_callback_list.le_lock);
3049 		ASSERT(ldi_ev_callback_list.le_thread == NULL);
3050 		ldi_ev_callback_list.le_busy = 1;
3051 		ldi_ev_callback_list.le_thread = curthread;
3052 	}
3053 	mutex_exit(&ldi_ev_callback_list.le_lock);
3054 
3055 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3056 }
3057 
3058 static void
3059 ldi_ev_unlock(void)
3060 {
3061 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3062 	mutex_enter(&ldi_ev_callback_list.le_lock);
3063 	ASSERT(ldi_ev_callback_list.le_thread == curthread);
3064 	ASSERT(ldi_ev_callback_list.le_busy >= 1);
3065 
3066 	ldi_ev_callback_list.le_busy--;
3067 	if (ldi_ev_callback_list.le_busy == 0) {
3068 		ldi_ev_callback_list.le_thread = NULL;
3069 		cv_signal(&ldi_ev_callback_list.le_cv);
3070 	}
3071 	mutex_exit(&ldi_ev_callback_list.le_lock);
3072 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3073 }
3074 
3075 int
3076 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3077 {
3078 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
3079 	dev_info_t		*dip;
3080 	dev_t			dev;
3081 	int			res;
3082 	struct snode		*csp;
3083 	ddi_eventcookie_t	ddi_cookie;
3084 	ldi_ev_cookie_t		tcookie;
3085 
3086 	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3087 	    evname ? evname : "<NULL>"));
3088 
3089 	if (lh == NULL || evname == NULL ||
3090 	    strlen(evname) == 0 || cookiep == NULL) {
3091 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3092 		return (LDI_EV_FAILURE);
3093 	}
3094 
3095 	*cookiep = NULL;
3096 
3097 	/*
3098 	 * First check if it is an LDI native event.
3099 	 */
3100 	tcookie = ldi_get_native_cookie(evname);
3101 	if (tcookie) {
3102 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3103 		*cookiep = tcookie;
3104 		return (LDI_EV_SUCCESS);
3105 	}
3106 
3107 	/*
3108 	 * Not an LDI native event.  Try the NDI event services.
3109 	 */
3110 
3111 	dev = handlep->lh_vp->v_rdev;
3112 
3113 	csp = VTOCS(handlep->lh_vp);
3114 	mutex_enter(&csp->s_lock);
3115 	if ((dip = csp->s_dip) != NULL)
3116 		e_ddi_hold_devi(dip);
3117 	mutex_exit(&csp->s_lock);
3118 	if (dip == NULL)
3119 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3120 
3121 	if (dip == NULL) {
3122 		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3123 		    "handle: %p", (void *)handlep);
3124 		return (LDI_EV_FAILURE);
3125 	}
3126 
3127 	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3128 	    (void *)dip, evname));
3129 
3130 	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3131 
3132 	ddi_release_devi(dip);
3133 
3134 	if (res == DDI_SUCCESS) {
3135 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3136 		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
3137 		return (LDI_EV_SUCCESS);
3138 	} else {
3139 		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3140 		return (LDI_EV_FAILURE);
3141 	}
3142 }
3143 
3144 /*ARGSUSED*/
3145 static void
3146 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3147     void *arg, void *ev_data)
3148 {
3149 	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3150 
3151 	ASSERT(lecp != NULL);
3152 	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3153 	ASSERT(lecp->lec_lhp);
3154 	ASSERT(lecp->lec_notify == NULL);
3155 	ASSERT(lecp->lec_finalize);
3156 
3157 	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3158 	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3159 	    (void *)lecp->lec_arg, (void *)ev_data));
3160 
3161 	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3162 	    lecp->lec_arg, ev_data);
3163 }
3164 
3165 int
3166 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3167     ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3168 {
3169 	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
3170 	ldi_ev_callback_impl_t	*lecp;
3171 	dev_t			dev;
3172 	struct snode		*csp;
3173 	dev_info_t		*dip;
3174 	int			ddi_event;
3175 
3176 	ASSERT(!servicing_interrupt());
3177 
3178 	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3179 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3180 		return (LDI_EV_FAILURE);
3181 	}
3182 
3183 	if (callb->cb_vers != LDI_EV_CB_VERS) {
3184 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3185 		return (LDI_EV_FAILURE);
3186 	}
3187 
3188 	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3189 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3190 		return (LDI_EV_FAILURE);
3191 	}
3192 
3193 	*id = 0;
3194 
3195 	dev = lhp->lh_vp->v_rdev;
3196 	csp = VTOCS(lhp->lh_vp);
3197 	mutex_enter(&csp->s_lock);
3198 	if ((dip = csp->s_dip) != NULL)
3199 		e_ddi_hold_devi(dip);
3200 	mutex_exit(&csp->s_lock);
3201 	if (dip == NULL)
3202 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3203 
3204 	if (dip == NULL) {
3205 		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3206 		    "LDI handle: %p", (void *)lhp);
3207 		return (LDI_EV_FAILURE);
3208 	}
3209 
3210 	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3211 
3212 	ddi_event = 0;
3213 	if (!ldi_native_cookie(cookie)) {
3214 		if (callb->cb_notify || callb->cb_finalize == NULL) {
3215 			/*
3216 			 * NDI event services only accept finalize
3217 			 */
3218 			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3219 			    "Only finalize"
3220 			    " callback supported with this cookie",
3221 			    "ldi_ev_register_callbacks",
3222 			    lhp->lh_ident->li_modname);
3223 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3224 			ddi_release_devi(dip);
3225 			return (LDI_EV_FAILURE);
3226 		}
3227 
3228 		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3229 		    i_ldi_ev_callback, (void *)lecp,
3230 		    (ddi_callback_id_t *)&lecp->lec_id)
3231 		    != DDI_SUCCESS) {
3232 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3233 			ddi_release_devi(dip);
3234 			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3235 			    "ddi_add_event_handler failed"));
3236 			return (LDI_EV_FAILURE);
3237 		}
3238 		ddi_event = 1;
3239 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3240 		    "ddi_add_event_handler success"));
3241 	}
3242 
3243 
3244 
3245 	ldi_ev_lock();
3246 
3247 	/*
3248 	 * Add the notify/finalize callback to the LDI's list of callbacks.
3249 	 */
3250 	lecp->lec_lhp = lhp;
3251 	lecp->lec_dev = lhp->lh_vp->v_rdev;
3252 	lecp->lec_spec = (lhp->lh_vp->v_type == VCHR) ?
3253 	    S_IFCHR : S_IFBLK;
3254 	lecp->lec_notify = callb->cb_notify;
3255 	lecp->lec_finalize = callb->cb_finalize;
3256 	lecp->lec_arg = arg;
3257 	lecp->lec_cookie = cookie;
3258 	if (!ddi_event)
3259 		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3260 	else
3261 		ASSERT(lecp->lec_id);
3262 	lecp->lec_dip = dip;
3263 	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3264 
3265 	*id = (ldi_callback_id_t)lecp->lec_id;
3266 
3267 	ldi_ev_unlock();
3268 
3269 	ddi_release_devi(dip);
3270 
3271 	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3272 	    "notify/finalize"));
3273 
3274 	return (LDI_EV_SUCCESS);
3275 }
3276 
3277 static int
3278 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3279     dev_t dev, int spec_type)
3280 {
3281 	ASSERT(lecp);
3282 	ASSERT(dip);
3283 	ASSERT(dev != DDI_DEV_T_NONE);
3284 	ASSERT(dev != NODEV);
3285 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3286 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3287 	ASSERT(lecp->lec_dip);
3288 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3289 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3290 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3291 	ASSERT(lecp->lec_dev != NODEV);
3292 
3293 	if (dip != lecp->lec_dip)
3294 		return (0);
3295 
3296 	if (dev != DDI_DEV_T_ANY) {
3297 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3298 			return (0);
3299 	}
3300 
3301 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3302 
3303 	return (1);
3304 }
3305 
3306 /*
3307  * LDI framework function to post a "notify" event to all layered drivers
3308  * that have registered for that event.
3309  *
3310  * Returns:
3311  *		LDI_EV_SUCCESS - registered callbacks allow event
3312  *		LDI_EV_FAILURE - registered callbacks block event
3313  *		LDI_EV_NONE    - No matching LDI callbacks
3314  *
3315  * This function is *not* to be called by layered drivers. It is for I/O
3316  * framework code in Solaris, such as the I/O retire code and DR code,
3317  * to call while servicing a device event such as offline or degraded.
3318  */
3319 int
3320 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3321     void *ev_data)
3322 {
3323 	ldi_ev_callback_impl_t *lecp;
3324 	list_t	*listp;
3325 	int	ret;
3326 	char	*lec_event;
3327 
3328 	ASSERT(dip);
3329 	ASSERT(dev != DDI_DEV_T_NONE);
3330 	ASSERT(dev != NODEV);
3331 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3332 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3333 	ASSERT(event);
3334 	ASSERT(ldi_native_event(event));
3335 	ASSERT(ldi_ev_sync_event(event));
3336 
3337 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3338 	    (void *)dip, event));
3339 
3340 	ret = LDI_EV_NONE;
3341 	ldi_ev_lock();
3342 	listp = &ldi_ev_callback_list.le_head;
3343 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3344 
3345 		/* Check if matching device */
3346 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3347 			continue;
3348 
3349 		if (lecp->lec_lhp == NULL) {
3350 			/*
3351 			 * Consumer has unregistered the handle and so
3352 			 * is no longer interested in notify events.
3353 			 */
3354 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3355 			    "handle, skipping"));
3356 			continue;
3357 		}
3358 
3359 		if (lecp->lec_notify == NULL) {
3360 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3361 			    "callback. skipping"));
3362 			continue;	/* not interested in notify */
3363 		}
3364 
3365 		/*
3366 		 * Check if matching event
3367 		 */
3368 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3369 		if (strcmp(event, lec_event) != 0) {
3370 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3371 			    " event {%s,%s}. skipping", event, lec_event));
3372 			continue;
3373 		}
3374 
3375 		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3376 		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3377 		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3378 			ret = LDI_EV_FAILURE;
3379 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3380 			    " FAILURE"));
3381 			break;
3382 		}
3383 
3384 		/* We have a matching callback that allows the event to occur */
3385 		ret = LDI_EV_SUCCESS;
3386 
3387 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3388 	}
3389 
3390 	if (ret != LDI_EV_FAILURE)
3391 		goto out;
3392 
3393 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3394 
3395 	/*
3396 	 * Undo notifies already sent
3397 	 */
3398 	lecp = list_prev(listp, lecp);
3399 	for (; lecp; lecp = list_prev(listp, lecp)) {
3400 
3401 		/*
3402 		 * Check if matching device
3403 		 */
3404 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3405 			continue;
3406 
3407 
3408 		if (lecp->lec_finalize == NULL) {
3409 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3410 			    "skipping"));
3411 			continue;	/* not interested in finalize */
3412 		}
3413 
3414 		/*
3415 		 * It is possible that, in response to a notify event, a
3416 		 * layered driver closed its LDI handle, so it is ok
3417 		 * to have a NULL LDI handle for finalize.  The layered
3418 		 * driver is expected to maintain state in its "arg"
3419 		 * parameter to keep track of the closed device.
3420 		 */
3421 
3422 		/* Check if matching event */
3423 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3424 		if (strcmp(event, lec_event) != 0) {
3425 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3426 			    "event: %s,%s, skipping", event, lec_event));
3427 			continue;
3428 		}
3429 
3430 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3431 
3432 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3433 		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3434 
3435 		/*
3436 		 * If this is an LDI native event and the LDI handle was closed
3437 		 * in the context of notify, NULL out the finalize callback, as we
3438 		 * have already made the one finalize call allowed in this situation.
3439 		 */
3440 		if (lecp->lec_lhp == NULL &&
3441 		    ldi_native_cookie(lecp->lec_cookie)) {
3442 			LDI_EVDBG((CE_NOTE,
3443 			    "ldi_invoke_notify(): NULL-ing finalize after "
3444 			    "calling 1 finalize following ldi_close"));
3445 			lecp->lec_finalize = NULL;
3446 		}
3447 	}
3448 
3449 out:
3450 	ldi_ev_unlock();
3451 
3452 	if (ret == LDI_EV_NONE) {
3453 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3454 		    "LDI callbacks"));
3455 	}
3456 
3457 	return (ret);
3458 }
3459 
3460 /*
3461  * Framework function to be called from a layered driver to propagate
3462  * LDI "notify" events to exported minors.
3463  *
3464  * This function is a public interface exported by the LDI framework
3465  * for use by layered drivers to propagate device events up the software
3466  * stack.
3467  */
3468 int
3469 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3470     ldi_ev_cookie_t cookie, void *ev_data)
3471 {
3472 	char		*evname = ldi_ev_get_type(cookie);
3473 	uint_t		ct_evtype;
3474 	dev_t		dev;
3475 	major_t		major;
3476 	int		retc;
3477 	int		retl;
3478 
3479 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3480 	ASSERT(dip);
3481 	ASSERT(ldi_native_cookie(cookie));
3482 
3483 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3484 	    evname, (void *)dip));
3485 
3486 	if (!ldi_ev_sync_event(evname)) {
3487 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3488 		    "negotiatable event", evname);
3489 		return (LDI_EV_SUCCESS);
3490 	}
3491 
3492 	major = ddi_driver_major(dip);
3493 	if (major == DDI_MAJOR_T_NONE) {
3494 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3495 		(void) ddi_pathname(dip, path);
3496 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3497 		    "for device %s", path);
3498 		kmem_free(path, MAXPATHLEN);
3499 		return (LDI_EV_FAILURE);
3500 	}
3501 	dev = makedevice(major, minor);
3502 
3503 	/*
3504 	 * Generate negotiation contract events on contracts (if any) associated
3505 	 * with this minor.
3506 	 */
3507 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3508 	ct_evtype = ldi_contract_event(evname);
3509 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3510 	if (retc == CT_NACK) {
3511 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3512 		return (LDI_EV_FAILURE);
3513 	}
3514 
3515 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3516 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3517 	if (retl == LDI_EV_FAILURE) {
3518 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3519 		    "returned FAILURE. Calling contract negend"));
3520 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3521 		return (LDI_EV_FAILURE);
3522 	}
3523 
3524 	/*
3525 	 * The very fact that we are here indicates that there is an
3526 	 * LDI callback (and hence a constraint) for the retire of the
3527 	 * HW device.  So we just return success even if there are no
3528 	 * contracts or LDI callbacks against the minors layered on top
3529 	 * of the HW minors.
3530 	 */
3531 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3532 	return (LDI_EV_SUCCESS);
3533 }
3534 
3535 /*
3536  * LDI framework function to invoke "finalize" callbacks for all layered
3537  * drivers that have registered callbacks for that event.
3538  *
3539  * This function is *not* to be called by layered drivers. It is for I/O
3540  * framework code in Solaris, such as the I/O retire code and DR code,
3541  * to call while servicing a device event such as offline or degraded.
3542  */
3543 void
3544 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3545     int ldi_result, void *ev_data)
3546 {
3547 	ldi_ev_callback_impl_t *lecp;
3548 	list_t	*listp;
3549 	char	*lec_event;
3550 	int	found = 0;
3551 
3552 	ASSERT(dip);
3553 	ASSERT(dev != DDI_DEV_T_NONE);
3554 	ASSERT(dev != NODEV);
3555 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3556 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3557 	ASSERT(event);
3558 	ASSERT(ldi_native_event(event));
3559 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3560 
3561 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3562 	    " event=%s", (void *)dip, ldi_result, event));
3563 
3564 	ldi_ev_lock();
3565 	listp = &ldi_ev_callback_list.le_head;
3566 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3567 
3568 		if (lecp->lec_finalize == NULL) {
3569 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3570 			    "finalize. Skipping"));
3571 			continue;	/* Not interested in finalize */
3572 		}
3573 
3574 		/*
3575 		 * Check if matching device
3576 		 */
3577 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3578 			continue;
3579 
3580 		/*
3581 		 * It is valid for the LDI handle to be NULL during finalize.
3582 		 * The layered driver may have done an LDI close in the notify
3583 		 * callback.
3584 		 */
3585 
3586 		/*
3587 		 * Check if matching event
3588 		 */
3589 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3590 		if (strcmp(event, lec_event) != 0) {
3591 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3592 			    "matching event {%s,%s}. Skipping",
3593 			    event, lec_event));
3594 			continue;
3595 		}
3596 
3597 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3598 
3599 		found = 1;
3600 
3601 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3602 		    ldi_result, lecp->lec_arg, ev_data);
3603 
3604 		/*
3605 		 * If this is an LDI native event and the LDI handle was closed
3606 		 * in the context of notify, NULL out the finalize callback, as we
3607 		 * have already made the one finalize call allowed in this situation.
3608 		 */
3609 		if (lecp->lec_lhp == NULL &&
3610 		    ldi_native_cookie(lecp->lec_cookie)) {
3611 			LDI_EVDBG((CE_NOTE,
3612 			    "ldi_invoke_finalize(): NULLing finalize after "
3613 			    "calling 1 finalize following ldi_close"));
3614 			lecp->lec_finalize = NULL;
3615 		}
3616 	}
3617 	ldi_ev_unlock();
3618 
3619 	if (found)
3620 		return;
3621 
3622 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3623 }
3624 
3625 /*
3626  * Framework function to be called from a layered driver to propagate
3627  * LDI "finalize" events to exported minors.
3628  *
3629  * This function is a public interface exported by the LDI framework
3630  * for use by layered drivers to propagate device events up the software
3631  * stack.
3632  */
3633 void
3634 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3635     ldi_ev_cookie_t cookie, void *ev_data)
3636 {
3637 	dev_t dev;
3638 	major_t major;
3639 	char *evname;
3640 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3641 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3642 	uint_t ct_evtype;
3643 
3644 	ASSERT(dip);
3645 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3646 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3647 	ASSERT(ldi_native_cookie(cookie));
3648 
3649 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3650 
3651 	major = ddi_driver_major(dip);
3652 	if (major == DDI_MAJOR_T_NONE) {
3653 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3654 		(void) ddi_pathname(dip, path);
3655 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3656 		    "for device %s", path);
3657 		kmem_free(path, MAXPATHLEN);
3658 		return;
3659 	}
3660 	dev = makedevice(major, minor);
3661 
3662 	evname = ldi_ev_get_type(cookie);
3663 
3664 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3665 	ct_evtype = ldi_contract_event(evname);
3666 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3667 
3668 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3669 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3670 }
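
/*
 * Illustrative sketch (not built as part of this file): a layered driver
 * that exports its own minors can propagate events received on its
 * underlying device by calling ldi_ev_notify() from its notify callback
 * and ldi_ev_finalize() from its finalize callback.  The xx_ names and the
 * S_IFCHR spec_type are assumptions for the example.
 *
 *	static int
 *	xx_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
 *	    void *ev_data)
 *	{
 *		xx_state_t *xsp = arg;
 *
 *		return (ldi_ev_notify(xsp->xx_dip, xsp->xx_minor, S_IFCHR,
 *		    cookie, ev_data));
 *	}
 *
 *	static void
 *	xx_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int result,
 *	    void *arg, void *ev_data)
 *	{
 *		xx_state_t *xsp = arg;
 *
 *		ldi_ev_finalize(xsp->xx_dip, xsp->xx_minor, S_IFCHR, result,
 *		    cookie, ev_data);
 *	}
 */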
3671 
3672 int
3673 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3674 {
3675 	ldi_ev_callback_impl_t	*lecp;
3676 	ldi_ev_callback_impl_t	*next;
3677 	ldi_ev_callback_impl_t	*found;
3678 	list_t			*listp;
3679 
3680 	ASSERT(!servicing_interrupt());
3681 
3682 	if (id == 0) {
3683 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3684 		return (LDI_EV_FAILURE);
3685 	}
3686 
3687 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3688 	    (void *)id));
3689 
3690 	ldi_ev_lock();
3691 
3692 	listp = &ldi_ev_callback_list.le_head;
3693 	next = found = NULL;
3694 	for (lecp = list_head(listp); lecp; lecp = next) {
3695 		next = list_next(listp, lecp);
3696 		if (lecp->lec_id == id) {
3697 			ASSERT(found == NULL);
3698 			list_remove(listp, lecp);
3699 			found = lecp;
3700 		}
3701 	}
3702 	ldi_ev_unlock();
3703 
3704 	if (found == NULL) {
3705 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3706 		    (void *)id);
3707 		return (LDI_EV_SUCCESS);
3708 	}
3709 
3710 	if (!ldi_native_cookie(found->lec_cookie)) {
3711 		ASSERT(found->lec_notify == NULL);
3712 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3713 		    != DDI_SUCCESS) {
3714 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3715 			    "for id (%p)", (void *)id);
3716 			ldi_ev_lock();
3717 			list_insert_tail(listp, found);
3718 			ldi_ev_unlock();
3719 			return (LDI_EV_FAILURE);
3720 		}
3721 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3722 		    "service removal succeeded"));
3723 	} else {
3724 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3725 		    "LDI native callbacks"));
3726 	}
3727 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3728 
3729 	return (LDI_EV_SUCCESS);
3730 }
3731