xref: /titanic_51/usr/src/uts/common/os/driver_lyr.c (revision 63251bc7f1ca38259078c48e316fee4ed66d4e93)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Layered driver support.
28  */
29 
30 #include <sys/atomic.h>
31 #include <sys/types.h>
32 #include <sys/t_lock.h>
33 #include <sys/param.h>
34 #include <sys/conf.h>
35 #include <sys/systm.h>
36 #include <sys/sysmacros.h>
37 #include <sys/buf.h>
38 #include <sys/cred.h>
39 #include <sys/uio.h>
40 #include <sys/vnode.h>
41 #include <sys/fs/snode.h>
42 #include <sys/open.h>
43 #include <sys/kmem.h>
44 #include <sys/file.h>
45 #include <sys/bootconf.h>
46 #include <sys/pathname.h>
47 #include <sys/bitmap.h>
48 #include <sys/stat.h>
49 #include <sys/dditypes.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/ddi.h>
52 #include <sys/sunddi.h>
53 #include <sys/sunndi.h>
54 #include <sys/esunddi.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunldi.h>
57 #include <sys/sunldi_impl.h>
58 #include <sys/errno.h>
59 #include <sys/debug.h>
60 #include <sys/modctl.h>
61 #include <sys/var.h>
62 #include <vm/seg_vn.h>
63 
64 #include <sys/stropts.h>
65 #include <sys/strsubr.h>
66 #include <sys/socket.h>
67 #include <sys/socketvar.h>
68 #include <sys/kstr.h>
69 
70 /*
71  * Device contract related
72  */
73 #include <sys/contract_impl.h>
74 #include <sys/contract/device_impl.h>
75 
76 /*
77  * Define macros to manipulate snode, vnode, and open device flags
78  */
79 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
80 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
81 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
82 
83 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
84 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
85 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
86 
87 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
88 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
89 
90 /*
91  * Define macros for accessing layered driver hash structures
92  */
93 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
94 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
95 
96 /*
97  * Define layered handle flags used in the lh_type field
98  */
99 #define	LH_STREAM	(0x1)	/* handle to a streams device */
100 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
101 
102 /*
103  * Define macro for devid property lookups
104  */
105 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
106 				DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
107 
108 /*
109  * Dummy string for NDI events
110  */
111 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
112 
113 static void ldi_ev_lock(void);
114 static void ldi_ev_unlock(void);
115 
116 #ifdef	LDI_OBSOLETE_EVENT
117 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
118 #endif
119 
120 
121 /*
122  * globals
123  */
124 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
125 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
126 
127 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
128 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
129 static size_t			ldi_handle_hash_count;
130 
131 static struct ldi_ev_callback_list ldi_ev_callback_list;
132 
133 static uint32_t ldi_ev_id_pool = 0;
134 
/*
 * Static description of an LDI event type.  Each entry pairs an LDI
 * event name with its device contract event counterpart.
 */
struct ldi_ev_cookie {
	char *ck_evname;	/* LDI event name, e.g. LDI_EV_OFFLINE */
	uint_t ck_sync;		/* nonzero for OFFLINE; presumably marks */
				/* sync/blockable events — TODO confirm */
	uint_t ck_ctype;	/* corresponding CT_DEV_EV_* contract type */
};

/* Table of supported LDI events; terminated by a NULL ck_evname entry. */
static struct ldi_ev_cookie ldi_ev_cookies[] = {
	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
	{ NULL}			/* must terminate list */
};
146 
147 void
148 ldi_init(void)
149 {
150 	int i;
151 
152 	ldi_handle_hash_count = 0;
153 	for (i = 0; i < LH_HASH_SZ; i++) {
154 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 		ldi_handle_hash[i] = NULL;
156 	}
157 	for (i = 0; i < LI_HASH_SZ; i++) {
158 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 		ldi_ident_hash[i] = NULL;
160 	}
161 
162 	/*
163 	 * Initialize the LDI event subsystem
164 	 */
165 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 	ldi_ev_callback_list.le_busy = 0;
168 	ldi_ev_callback_list.le_thread = NULL;
169 	list_create(&ldi_ev_callback_list.le_head,
170 	    sizeof (ldi_ev_callback_impl_t),
171 	    offsetof(ldi_ev_callback_impl_t, lec_list));
172 }
173 
174 /*
175  * LDI ident manipulation functions
176  */
177 static uint_t
178 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 {
180 	if (dip != NULL) {
181 		uintptr_t k = (uintptr_t)dip;
182 		k >>= (int)highbit(sizeof (struct dev_info));
183 		return ((uint_t)k);
184 	} else if (dev != DDI_DEV_T_NONE) {
185 		return (modid + getminor(dev) + getmajor(dev));
186 	} else {
187 		return (modid);
188 	}
189 }
190 
191 static struct ldi_ident **
192 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 {
194 	struct ldi_ident	**lipp = NULL;
195 	uint_t			index = LI_HASH(modid, dip, dev);
196 
197 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198 
199 	for (lipp = &(ldi_ident_hash[index]);
200 	    (*lipp != NULL);
201 	    lipp = &((*lipp)->li_next)) {
202 		if (((*lipp)->li_modid == modid) &&
203 		    ((*lipp)->li_major == major) &&
204 		    ((*lipp)->li_dip == dip) &&
205 		    ((*lipp)->li_dev == dev))
206 			break;
207 	}
208 
209 	ASSERT(lipp != NULL);
210 	return (lipp);
211 }
212 
/*
 * Find or create the ident for (mod_name, dip, dev, major) and return it
 * with its reference count incremented.
 *
 * A new ident is always preallocated with a sleeping allocation so that
 * no allocation occurs while the hash bucket lock is held; the spare is
 * freed if a matching ident already exists in the hash.
 */
static struct ldi_ident *
ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
{
	struct ldi_ident	*lip, **lipp;
	modid_t			modid;
	uint_t			index;

	ASSERT(mod_name != NULL);

	/* get the module id */
	modid = mod_name_to_modid(mod_name);
	ASSERT(modid != -1);

	/* allocate a new ident in case we need it */
	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);

	/* search the hash for a matching ident */
	index = LI_HASH(modid, dip, dev);
	mutex_enter(&ldi_ident_hash_lock[index]);
	lipp = ident_find_ref_nolock(modid, dip, dev, major);

	if (*lipp != NULL) {
		/* we found an ident in the hash; take a ref and reuse it */
		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
		(*lipp)->li_ref++;
		mutex_exit(&ldi_ident_hash_lock[index]);
		/* discard the preallocated spare */
		kmem_free(lip, sizeof (struct ldi_ident));
		return (*lipp);
	}

	/* initialize the new ident */
	lip->li_next = NULL;
	lip->li_ref = 1;
	lip->li_modid = modid;
	lip->li_major = major;
	lip->li_dip = dip;
	lip->li_dev = dev;
	/* lip was zeroed, so this copy is always NUL terminated */
	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);

	/* add it to the ident hash */
	lip->li_next = ldi_ident_hash[index];
	ldi_ident_hash[index] = lip;

	mutex_exit(&ldi_ident_hash_lock[index]);
	return (lip);
}
259 
260 static void
261 ident_hold(struct ldi_ident *lip)
262 {
263 	uint_t			index;
264 
265 	ASSERT(lip != NULL);
266 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
267 	mutex_enter(&ldi_ident_hash_lock[index]);
268 	ASSERT(lip->li_ref > 0);
269 	lip->li_ref++;
270 	mutex_exit(&ldi_ident_hash_lock[index]);
271 }
272 
/*
 * Drop a reference on an ident.  On the last release the ident is
 * unlinked from its hash chain and freed.
 */
static void
ident_release(struct ldi_ident *lip)
{
	struct ldi_ident	**lipp;
	uint_t			index;

	ASSERT(lip != NULL);
	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
	mutex_enter(&ldi_ident_hash_lock[index]);

	ASSERT(lip->li_ref > 0);
	if (--lip->li_ref > 0) {
		/* there are more references to this ident */
		mutex_exit(&ldi_ident_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this ident.  free it. */
	lipp = ident_find_ref_nolock(
	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);

	ASSERT((lipp != NULL) && (*lipp != NULL));
	*lipp = lip->li_next;
	mutex_exit(&ldi_ident_hash_lock[index]);
	kmem_free(lip, sizeof (struct ldi_ident));
}
299 
300 /*
301  * LDI handle manipulation functions
302  */
303 static uint_t
304 handle_hash_func(void *vp)
305 {
306 	uintptr_t k = (uintptr_t)vp;
307 	k >>= (int)highbit(sizeof (vnode_t));
308 	return ((uint_t)k);
309 }
310 
311 static struct ldi_handle **
312 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
313 {
314 	struct ldi_handle	**lhpp = NULL;
315 	uint_t			index = LH_HASH(vp);
316 
317 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
318 
319 	for (lhpp = &(ldi_handle_hash[index]);
320 	    (*lhpp != NULL);
321 	    lhpp = &((*lhpp)->lh_next)) {
322 		if (((*lhpp)->lh_ident == ident) &&
323 		    ((*lhpp)->lh_vp == vp))
324 			break;
325 	}
326 
327 	ASSERT(lhpp != NULL);
328 	return (lhpp);
329 }
330 
/*
 * Look up the layered handle for (vp, ident).  Returns the handle, or
 * NULL if none exists.
 *
 * NOTE(review): the chain link is dereferenced after the bucket lock is
 * dropped and no reference is taken here, so callers presumably hold a
 * reference that keeps the handle alive — confirm at call sites.
 */
static struct ldi_handle *
handle_find(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle	**lhpp;
	int			index = LH_HASH(vp);

	mutex_enter(&ldi_handle_hash_lock[index]);
	lhpp = handle_find_ref_nolock(vp, ident);
	mutex_exit(&ldi_handle_hash_lock[index]);
	ASSERT(lhpp != NULL);
	return (*lhpp);
}
343 
/*
 * Find or create the layered handle for (vp, ident) and return it with
 * its reference count incremented.
 *
 * A new handle is always preallocated with a sleeping allocation so no
 * allocation occurs while the hash bucket lock is held; the spare is
 * freed if a matching handle already exists.  A newly created handle
 * takes holds on both the ident and the vnode.
 */
static struct ldi_handle *
handle_alloc(vnode_t *vp, struct ldi_ident *ident)
{
	struct ldi_handle	*lhp, **lhpp;
	uint_t			index;

	ASSERT((vp != NULL) && (ident != NULL));

	/* allocate a new handle in case we need it */
	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);

	/* search the hash for a matching handle */
	index = LH_HASH(vp);
	mutex_enter(&ldi_handle_hash_lock[index]);
	lhpp = handle_find_ref_nolock(vp, ident);

	if (*lhpp != NULL) {
		/* we found a handle in the hash; take a ref and reuse it */
		(*lhpp)->lh_ref++;
		mutex_exit(&ldi_handle_hash_lock[index]);

		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
		    (void *)*lhpp, (void *)ident, (void *)vp,
		    mod_major_to_name(getmajor(vp->v_rdev)),
		    getminor(vp->v_rdev)));

		/* discard the preallocated spare */
		kmem_free(lhp, sizeof (struct ldi_handle));
		return (*lhpp);
	}

	/* initialize the new handle */
	lhp->lh_ref = 1;
	lhp->lh_vp = vp;
	lhp->lh_ident = ident;
#ifdef	LDI_OBSOLETE_EVENT
	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
#endif

	/* classify the handle: STREAMS vs. char/block device */
	lhp->lh_type = 0;
	if (STREAMSTAB(getmajor(vp->v_rdev))) {
		ASSERT(vp->v_type == VCHR);
		lhp->lh_type |= LH_STREAM;
	} else {
		lhp->lh_type |= LH_CBDEV;
	}

	/* get holds on other objects */
	ident_hold(ident);
	ASSERT(vp->v_count >= 1);
	VN_HOLD(vp);

	/* add it to the handle hash */
	lhp->lh_next = ldi_handle_hash[index];
	ldi_handle_hash[index] = lhp;
	atomic_add_long(&ldi_handle_hash_count, 1);

	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)ident, (void *)vp,
	    mod_major_to_name(getmajor(vp->v_rdev)),
	    getminor(vp->v_rdev)));

	mutex_exit(&ldi_handle_hash_lock[index]);
	return (lhp);
}
411 
/*
 * Drop a reference on a layered handle.  On the last release the handle
 * is unlinked from its hash chain, its vnode and ident holds are
 * released, and the handle is freed.
 */
static void
handle_release(struct ldi_handle *lhp)
{
	struct ldi_handle	**lhpp;
	uint_t			index;

	ASSERT(lhp != NULL);

	index = LH_HASH(lhp->lh_vp);
	mutex_enter(&ldi_handle_hash_lock[index]);

	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
	    getminor(lhp->lh_vp->v_rdev)));

	ASSERT(lhp->lh_ref > 0);
	if (--lhp->lh_ref > 0) {
		/* there are more references to this handle */
		mutex_exit(&ldi_handle_hash_lock[index]);
		return;
	}

	/* this was the last reference/open for this handle.  free it. */
	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
	ASSERT((lhpp != NULL) && (*lhpp != NULL));
	*lhpp = lhp->lh_next;
	atomic_add_long(&ldi_handle_hash_count, -1);
	mutex_exit(&ldi_handle_hash_lock[index]);

	/* drop the holds taken in handle_alloc(), then free */
	VN_RELE(lhp->lh_vp);
	ident_release(lhp->lh_ident);
#ifdef	LDI_OBSOLETE_EVENT
	mutex_destroy(lhp->lh_lock);
#endif
	kmem_free(lhp, sizeof (struct ldi_handle));
}
450 
451 #ifdef	LDI_OBSOLETE_EVENT
452 /*
453  * LDI event manipulation functions
454  */
455 static void
456 handle_event_add(ldi_event_t *lep)
457 {
458 	struct ldi_handle *lhp = lep->le_lhp;
459 
460 	ASSERT(lhp != NULL);
461 
462 	mutex_enter(lhp->lh_lock);
463 	if (lhp->lh_events == NULL) {
464 		lhp->lh_events = lep;
465 		mutex_exit(lhp->lh_lock);
466 		return;
467 	}
468 
469 	lep->le_next = lhp->lh_events;
470 	lhp->lh_events->le_prev = lep;
471 	lhp->lh_events = lep;
472 	mutex_exit(lhp->lh_lock);
473 }
474 
475 static void
476 handle_event_remove(ldi_event_t *lep)
477 {
478 	struct ldi_handle *lhp = lep->le_lhp;
479 
480 	ASSERT(lhp != NULL);
481 
482 	mutex_enter(lhp->lh_lock);
483 	if (lep->le_prev)
484 		lep->le_prev->le_next = lep->le_next;
485 	if (lep->le_next)
486 		lep->le_next->le_prev = lep->le_prev;
487 	if (lhp->lh_events == lep)
488 		lhp->lh_events = lep->le_next;
489 	mutex_exit(lhp->lh_lock);
490 
491 }
492 
493 static void
494 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
495     void *arg, void *bus_impldata)
496 {
497 	ldi_event_t *lep = (ldi_event_t *)arg;
498 
499 	ASSERT(lep != NULL);
500 
501 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
502 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
503 	    (void *)dip, (void *)event_cookie, (void *)lep));
504 
505 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
506 }
507 #endif
508 
509 /*
510  * LDI open helper functions
511  */
512 
513 /* get a vnode to a device by dev_t and otyp */
/*
 * Get a vnode to a device by dev_t and otyp.
 *
 * Returns 0 with *vpp set to a spec vnode on success, otherwise:
 *   EINVAL - bad input parameters
 *   ENODEV - no device node could be held for dev
 *   ENXIO  - a STREAMS device was requested with a non-char otyp
 */
static int
ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
{
	dev_info_t		*dip;
	vnode_t			*vp;

	/* sanity check required input parameters */
	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
		return (EINVAL);

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (ENODEV);

	/* STREAMS devices may only be opened as character devices */
	if (STREAMSTAB(getmajor(dev)) && (otyp != OTYP_CHR)) {
		ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
		return (ENXIO);
	}

	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}
539 
540 /* get a vnode to a device by pathname */
/*
 * Get a vnode to a device by absolute pathname.
 *
 * Once the root filesystem is mounted the path is resolved with
 * lookupnameat(); before that (or if the lookup yields nothing) the
 * path is resolved directly against the device tree via
 * resolve_pathname(), which also handles OBP and /devices paths.
 *
 * Returns 0 with *vpp set to a spec vnode on success, otherwise
 * EINVAL, ENODEV, or ENXIO.
 */
static int
ldi_vp_from_name(char *path, vnode_t **vpp)
{
	vnode_t			*vp = NULL;
	int			ret;

	/* sanity check required input parameters */
	if ((path == NULL) || (vpp == NULL))
		return (EINVAL);

	if (modrootloaded) {
		cred_t *saved_cred = curthread->t_cred;

		/* we don't want lookupname to fail because of credentials */
		curthread->t_cred = kcred;

		/*
		 * all lookups should be done in the global zone.  but
		 * lookupnameat() won't actually do this if an absolute
		 * path is passed in.  since the ldi interfaces require an
		 * absolute path we pass lookupnameat() a pointer to
		 * the character after the leading '/' and tell it to
		 * start searching at the current system root directory.
		 */
		ASSERT(*path == '/');
		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
		    &vp, rootdir);

		/* restore this threads credentials */
		curthread->t_cred = saved_cred;

		if (ret == 0) {
			/* the path must name a char or block spec node */
			if (!vn_matchops(vp, spec_getvnodeops()) ||
			    !VTYP_VALID(vp->v_type)) {
				VN_RELE(vp);
				return (ENXIO);
			}
		}
	}

	if (vp == NULL) {
		dev_info_t	*dip;
		dev_t		dev;
		int		spec_type;

		/*
		 * Root is not mounted, the minor node is not specified,
		 * or an OBP path has been specified.
		 */

		/*
		 * Determine if path can be pruned to produce an
		 * OBP or devfs path for resolve_pathname.
		 */
		if (strncmp(path, "/devices/", 9) == 0)
			path += strlen("/devices");

		/*
		 * if no minor node was specified the DEFAULT minor node
		 * will be returned.  if there is no DEFAULT minor node
		 * one will be fabricated of type S_IFCHR with the minor
		 * number equal to the instance number.
		 */
		ret = resolve_pathname(path, &dip, &dev, &spec_type);
		if (ret != 0)
			return (ENODEV);

		ASSERT(STYP_VALID(spec_type));
		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
		spec_assoc_vp_with_devi(vp, dip);
		ddi_release_devi(dip);	/* from resolve_pathname() */
	}

	*vpp = vp;
	return (0);
}
617 
618 static int
619 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
620 {
621 	char		*devidstr;
622 	ddi_prop_t	*propp;
623 
624 	/* convert devid as a string property */
625 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
626 		return (0);
627 
628 	/*
629 	 * Search for the devid.  For speed and ease in locking this
630 	 * code directly uses the property implementation.  See
631 	 * ddi_common_devid_to_devlist() for a comment as to why.
632 	 */
633 	mutex_enter(&(DEVI(dip)->devi_lock));
634 
635 	/* check if there is a DDI_DEV_T_NONE devid property */
636 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
637 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
638 	if (propp != NULL) {
639 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
640 			/* a DDI_DEV_T_NONE devid exists and matchs */
641 			mutex_exit(&(DEVI(dip)->devi_lock));
642 			ddi_devid_str_free(devidstr);
643 			return (1);
644 		} else {
645 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
646 			mutex_exit(&(DEVI(dip)->devi_lock));
647 			ddi_devid_str_free(devidstr);
648 			return (0);
649 		}
650 	}
651 
652 	/* check if there is a devt specific devid property */
653 	propp = i_ddi_prop_search(dev,
654 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
655 	if (propp != NULL) {
656 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
657 			/* a devt specific devid exists and matchs */
658 			mutex_exit(&(DEVI(dip)->devi_lock));
659 			ddi_devid_str_free(devidstr);
660 			return (1);
661 		} else {
662 			/* a devt specific devid exists and doesn't match */
663 			mutex_exit(&(DEVI(dip)->devi_lock));
664 			ddi_devid_str_free(devidstr);
665 			return (0);
666 		}
667 	}
668 
669 	/* we didn't find any devids associated with the device */
670 	mutex_exit(&(DEVI(dip)->devi_lock));
671 	ddi_devid_str_free(devidstr);
672 	return (0);
673 }
674 
675 /* get a handle to a device by devid and minor name */
/*
 * Get a vnode to a device by devid and minor name.
 *
 * Walks the devt list returned by ddi_lyr_devid_to_devlist(), holding
 * each candidate and re-verifying its devid, because the devid could
 * have changed between the list lookup and the hold.  On success the
 * loop exits via break with dip held and styp set.
 *
 * Returns 0 with *vpp set on success, EINVAL on bad parameters, or
 * ENODEV if no matching device is found.
 */
static int
ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
{
	dev_info_t		*dip;
	vnode_t			*vp;
	int			ret, i, ndevs, styp;
	dev_t			dev, *devs;

	/* sanity check required input parameters */
	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
		return (EINVAL);

	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
		return (ENODEV);

	for (i = 0; i < ndevs; i++) {
		dev = devs[i];

		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
			continue;

		/*
		 * now we have to verify that the devid of the disk
		 * still matches what was requested.
		 *
		 * we have to do this because the devid could have
		 * changed between the call to ddi_lyr_devid_to_devlist()
		 * and e_ddi_hold_devi_by_dev().  this is because when
		 * ddi_lyr_devid_to_devlist() returns a list of devts
		 * there is no kind of hold on those devts so a device
		 * could have been replaced out from under us in the
		 * interim.
		 */
		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
		    NULL, &styp) == DDI_SUCCESS) &&
		    ldi_devid_match(devid, dip, dev))
			break;	/* dip stays held; styp is valid */

		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
	}

	ddi_lyr_free_devlist(devs, ndevs);

	/* loop ran to completion without a match */
	if (i == ndevs)
		return (ENODEV);

	ASSERT(STYP_VALID(styp));
	vp = makespecvp(dev, STYP_TO_VTYP(styp));
	spec_assoc_vp_with_devi(vp, dip);
	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */

	*vpp = vp;
	return (0);
}
731 
732 /* given a vnode, open a device */
/*
 * Open a device given a spec vnode and return a new layered handle.
 *
 * *vpp is in/out: VOP_OPEN() may replace the vnode (clone open), in
 * which case the new vnode is stored back through vpp and the hold on
 * the clone-returned vnode is effectively transferred to the handle.
 *
 * Returns 0 with *lhp set on success, ENXIO for non-device vnodes or
 * drivers without cb_ops, or the VOP_OPEN() error.
 */
static int
ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
    ldi_handle_t *lhp, struct ldi_ident *li)
{
	struct ldi_handle	*nlhp;
	vnode_t			*vp;
	int			err;

	ASSERT((vpp != NULL) && (*vpp != NULL));
	ASSERT((lhp != NULL) && (li != NULL));

	vp = *vpp;
	/* if the vnode passed in is not a device, then bail */
	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
		return (ENXIO);

	/*
	 * the caller may have specified a node that
	 * doesn't have cb_ops defined.  the ldi doesn't yet
	 * support opening devices without a valid cb_ops.
	 */
	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
		return (ENXIO);

	/* open the device */
	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
		return (err);

	/* possible clone open, make sure that we still have a spec node */
	ASSERT(vn_matchops(vp, spec_getvnodeops()));

	/* handle_alloc takes its own hold on vp */
	nlhp = handle_alloc(vp, li);

	if (vp != *vpp) {
		/*
		 * allocating the layered handle took a new hold on the vnode
		 * so we can release the hold that was returned by the clone
		 * open
		 */
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi clone open", (void *)nlhp));
	} else {
		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
		    "ldi open", (void *)nlhp));
	}

	/* Flush back any dirty pages associated with the device. */
	if (nlhp->lh_type & LH_CBDEV) {
		vnode_t	*cvp = common_specvp(nlhp->lh_vp);
		dev_t	dev = cvp->v_rdev;

		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred, NULL);
		bflush(dev);
	}

	*vpp = vp;
	*lhp = (ldi_handle_t)nlhp;
	return (0);
}
792 
793 /* Call a drivers prop_op(9E) interface */
794 static int
795 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
796     int flags, char *name, caddr_t valuep, int *lengthp)
797 {
798 	struct dev_ops	*ops = NULL;
799 	int		res;
800 
801 	ASSERT((dip != NULL) && (name != NULL));
802 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
803 	ASSERT(lengthp != NULL);
804 
805 	/*
806 	 * we can only be invoked after a driver has been opened and
807 	 * someone has a layered handle to it, so there had better be
808 	 * a valid ops vector.
809 	 */
810 	ops = DEVI(dip)->devi_ops;
811 	ASSERT(ops && ops->devo_cb_ops);
812 
813 	/*
814 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
815 	 * nulldev or even NULL.
816 	 */
817 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
818 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
819 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
820 		return (DDI_PROP_NOT_FOUND);
821 	}
822 
823 	/* check if this is actually DDI_DEV_T_ANY query */
824 	if (flags & LDI_DEV_T_ANY) {
825 		flags &= ~LDI_DEV_T_ANY;
826 		dev = DDI_DEV_T_ANY;
827 	}
828 
829 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
830 	return (res);
831 }
832 
/*
 * Free callback for property buffers created by i_ldi_prop_op_alloc();
 * invoked via the pdd_prop_free hook when the consumer calls
 * ddi_prop_free().
 */
static void
i_ldi_prop_op_free(struct prop_driver_data *pdd)
{
	kmem_free(pdd, pdd->pdd_size);
}
838 
839 static caddr_t
840 i_ldi_prop_op_alloc(int prop_len)
841 {
842 	struct prop_driver_data	*pdd;
843 	int			pdd_size;
844 
845 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
846 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
847 	pdd->pdd_size = pdd_size;
848 	pdd->pdd_prop_free = i_ldi_prop_op_free;
849 	return ((caddr_t)&pdd[1]);
850 }
851 
852 /*
853  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
854  * by the typed ldi property lookup interfaces.
855  */
/*
 * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
 * by the typed ldi property lookup interfaces.
 *
 * Two-phase lookup: first query the property length (PROP_LEN), then
 * allocate a ddi_prop_free()-compatible buffer and fetch the value
 * (PROP_LEN_AND_VAL_BUF).  The length/alignment sanity checks are
 * repeated after the second call because a dynamic property can change
 * between the two queries.
 *
 * On success returns DDI_PROP_SUCCESS with *datap pointing at the
 * value (caller frees via ddi_prop_free()) and *lengthp set; otherwise
 * DDI_PROP_NOT_FOUND.
 */
static int
i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
    caddr_t *datap, int *lengthp, int elem_size)
{
	caddr_t	prop_val;
	int	prop_len, res;

	ASSERT((dip != NULL) && (name != NULL));
	ASSERT((datap != NULL) && (lengthp != NULL));

	/*
	 * first call the drivers prop_op() interface to allow it
	 * it to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
	if (res != DDI_PROP_SUCCESS)
		return (DDI_PROP_NOT_FOUND);

	/* sanity check the property length */
	if (prop_len == 0) {
		/*
		 * the ddi typed interfaces don't allow a drivers to
		 * create properties with a length of 0.  so we should
		 * prevent drivers from returning 0 length dynamic
		 * properties for typed property lookups.
		 */
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0))
		return (DDI_PROP_NOT_FOUND);

	/*
	 * got it.  now allocate a prop_driver_data struct so that the
	 * user can free the property via ddi_prop_free().
	 */
	prop_val = i_ldi_prop_op_alloc(prop_len);

	/* lookup the property again, this time get the value */
	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
	if (res != DDI_PROP_SUCCESS) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length */
	if (prop_len == 0) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/* sanity check the property length against the element size */
	if (elem_size && ((prop_len % elem_size) != 0)) {
		ddi_prop_free(prop_val);
		return (DDI_PROP_NOT_FOUND);
	}

	/*
	 * return the prop_driver_data struct and, optionally, the length
	 * of the data.
	 */
	*datap = prop_val;
	*lengthp = prop_len;

	return (DDI_PROP_SUCCESS);
}
925 
926 /*
927  * i_check_string looks at a string property and makes sure its
928  * a valid null terminated string
929  */
930 static int
931 i_check_string(char *str, int prop_len)
932 {
933 	int i;
934 
935 	ASSERT(str != NULL);
936 
937 	for (i = 0; i < prop_len; i++) {
938 		if (str[i] == '\0')
939 			return (0);
940 	}
941 	return (1);
942 }
943 
944 /*
945  * i_pack_string_array takes a a string array property that is represented
946  * as a concatenation of strings (with the NULL character included for
947  * each string) and converts it into a format that can be returned by
948  * ldi_prop_lookup_string_array.
949  */
950 static int
951 i_pack_string_array(char *str_concat, int prop_len,
952     char ***str_arrayp, int *nelemp)
953 {
954 	int i, nelem, pack_size;
955 	char **str_array, *strptr;
956 
957 	/*
958 	 * first we need to sanity check the input string array.
959 	 * in essence this can be done my making sure that the last
960 	 * character of the array passed in is null.  (meaning the last
961 	 * string in the array is NULL terminated.
962 	 */
963 	if (str_concat[prop_len - 1] != '\0')
964 		return (1);
965 
966 	/* now let's count the number of strings in the array */
967 	for (nelem = i = 0; i < prop_len; i++)
968 		if (str_concat[i] == '\0')
969 			nelem++;
970 	ASSERT(nelem >= 1);
971 
972 	/* now let's allocate memory for the new packed property */
973 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
974 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
975 
976 	/* let's copy the actual string data into the new property */
977 	strptr = (char *)&(str_array[nelem + 1]);
978 	bcopy(str_concat, strptr, prop_len);
979 
980 	/* now initialize the string array pointers */
981 	for (i = 0; i < nelem; i++) {
982 		str_array[i] = strptr;
983 		strptr += strlen(strptr) + 1;
984 	}
985 	str_array[nelem] = NULL;
986 
987 	/* set the return values */
988 	*str_arrayp = str_array;
989 	*nelemp = nelem;
990 
991 	return (0);
992 }
993 
994 
995 /*
996  * LDI Project private device usage interfaces
997  */
998 
/*
 * Get a count of how many devices are currently open by different
 * consumers.  This is an unlocked snapshot of the handle hash count
 * and may be stale by the time the caller examines it.
 */
int
ldi_usage_count()
{
	return (ldi_handle_hash_count);
}
1007 
/*
 * Fill in the target-device half of an ldi_usage_t from the target's
 * vnode.  Takes a hold on the target dip (via the snode's s_dip, or by
 * dev_t lookup) which the caller is responsible for releasing.
 *
 * NOTE(review): tgt_dip may be left NULL if neither lookup succeeds —
 * callers appear to check for that; confirm.
 */
static void
ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
{
	dev_info_t	*dip;
	dev_t		dev;

	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));

	/* get the target devt */
	dev = vp->v_rdev;

	/* try to get the target dip */
	dip = VTOCS(vp)->s_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the target information */
	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
	ldi_usage->tgt_devt = dev;
	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
	ldi_usage->tgt_dip = dip;
}
1034 
1035 
/*
 * ldi_usage_walker_helper() - report device usage for a single source
 * ident (lip) accessing an open target device (vp).  fills in an
 * ldi_usage_t and invokes callback() once per source/target pairing.
 * iteration of device instances stops as soon as a callback returns
 * something other than LDI_USAGE_CONTINUE; the last callback return
 * value is returned to the caller.
 */
static int
ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
    void *arg, int (*callback)(const ldi_usage_t *, void *))
{
	ldi_usage_t	ldi_usage;
	struct devnames	*dnp;
	dev_info_t	*dip;
	major_t		major;
	dev_t		dev;
	int		ret = LDI_USAGE_CONTINUE;

	/*
	 * set the target device information.
	 * NOTE(review): this helper appears to take a hold on the target
	 * dip (released below via ddi_release_devi(ldi_usage.tgt_dip)) --
	 * confirm against ldi_usage_walker_tgt_helper().
	 */
	ldi_usage_walker_tgt_helper(&ldi_usage, vp);

	/* get the source devt */
	dev = lip->li_dev;

	/* try to get the source dip, taking a driver/devi hold on it */
	dip = lip->li_dip;
	if (dip != NULL) {
		e_ddi_hold_devi(dip);
	} else if (dev != DDI_DEV_T_NONE) {
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	}

	/* set the valid source information */
	ldi_usage.src_modid = lip->li_modid;
	ldi_usage.src_name = lip->li_modname;
	ldi_usage.src_devt = dev;
	ldi_usage.src_dip = dip;

	/*
	 * if the source ident represents either:
	 *
	 * - a kernel module (and not a device or device driver)
	 * - a device node
	 *
	 * then we currently have all the info we need to report the
	 * usage information so invoke the callback function.
	 */
	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
	    (dip != NULL)) {
		ret = callback(&ldi_usage, arg);
		/* drop the holds taken above before returning */
		if (dip != NULL)
			ddi_release_devi(dip);
		if (ldi_usage.tgt_dip != NULL)
			ddi_release_devi(ldi_usage.tgt_dip);
		return (ret);
	}

	/*
	 * now this is kinda gross.
	 *
	 * what we do here is attempt to associate every device instance
	 * of the source driver on the system with the open target driver.
	 * we do this because we don't know which instance of the device
	 * could potentially access the lower device so we assume that all
	 * the instances could access it.
	 *
	 * there are two ways we could have gotten here:
	 *
	 * 1) this layered ident represents one created using only a
	 *    major number or a driver module name.  this means that when
	 *    it was created we could not associate it with a particular
	 *    dev_t or device instance.
	 *
	 *    when could this possibly happen you ask?
	 *
	 *    a perfect example of this is streams persistent links.
	 *    when a persistent streams link is formed we can't associate
	 *    the lower device stream with any particular upper device
	 *    stream or instance.  this is because any particular upper
	 *    device stream could be closed, then another could be
	 *    opened with a different dev_t and device instance, and it
	 *    would still have access to the lower linked stream.
	 *
	 *    since any instance of the upper streams driver could
	 *    potentially access the lower stream whenever it wants,
	 *    we represent that here by associating the opened lower
	 *    device with every existing device instance of the upper
	 *    streams driver.
	 *
	 * 2) This case should really never happen but we'll include it
	 *    for completeness.
	 *
	 *    it's possible that we could have gotten here because we
	 *    have a dev_t for the upper device but we couldn't find a
	 *    dip associated with that dev_t.
	 *
	 *    the only types of devices that have a dev_t without an
	 *    associated dip are unbound DLPIv2 network devices.  These
	 *    types of devices exist to be able to attach a stream to any
	 *    instance of a hardware network device.  since these types of
	 *    devices are usually hardware devices they should never
	 *    really have other devices open.
	 */
	if (dev != DDI_DEV_T_NONE)
		major = getmajor(dev);
	else
		major = lip->li_major;

	ASSERT((major >= 0) && (major < devcnt));

	/*
	 * walk every instance of the source driver, re-invoking the
	 * callback with each instance as the source dip.  the per-driver
	 * dn_lock is dropped around the callback; the current dip is
	 * kept alive across that window by the e_ddi_hold_devi() hold.
	 */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = dnp->dn_head;
	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
		e_ddi_hold_devi(dip);
		UNLOCK_DEV_OPS(&dnp->dn_lock);

		/* set the source dip */
		ldi_usage.src_dip = dip;

		/* invoke the callback function */
		ret = callback(&ldi_usage, arg);

		LOCK_DEV_OPS(&dnp->dn_lock);
		ddi_release_devi(dip);
		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* if there was a target dip, release it */
	if (ldi_usage.tgt_dip != NULL)
		ddi_release_devi(ldi_usage.tgt_dip);

	return (ret);
}
1164 
1165 /*
1166  * ldi_usage_walker() - this walker reports LDI kernel device usage
1167  * information via the callback() callback function.  the LDI keeps track
1168  * of what devices are being accessed in its own internal data structures.
1169  * this function walks those data structures to determine device usage.
1170  */
1171 void
1172 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1173 {
1174 	struct ldi_handle	*lhp;
1175 	struct ldi_ident	*lip;
1176 	vnode_t			*vp;
1177 	int			i;
1178 	int			ret = LDI_USAGE_CONTINUE;
1179 
1180 	for (i = 0; i < LH_HASH_SZ; i++) {
1181 		mutex_enter(&ldi_handle_hash_lock[i]);
1182 
1183 		lhp = ldi_handle_hash[i];
1184 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1185 			lip = lhp->lh_ident;
1186 			vp = lhp->lh_vp;
1187 
1188 			/* invoke the devinfo callback function */
1189 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1190 
1191 			lhp = lhp->lh_next;
1192 		}
1193 		mutex_exit(&ldi_handle_hash_lock[i]);
1194 
1195 		if (ret != LDI_USAGE_CONTINUE)
1196 			break;
1197 	}
1198 }
1199 
1200 /*
1201  * LDI Project private interfaces (streams linking interfaces)
1202  *
1203  * Streams supports a type of built in device layering via linking.
1204  * Certain types of streams drivers can be streams multiplexors.
1205  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1206  * These operations allows other streams devices to be linked under the
1207  * multiplexor.  By definition all streams multiplexors are devices
1208  * so this linking is a type of device layering where the multiplexor
1209  * device is layered on top of the device linked below it.
1210  */
1211 
1212 /*
1213  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1214  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1215  *
1216  * The streams framework keeps track of links via the file_t of the lower
1217  * stream.  The LDI keeps track of devices using a vnode.  In the case
 * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1219  * a file_t that the streams framework can use to track the linkage.
1220  */
int
ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
{
	/* arg is a layered handle, not a file descriptor (see _I_PLINK_LH) */
	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
	vnode_t			*vpdown;
	file_t			*fpdown;
	int			err;

	if (lhp == NULL)
		return (EINVAL);

	vpdown = lhp->lh_vp;
	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
	ASSERT(cmd == _I_PLINK_LH);

	/*
	 * create a new lower vnode and a file_t that points to it,
	 * streams linking requires a file_t.  falloc() returns with
	 * fpdown locked, so drop f_tlock before handing it off.
	 */
	VN_HOLD(vpdown);
	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
	mutex_exit(&fpdown->f_tlock);

	/*
	 * try to establish the link.  on success, ownership of fpdown
	 * (and the vnode hold) passes to the streams framework; the
	 * extra f_count is dropped later in ldi_mlink_fp().
	 */
	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);

	if (err != 0) {
		/* the link failed, free the file_t and release the vnode */
		mutex_enter(&fpdown->f_tlock);
		unfalloc(fpdown);
		VN_RELE(vpdown);
	}

	return (err);
}
1257 
1258 /*
1259  * ldi_mlink_fp() is invoked for all successful streams linkages created
1260  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1261  * in its internal state so that the devinfo snapshot code has some
1262  * observability into streams device linkage information.
1263  */
void
ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
{
	vnode_t			*vp = fpdown->f_vnode;
	struct snode		*sp, *csp;
	ldi_ident_t		li;
	major_t			major;
	int			ret;

	/* if the lower stream is not a device then return */
	if (!vn_matchops(vp, spec_getvnodeops()))
		return;

	ASSERT(!servicing_interrupt());

	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
	    (void *)stp, (void *)fpdown));

	sp = VTOS(vp);
	csp = VTOS(sp->s_commonvp);

	/* check if this was a plink via a layered handle */
	if (lhlink) {
		/*
		 * increment the common snode s_count.
		 *
		 * this is done because after the link operation there
		 * are two ways that s_count can be decremented.
		 *
		 * when the layered handle used to create the link is
		 * closed, spec_close() is called and it will decrement
		 * s_count in the common snode.  if we don't increment
		 * s_count here then this could cause spec_close() to
		 * actually close the device while it's still linked
		 * under a multiplexer.
		 *
		 * also, when the lower stream is unlinked, closef() is
		 * called for the file_t associated with this snode.
		 * closef() will call spec_close(), which will decrement
		 * s_count.  if we don't increment s_count here then this
		 * could cause spec_close() to actually close the device
		 * while there may still be valid layered handles
		 * pointing to it.
		 */
		mutex_enter(&csp->s_lock);
		ASSERT(csp->s_count >= 1);
		csp->s_count++;
		mutex_exit(&csp->s_lock);

		/*
		 * decrement the f_count.
		 * this is done because the layered driver framework does
		 * not actually cache a copy of the file_t allocated to
		 * do the link.  this is done here instead of in ldi_mlink_lh()
		 * because there is a window in ldi_mlink_lh() between where
		 * mlink_file() returns and we would decrement the f_count
		 * when the stream could be unlinked.
		 */
		mutex_enter(&fpdown->f_tlock);
		fpdown->f_count--;
		mutex_exit(&fpdown->f_tlock);
	}

	/*
	 * NOTE: here we rely on the streams subsystem not allowing
	 * a stream to be multiplexed more than once.  if this
	 * changes, we break.
	 *
	 * mark the snode/stream as multiplexed
	 */
	mutex_enter(&sp->s_lock);
	ASSERT(!(sp->s_flag & SMUXED));
	sp->s_flag |= SMUXED;
	mutex_exit(&sp->s_lock);

	/* get a layered ident for the upper stream */
	if (type == LINKNORMAL) {
		/*
		 * if the link is not persistent then we can associate
		 * the upper stream with a dev_t.  this is because the
		 * upper stream is associated with a vnode, which is
		 * associated with a dev_t and this binding can't change
		 * during the life of the stream.  since the link isn't
		 * persistent once the stream is destroyed the link is
		 * destroyed.  so the dev_t will be valid for the life
		 * of the link.
		 */
		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
	} else {
		/*
		 * if the link is persistent we can only associate the
		 * link with a driver (and not a dev_t.)  this is
		 * because subsequent opens of the upper device may result
		 * in a different stream (and dev_t) having access to
		 * the lower stream.
		 *
		 * for example, if the upper stream is closed after the
		 * persistent link operation is completed, a subsequent
		 * open of the upper device will create a new stream which
		 * may have a different dev_t and an unlink operation
		 * can be performed using this new upper stream.
		 */
		ASSERT(type == LINKPERSIST);
		major = getmajor(stp->sd_vnode->v_rdev);
		ret = ldi_ident_from_major(major, &li);
	}

	/* record the link in the LDI's usage tracking structures */
	ASSERT(ret == 0);
	(void) handle_alloc(vp, (struct ldi_ident *)li);
	ldi_ident_release(li);
}
1376 
/*
 * ldi_munlink_fp() - undo the internal LDI accounting performed by
 * ldi_mlink_fp() when a streams link is dissolved: clear the SMUXED
 * flag on the lower snode and release the handle that was allocated
 * to track the linkage.
 */
void
ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
{
	struct ldi_handle	*lhp;
	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
	struct snode		*sp;
	ldi_ident_t		li;
	major_t			major;
	int			ret;

	/* if the lower stream is not a device then return */
	if (!vn_matchops(vp, spec_getvnodeops()))
		return;

	ASSERT(!servicing_interrupt());
	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));

	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
	    (void *)stp, (void *)fpdown));

	/*
	 * NOTE: here we rely on the streams subsystem not allowing
	 * a stream to be multiplexed more than once.  if this
	 * changes, we break.
	 *
	 * mark the snode/stream as not multiplexed
	 */
	sp = VTOS(vp);
	mutex_enter(&sp->s_lock);
	ASSERT(sp->s_flag & SMUXED);
	sp->s_flag &= ~SMUXED;
	mutex_exit(&sp->s_lock);

	/*
	 * clear the owner for this snode
	 * see the comment in ldi_mlink_fp() for information about how
	 * the ident is allocated
	 */
	if (type == LINKNORMAL) {
		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
	} else {
		ASSERT(type == LINKPERSIST);
		major = getmajor(stp->sd_vnode->v_rdev);
		ret = ldi_ident_from_major(major, &li);
	}

	/* find and release the handle created by ldi_mlink_fp() */
	ASSERT(ret == 0);
	lhp = handle_find(vp, (struct ldi_ident *)li);
	handle_release(lhp);
	ldi_ident_release(li);
}
1429 
1430 /*
1431  * LDI Consolidation private interfaces
1432  */
1433 int
1434 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1435 {
1436 	struct modctl		*modp;
1437 	major_t			major;
1438 	char			*name;
1439 
1440 	if ((modlp == NULL) || (lip == NULL))
1441 		return (EINVAL);
1442 
1443 	ASSERT(!servicing_interrupt());
1444 
1445 	modp = mod_getctl(modlp);
1446 	if (modp == NULL)
1447 		return (EINVAL);
1448 	name = modp->mod_modname;
1449 	if (name == NULL)
1450 		return (EINVAL);
1451 	major = mod_name_to_major(name);
1452 
1453 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1454 
1455 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1456 	    "ldi_ident_from_mod", (void *)*lip, name));
1457 
1458 	return (0);
1459 }
1460 
1461 ldi_ident_t
1462 ldi_ident_from_anon()
1463 {
1464 	ldi_ident_t	lip;
1465 
1466 	ASSERT(!servicing_interrupt());
1467 
1468 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1469 
1470 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1471 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1472 
1473 	return (lip);
1474 }
1475 
1476 
1477 /*
1478  * LDI Public interfaces
1479  */
1480 int
1481 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1482 {
1483 	struct stdata		*stp;
1484 	dev_t			dev;
1485 	char			*name;
1486 
1487 	if ((sq == NULL) || (lip == NULL))
1488 		return (EINVAL);
1489 
1490 	ASSERT(!servicing_interrupt());
1491 
1492 	stp = sq->q_stream;
1493 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1494 		return (EINVAL);
1495 
1496 	dev = stp->sd_vnode->v_rdev;
1497 	name = mod_major_to_name(getmajor(dev));
1498 	if (name == NULL)
1499 		return (EINVAL);
1500 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1501 
1502 	LDI_ALLOCFREE((CE_WARN,
1503 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1504 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1505 	    (void *)stp));
1506 
1507 	return (0);
1508 }
1509 
1510 int
1511 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1512 {
1513 	char			*name;
1514 
1515 	if (lip == NULL)
1516 		return (EINVAL);
1517 
1518 	ASSERT(!servicing_interrupt());
1519 
1520 	name = mod_major_to_name(getmajor(dev));
1521 	if (name == NULL)
1522 		return (EINVAL);
1523 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1524 
1525 	LDI_ALLOCFREE((CE_WARN,
1526 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1527 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1528 
1529 	return (0);
1530 }
1531 
1532 int
1533 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1534 {
1535 	struct dev_info		*devi = (struct dev_info *)dip;
1536 	char			*name;
1537 
1538 	if ((dip == NULL) || (lip == NULL))
1539 		return (EINVAL);
1540 
1541 	ASSERT(!servicing_interrupt());
1542 
1543 	name = mod_major_to_name(devi->devi_major);
1544 	if (name == NULL)
1545 		return (EINVAL);
1546 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1547 
1548 	LDI_ALLOCFREE((CE_WARN,
1549 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1550 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1551 
1552 	return (0);
1553 }
1554 
1555 int
1556 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1557 {
1558 	char			*name;
1559 
1560 	if (lip == NULL)
1561 		return (EINVAL);
1562 
1563 	ASSERT(!servicing_interrupt());
1564 
1565 	name = mod_major_to_name(major);
1566 	if (name == NULL)
1567 		return (EINVAL);
1568 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1569 
1570 	LDI_ALLOCFREE((CE_WARN,
1571 	    "%s: li=0x%p, mod=%s",
1572 	    "ldi_ident_from_major", (void *)*lip, name));
1573 
1574 	return (0);
1575 }
1576 
1577 void
1578 ldi_ident_release(ldi_ident_t li)
1579 {
1580 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1581 	char			*name;
1582 
1583 	if (li == NULL)
1584 		return;
1585 
1586 	ASSERT(!servicing_interrupt());
1587 
1588 	name = ident->li_modname;
1589 
1590 	LDI_ALLOCFREE((CE_WARN,
1591 	    "%s: li=0x%p, mod=%s",
1592 	    "ldi_ident_release", (void *)li, name));
1593 
1594 	ident_release((struct ldi_ident *)li);
1595 }
1596 
1597 /* get a handle to a device by dev_t and otyp */
1598 int
1599 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1600     ldi_handle_t *lhp, ldi_ident_t li)
1601 {
1602 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1603 	int			ret;
1604 	vnode_t			*vp;
1605 
1606 	/* sanity check required input parameters */
1607 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1608 	    (lhp == NULL) || (lip == NULL))
1609 		return (EINVAL);
1610 
1611 	ASSERT(!servicing_interrupt());
1612 
1613 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1614 		return (ret);
1615 
1616 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1617 		*devp = vp->v_rdev;
1618 	}
1619 	VN_RELE(vp);
1620 
1621 	return (ret);
1622 }
1623 
1624 /* get a handle to a device by pathname */
1625 int
1626 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1627     ldi_handle_t *lhp, ldi_ident_t li)
1628 {
1629 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1630 	int			ret;
1631 	vnode_t			*vp;
1632 
1633 	/* sanity check required input parameters */
1634 	if ((pathname == NULL) || (*pathname != '/') ||
1635 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1636 		return (EINVAL);
1637 
1638 	ASSERT(!servicing_interrupt());
1639 
1640 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1641 		return (ret);
1642 
1643 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1644 	VN_RELE(vp);
1645 
1646 	return (ret);
1647 }
1648 
1649 /* get a handle to a device by devid and minor_name */
1650 int
1651 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1652     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1653 {
1654 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1655 	int			ret;
1656 	vnode_t			*vp;
1657 
1658 	/* sanity check required input parameters */
1659 	if ((minor_name == NULL) || (cr == NULL) ||
1660 	    (lhp == NULL) || (lip == NULL))
1661 		return (EINVAL);
1662 
1663 	ASSERT(!servicing_interrupt());
1664 
1665 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1666 		return (ret);
1667 
1668 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1669 	VN_RELE(vp);
1670 
1671 	return (ret);
1672 }
1673 
/*
 * ldi_close() - close a layered handle.  performs a layered VOP_CLOSE
 * on the underlying vnode, detaches the handle from any registered
 * LDI event callbacks, and frees the handle.  the handle is freed
 * even if the device close fails (see comment near the bottom).
 */
int
ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	/* NOTE(review): lep is only referenced under LDI_OBSOLETE_EVENT */
	struct ldi_event	*lep;
	int			err = 0;
	int			notify = 0;
	list_t			*listp;
	ldi_ev_callback_impl_t	*lecp;

	if (lh == NULL)
		return (EINVAL);

	ASSERT(!servicing_interrupt());

	/* Flush back any dirty pages associated with the device. */
	if (handlep->lh_type & LH_CBDEV) {
		vnode_t	*cvp = common_specvp(handlep->lh_vp);
		dev_t	dev = cvp->v_rdev;

		(void) VOP_PUTPAGE(cvp, 0, 0, B_INVAL, kcred, NULL);
		bflush(dev);
	}

#ifdef	LDI_OBSOLETE_EVENT

	/*
	 * Any event handlers should have been unregistered by the
	 * time ldi_close() is called.  If they haven't then it's a
	 * bug.
	 *
	 * In a debug kernel we'll panic to make the problem obvious.
	 */
	ASSERT(handlep->lh_events == NULL);

	/*
	 * On a production kernel we'll "do the right thing" (unregister
	 * the event handlers) and then complain about having to do the
	 * work ourselves.
	 */
	while ((lep = handlep->lh_events) != NULL) {
		err = 1;
		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
	}
	if (err) {
		struct ldi_ident *lip = handlep->lh_ident;
		ASSERT(lip != NULL);
		cmn_err(CE_NOTE, "ldi err: %s "
		    "failed to unregister layered event handlers before "
		    "closing devices", lip->li_modname);
	}
#endif

	/* do a layered close on the device */
	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);

	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));

	/*
	 * Search the event callback list for callbacks with this
	 * handle. There are 2 cases
	 * 1. Called in the context of a notify. The handle consumer
	 *    is releasing its hold on the device to allow a reconfiguration
	 *    of the device. Simply NULL out the handle and the notify callback.
	 *    The finalize callback is still available so that the consumer
	 *    knows of the final disposition of the device.
	 * 2. Not called in the context of notify. NULL out the handle as well
	 *    as the notify and finalize callbacks. Since the consumer has
	 *    closed the handle, we assume it is not interested in the
	 *    notify and finalize callbacks.
	 */
	ldi_ev_lock();

	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
		notify = 1;
	listp = &ldi_ev_callback_list.le_head;
	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
		if (lecp->lec_lhp != handlep)
			continue;
		lecp->lec_lhp = NULL;
		lecp->lec_notify = NULL;
		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
		if (!notify) {
			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
			lecp->lec_finalize = NULL;
		}
	}

	if (notify)
		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
	ldi_ev_unlock();

	/*
	 * Free the handle even if the device close failed.  why?
	 *
	 * If the device close failed we can't really make assumptions
	 * about the devices state so we shouldn't allow access to the
	 * device via this handle any more.  If the device consumer wants
	 * to access the device again they should open it again.
	 *
	 * This is the same way file/device close failures are handled
	 * in other places like spec_close() and closeandsetf().
	 */
	handle_release(handlep);
	return (err);
}
1780 
1781 int
1782 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1783 {
1784 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1785 	vnode_t			*vp;
1786 	dev_t			dev;
1787 	int			ret;
1788 
1789 	if (lh == NULL)
1790 		return (EINVAL);
1791 
1792 	vp = handlep->lh_vp;
1793 	dev = vp->v_rdev;
1794 	if (handlep->lh_type & LH_CBDEV) {
1795 		ret = cdev_read(dev, uiop, credp);
1796 	} else if (handlep->lh_type & LH_STREAM) {
1797 		ret = strread(vp, uiop, credp);
1798 	} else {
1799 		return (ENOTSUP);
1800 	}
1801 	return (ret);
1802 }
1803 
1804 int
1805 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1806 {
1807 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1808 	vnode_t			*vp;
1809 	dev_t			dev;
1810 	int			ret;
1811 
1812 	if (lh == NULL)
1813 		return (EINVAL);
1814 
1815 	vp = handlep->lh_vp;
1816 	dev = vp->v_rdev;
1817 	if (handlep->lh_type & LH_CBDEV) {
1818 		ret = cdev_write(dev, uiop, credp);
1819 	} else if (handlep->lh_type & LH_STREAM) {
1820 		ret = strwrite(vp, uiop, credp);
1821 	} else {
1822 		return (ENOTSUP);
1823 	}
1824 	return (ret);
1825 }
1826 
1827 int
1828 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1829 {
1830 	int			otyp;
1831 	uint_t			value;
1832 	int64_t			drv_prop64;
1833 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1834 	uint_t			blksize;
1835 	int			blkshift;
1836 
1837 
1838 	if ((lh == NULL) || (sizep == NULL))
1839 		return (DDI_FAILURE);
1840 
1841 	if (handlep->lh_type & LH_STREAM)
1842 		return (DDI_FAILURE);
1843 
1844 	/*
1845 	 * Determine device type (char or block).
1846 	 * Character devices support Size/size
1847 	 * property value. Block devices may support
1848 	 * Nblocks/nblocks or Size/size property value.
1849 	 */
1850 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1851 		return (DDI_FAILURE);
1852 
1853 	if (otyp == OTYP_BLK) {
1854 		if (ldi_prop_exists(lh,
1855 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1856 
1857 			drv_prop64 = ldi_prop_get_int64(lh,
1858 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1859 			    "Nblocks", 0);
1860 			blksize = ldi_prop_get_int(lh,
1861 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1862 			    "blksize", DEV_BSIZE);
1863 			if (blksize == DEV_BSIZE)
1864 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1865 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1866 				    "device-blksize", DEV_BSIZE);
1867 
1868 			/* blksize must be a power of two */
1869 			ASSERT(BIT_ONLYONESET(blksize));
1870 			blkshift = highbit(blksize) - 1;
1871 
1872 			/*
1873 			 * We don't support Nblocks values that don't have
1874 			 * an accurate uint64_t byte count representation.
1875 			 */
1876 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1877 				return (DDI_FAILURE);
1878 
1879 			*sizep = (uint64_t)
1880 			    (((u_offset_t)drv_prop64) << blkshift);
1881 			return (DDI_SUCCESS);
1882 		}
1883 
1884 		if (ldi_prop_exists(lh,
1885 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1886 
1887 			value = ldi_prop_get_int(lh,
1888 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1889 			    "nblocks", 0);
1890 			blksize = ldi_prop_get_int(lh,
1891 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1892 			    "blksize", DEV_BSIZE);
1893 			if (blksize == DEV_BSIZE)
1894 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1895 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1896 				    "device-blksize", DEV_BSIZE);
1897 
1898 			/* blksize must be a power of two */
1899 			ASSERT(BIT_ONLYONESET(blksize));
1900 			blkshift = highbit(blksize) - 1;
1901 
1902 			/*
1903 			 * We don't support nblocks values that don't have an
1904 			 * accurate uint64_t byte count representation.
1905 			 */
1906 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1907 				return (DDI_FAILURE);
1908 
1909 			*sizep = (uint64_t)
1910 			    (((u_offset_t)value) << blkshift);
1911 			return (DDI_SUCCESS);
1912 		}
1913 	}
1914 
1915 	if (ldi_prop_exists(lh,
1916 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1917 
1918 		drv_prop64 = ldi_prop_get_int64(lh,
1919 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1920 		*sizep = (uint64_t)drv_prop64;
1921 		return (DDI_SUCCESS);
1922 	}
1923 
1924 	if (ldi_prop_exists(lh,
1925 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1926 
1927 		value = ldi_prop_get_int(lh,
1928 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1929 		*sizep = (uint64_t)value;
1930 		return (DDI_SUCCESS);
1931 	}
1932 
1933 	/* unable to determine device size */
1934 	return (DDI_FAILURE);
1935 }
1936 
1937 int
1938 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1939 	cred_t *cr, int *rvalp)
1940 {
1941 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1942 	vnode_t			*vp;
1943 	dev_t			dev;
1944 	int			ret, copymode;
1945 
1946 	if (lh == NULL)
1947 		return (EINVAL);
1948 
1949 	/*
1950 	 * if the data pointed to by arg is located in the kernel then
1951 	 * make sure the FNATIVE flag is set.
1952 	 */
1953 	if (mode & FKIOCTL)
1954 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1955 
1956 	vp = handlep->lh_vp;
1957 	dev = vp->v_rdev;
1958 	if (handlep->lh_type & LH_CBDEV) {
1959 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1960 	} else if (handlep->lh_type & LH_STREAM) {
1961 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1962 
1963 		/*
1964 		 * if we get an I_PLINK from within the kernel the
1965 		 * arg is a layered handle pointer instead of
1966 		 * a file descriptor, so we translate this ioctl
1967 		 * into a private one that can handle this.
1968 		 */
1969 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1970 			cmd = _I_PLINK_LH;
1971 
1972 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1973 	} else {
1974 		return (ENOTSUP);
1975 	}
1976 
1977 	return (ret);
1978 }
1979 
1980 int
1981 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1982     struct pollhead **phpp)
1983 {
1984 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1985 	vnode_t			*vp;
1986 	dev_t			dev;
1987 	int			ret;
1988 
1989 	if (lh == NULL)
1990 		return (EINVAL);
1991 
1992 	vp = handlep->lh_vp;
1993 	dev = vp->v_rdev;
1994 	if (handlep->lh_type & LH_CBDEV) {
1995 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1996 	} else if (handlep->lh_type & LH_STREAM) {
1997 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1998 	} else {
1999 		return (ENOTSUP);
2000 	}
2001 
2002 	return (ret);
2003 }
2004 
2005 int
2006 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
2007 	int flags, char *name, caddr_t valuep, int *length)
2008 {
2009 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2010 	dev_t			dev;
2011 	dev_info_t		*dip;
2012 	int			ret;
2013 	struct snode		*csp;
2014 
2015 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2016 		return (DDI_PROP_INVAL_ARG);
2017 
2018 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2019 		return (DDI_PROP_INVAL_ARG);
2020 
2021 	if (length == NULL)
2022 		return (DDI_PROP_INVAL_ARG);
2023 
2024 	/*
2025 	 * try to find the associated dip,
2026 	 * this places a hold on the driver
2027 	 */
2028 	dev = handlep->lh_vp->v_rdev;
2029 
2030 	csp = VTOCS(handlep->lh_vp);
2031 	mutex_enter(&csp->s_lock);
2032 	if ((dip = csp->s_dip) != NULL)
2033 		e_ddi_hold_devi(dip);
2034 	mutex_exit(&csp->s_lock);
2035 	if (dip == NULL)
2036 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2037 
2038 	if (dip == NULL)
2039 		return (DDI_PROP_NOT_FOUND);
2040 
2041 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2042 	ddi_release_devi(dip);
2043 
2044 	return (ret);
2045 }
2046 
2047 int
2048 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2049 {
2050 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2051 	dev_t			dev;
2052 
2053 	if ((lh == NULL) || (bp == NULL))
2054 		return (EINVAL);
2055 
2056 	/* this entry point is only supported for cb devices */
2057 	dev = handlep->lh_vp->v_rdev;
2058 	if (!(handlep->lh_type & LH_CBDEV))
2059 		return (ENOTSUP);
2060 
2061 	bp->b_edev = dev;
2062 	bp->b_dev = cmpdev(dev);
2063 	return (bdev_strategy(bp));
2064 }
2065 
2066 int
2067 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2068 {
2069 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2070 	dev_t			dev;
2071 
2072 	if (lh == NULL)
2073 		return (EINVAL);
2074 
2075 	/* this entry point is only supported for cb devices */
2076 	dev = handlep->lh_vp->v_rdev;
2077 	if (!(handlep->lh_type & LH_CBDEV))
2078 		return (ENOTSUP);
2079 
2080 	return (bdev_dump(dev, addr, blkno, nblk));
2081 }
2082 
2083 int
2084 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2085     size_t len, size_t *maplen, uint_t model)
2086 {
2087 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2088 	dev_t			dev;
2089 
2090 	if (lh == NULL)
2091 		return (EINVAL);
2092 
2093 	/* this entry point is only supported for cb devices */
2094 	dev = handlep->lh_vp->v_rdev;
2095 	if (!(handlep->lh_type & LH_CBDEV))
2096 		return (ENOTSUP);
2097 
2098 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2099 }
2100 
2101 int
2102 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2103 {
2104 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2105 	dev_t			dev;
2106 	struct cb_ops		*cb;
2107 
2108 	if (lh == NULL)
2109 		return (EINVAL);
2110 
2111 	/* this entry point is only supported for cb devices */
2112 	if (!(handlep->lh_type & LH_CBDEV))
2113 		return (ENOTSUP);
2114 
2115 	/*
2116 	 * Kaio is only supported on block devices.
2117 	 */
2118 	dev = handlep->lh_vp->v_rdev;
2119 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2120 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2121 		return (ENOTSUP);
2122 
2123 	if (cb->cb_aread == NULL)
2124 		return (ENOTSUP);
2125 
2126 	return (cb->cb_aread(dev, aio_reqp, cr));
2127 }
2128 
2129 int
2130 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2131 {
2132 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2133 	struct cb_ops		*cb;
2134 	dev_t			dev;
2135 
2136 	if (lh == NULL)
2137 		return (EINVAL);
2138 
2139 	/* this entry point is only supported for cb devices */
2140 	if (!(handlep->lh_type & LH_CBDEV))
2141 		return (ENOTSUP);
2142 
2143 	/*
2144 	 * Kaio is only supported on block devices.
2145 	 */
2146 	dev = handlep->lh_vp->v_rdev;
2147 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2148 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2149 		return (ENOTSUP);
2150 
2151 	if (cb->cb_awrite == NULL)
2152 		return (ENOTSUP);
2153 
2154 	return (cb->cb_awrite(dev, aio_reqp, cr));
2155 }
2156 
/*
 * ldi_putmsg: send a STREAMS message down the stream referenced by a
 * layered handle.  If the handle does not refer to a stream, the
 * message is freed on the caller's behalf and ENOTSUP is returned;
 * on EINVAL the caller retains ownership of the message.
 */
int
ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	int			ret;

	if ((lh == NULL) || (smp == NULL))
		return (EINVAL);

	/* this interface is only supported on streams */
	if (!(handlep->lh_type & LH_STREAM)) {
		freemsg(smp);
		return (ENOTSUP);
	}

	/*
	 * If we don't have db_credp, set it. Note that we can not be called
	 * from interrupt context.
	 */
	if (msg_getcred(smp, NULL) == NULL)
		mblk_setcred(smp, CRED(), curproc->p_pid);

	/* Send message while honoring flow control */
	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);

	return (ret);
}
2184 
/*
 * ldi_getmsg: retrieve the next STREAMS message from the stream
 * referenced by a layered handle, waiting up to 'timeo' for one to
 * arrive (or forever if timeo is NULL).
 */
int
ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	clock_t			timout; /* milliseconds */
	uchar_t			pri;
	rval_t			rval;
	int			ret, pflag;


	if (lh == NULL)
		return (EINVAL);

	/* this interface is only supported on streams */
	if (!(handlep->lh_type & LH_STREAM))
		return (ENOTSUP);

	/*
	 * Convert the timestruc_t to milliseconds; -1 means wait forever.
	 * NOTE(review): tv_sec * 1000 could overflow clock_t before the
	 * INT_MAX check on ILP32 kernels for very large timeouts - confirm.
	 */
	if (timeo != NULL) {
		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
		if (timout > INT_MAX)
			return (EINVAL);
	} else
		timout = -1;

	/* Wait for timeout milliseconds for a message */
	pflag = MSG_ANY;
	pri = 0;
	*rmp = NULL;
	ret = kstrgetmsg(handlep->lh_vp,
	    rmp, NULL, &pri, &pflag, timout, &rval);
	return (ret);
}
2217 
2218 int
2219 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2220 {
2221 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2222 
2223 	if ((lh == NULL) || (devp == NULL))
2224 		return (EINVAL);
2225 
2226 	*devp = handlep->lh_vp->v_rdev;
2227 	return (0);
2228 }
2229 
2230 int
2231 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2232 {
2233 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2234 
2235 	if ((lh == NULL) || (otyp == NULL))
2236 		return (EINVAL);
2237 
2238 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2239 	return (0);
2240 }
2241 
2242 int
2243 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2244 {
2245 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2246 	int			ret;
2247 	dev_t			dev;
2248 
2249 	if ((lh == NULL) || (devid == NULL))
2250 		return (EINVAL);
2251 
2252 	dev = handlep->lh_vp->v_rdev;
2253 
2254 	ret = ddi_lyr_get_devid(dev, devid);
2255 	if (ret != DDI_SUCCESS)
2256 		return (ENOTSUP);
2257 
2258 	return (0);
2259 }
2260 
2261 int
2262 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2263 {
2264 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2265 	int			ret, otyp;
2266 	dev_t			dev;
2267 
2268 	if ((lh == NULL) || (minor_name == NULL))
2269 		return (EINVAL);
2270 
2271 	dev = handlep->lh_vp->v_rdev;
2272 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2273 
2274 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2275 	if (ret != DDI_SUCCESS)
2276 		return (ENOTSUP);
2277 
2278 	return (0);
2279 }
2280 
/*
 * ldi_prop_lookup_int_array: LDI wrapper for
 * ddi_prop_lookup_int_array(9F).  If a devinfo node can be found and
 * held for the device, the driver's dynamic prop_op(9E) path is tried
 * first (via i_ldi_prop_op_typed) so the driver may override default
 * property values; otherwise we fall back to the normal DDI property
 * interfaces.
 */
int
ldi_prop_lookup_int_array(ldi_handle_t lh,
    uint_t flags, char *name, int **data, uint_t *nelements)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int *prop_val, prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2336 
/*
 * ldi_prop_lookup_int64_array: LDI wrapper for
 * ddi_prop_lookup_int64_array(9F).  The driver's dynamic prop_op(9E)
 * path is tried first when a devinfo node is available; otherwise we
 * fall back to the normal DDI property interfaces.
 */
int
ldi_prop_lookup_int64_array(ldi_handle_t lh,
    uint_t flags, char *name, int64_t **data, uint_t *nelements)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t	*prop_val;
		int	prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (int64_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_int64_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2393 
/*
 * ldi_prop_lookup_string_array: LDI wrapper for
 * ddi_prop_lookup_string_array(9F).  The driver's dynamic prop_op(9E)
 * path is tried first when a devinfo node is available; a raw string
 * property obtained that way is repacked (via i_pack_string_array)
 * into the pointer-array format callers expect.  Otherwise we fall
 * back to the normal DDI property interfaces.
 */
int
ldi_prop_lookup_string_array(ldi_handle_t lh,
    uint_t flags, char *name, char ***data, uint_t *nelements)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char	*prop_val;
		int	prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			char	**str_array;
			int	nelem;

			/*
			 * pack the returned string array into the format
			 * our callers expect
			 */
			if (i_pack_string_array(prop_val, prop_len,
			    &str_array, &nelem) == 0) {

				*data = str_array;
				*nelements = nelem;

				ddi_prop_free(prop_val);
				ddi_release_devi(dip);
				return (res);
			}

			/*
			 * the format of the returned property must have
			 * been bad so throw it out
			 */
			ddi_prop_free(prop_val);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2468 
/*
 * ldi_prop_lookup_string: LDI wrapper for ddi_prop_lookup_string(9F).
 * The driver's dynamic prop_op(9E) path is tried first when a devinfo
 * node is available (with the value sanity-checked via i_check_string);
 * otherwise we fall back to the normal DDI property interfaces.
 */
int
ldi_prop_lookup_string(ldi_handle_t lh,
    uint_t flags, char *name, char **data)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		char	*prop_val;
		int	prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, 0);

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			/*
			 * sanity check the value returned.
			 */
			if (i_check_string(prop_val, prop_len)) {
				ddi_prop_free(prop_val);
			} else {
				*data = prop_val;
				ddi_release_devi(dip);
				return (res);
			}
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_string(dev, dip, flags, name, data);

	if (dip != NULL)
		ddi_release_devi(dip);

#ifdef DEBUG
	if (res == DDI_PROP_SUCCESS) {
		/*
		 * keep ourselves honest
		 * make sure the framework returns strings in the
		 * same format as we're demanding from drivers.
		 */
		struct prop_driver_data	*pdd;
		int			pdd_prop_size;

		pdd = ((struct prop_driver_data *)(*data)) - 1;
		pdd_prop_size = pdd->pdd_size -
		    sizeof (struct prop_driver_data);
		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
	}
#endif /* DEBUG */

	return (res);
}
2546 
/*
 * ldi_prop_lookup_byte_array: LDI wrapper for
 * ddi_prop_lookup_byte_array(9F).  The driver's dynamic prop_op(9E)
 * path is tried first when a devinfo node is available; otherwise we
 * fall back to the normal DDI property interfaces.
 */
int
ldi_prop_lookup_byte_array(ldi_handle_t lh,
    uint_t flags, char *name, uchar_t **data, uint_t *nelements)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		uchar_t	*prop_val;
		int	prop_len;

		res = i_ldi_prop_op_typed(dev, dip, flags, name,
		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));

		/* if we got it then return it */
		if (res == DDI_PROP_SUCCESS) {
			*nelements = prop_len / sizeof (uchar_t);
			*data = prop_val;

			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_lookup_byte_array(dev, dip, flags,
	    name, data, nelements);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2603 
/*
 * ldi_prop_get_int: LDI wrapper for ddi_prop_get_int(9F).  Returns
 * 'defvalue' on invalid arguments, mirroring the DDI interface.
 */
int
ldi_prop_get_int(ldi_handle_t lh,
    uint_t flags, char *name, int defvalue)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int	prop_val;
		int	prop_len;

		/*
		 * first call the driver's prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2664 
/*
 * ldi_prop_get_int64: LDI wrapper for ddi_prop_get_int64(9F).
 * Returns 'defvalue' on invalid arguments, mirroring the DDI
 * interface.
 */
int64_t
ldi_prop_get_int64(ldi_handle_t lh,
    uint_t flags, char *name, int64_t defvalue)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int64_t			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (defvalue);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		/* no dip: allow lookups for unbound DLPI style-2 drivers */
		flags |= DDI_UNBND_DLPI2;
	} else if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	if (dip != NULL) {
		int64_t	prop_val;
		int	prop_len;

		/*
		 * first call the driver's prop_op interface to allow it
		 * to override default property values.
		 */
		prop_len = sizeof (int64_t);
		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
		    flags | DDI_PROP_DYNAMIC, name,
		    (caddr_t)&prop_val, &prop_len);

		/* if we got it then return it */
		if ((res == DDI_PROP_SUCCESS) &&
		    (prop_len == sizeof (int64_t))) {
			res = prop_val;
			ddi_release_devi(dip);
			return (res);
		}
	}

	/* call the normal property interfaces */
	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);

	if (dip != NULL)
		ddi_release_devi(dip);

	return (res);
}
2725 
/*
 * ldi_prop_exists: LDI wrapper for ddi_prop_exists(9F).  Returns 1 if
 * the named property exists for the device, 0 otherwise (including
 * when no devinfo node can be found).
 */
int
ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res, prop_len;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
		return (0);

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	/* if NULL dip, prop does NOT exist */
	if (dip == NULL)
		return (0);

	if (flags & LDI_DEV_T_ANY) {
		/* translate the LDI wildcard into the DDI wildcard dev_t */
		flags &= ~LDI_DEV_T_ANY;
		dev = DDI_DEV_T_ANY;
	}

	/*
	 * first call the driver's prop_op interface to allow it
	 * to override default property values.
	 */
	res = i_ldi_prop_op(dev, dip, PROP_LEN,
	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);

	if (res == DDI_PROP_SUCCESS) {
		ddi_release_devi(dip);
		return (1);
	}

	/* call the normal property interfaces */
	res = ddi_prop_exists(dev, dip, flags, name);

	ddi_release_devi(dip);
	return (res);
}
2775 
2776 #ifdef	LDI_OBSOLETE_EVENT
2777 
/*
 * ldi_get_eventcookie: obsolete LDI event interface.  Look up the NDI
 * event cookie for event 'name' on the device underlying the handle.
 */
int
ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;

	if ((lh == NULL) || (name == NULL) ||
	    (strlen(name) == 0) || (ecp == NULL)) {
		return (DDI_FAILURE);
	}

	ASSERT(!servicing_interrupt());

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL)
		return (DDI_FAILURE);

	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
	    name, (void *)dip, (void *)ecp));

	res = ddi_get_eventcookie(dip, name, ecp);

	ddi_release_devi(dip);
	return (res);
}
2816 
2817 int
2818 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2819     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2820     void *arg, ldi_callback_id_t *id)
2821 {
2822 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2823 	struct ldi_event	*lep;
2824 	dev_info_t		*dip;
2825 	dev_t			dev;
2826 	int			res;
2827 	struct snode		*csp;
2828 
2829 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2830 		return (DDI_FAILURE);
2831 
2832 	ASSERT(!servicing_interrupt());
2833 
2834 	dev = handlep->lh_vp->v_rdev;
2835 
2836 	csp = VTOCS(handlep->lh_vp);
2837 	mutex_enter(&csp->s_lock);
2838 	if ((dip = csp->s_dip) != NULL)
2839 		e_ddi_hold_devi(dip);
2840 	mutex_exit(&csp->s_lock);
2841 	if (dip == NULL)
2842 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2843 
2844 	if (dip == NULL)
2845 		return (DDI_FAILURE);
2846 
2847 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2848 	lep->le_lhp = handlep;
2849 	lep->le_arg = arg;
2850 	lep->le_handler = handler;
2851 
2852 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2853 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2854 		LDI_EVENTCB((CE_WARN, "%s: unable to add"
2855 		    "event callback", "ldi_add_event_handler"));
2856 		ddi_release_devi(dip);
2857 		kmem_free(lep, sizeof (struct ldi_event));
2858 		return (res);
2859 	}
2860 
2861 	*id = (ldi_callback_id_t)lep;
2862 
2863 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2864 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2865 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2866 
2867 	handle_event_add(lep);
2868 	ddi_release_devi(dip);
2869 	return (res);
2870 }
2871 
2872 int
2873 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2874 {
2875 	ldi_event_t		*lep = (ldi_event_t *)id;
2876 	int			res;
2877 
2878 	if ((lh == NULL) || (id == NULL))
2879 		return (DDI_FAILURE);
2880 
2881 	ASSERT(!servicing_interrupt());
2882 
2883 	if ((res = ddi_remove_event_handler(lep->le_id))
2884 	    != DDI_SUCCESS) {
2885 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2886 		    "event callback", "ldi_remove_event_handler"));
2887 		return (res);
2888 	}
2889 
2890 	handle_event_remove(lep);
2891 	kmem_free(lep, sizeof (struct ldi_event));
2892 	return (res);
2893 }
2894 
2895 #endif
2896 
2897 /*
2898  * Here are some definitions of terms used in the following LDI events
2899  * code:
2900  *
2901  * "LDI events" AKA "native events": These are events defined by the
2902  * "new" LDI event framework. These events are serviced by the LDI event
2903  * framework itself and thus are native to it.
2904  *
 * "LDI contract events": These are contract events that correspond to the
 * LDI events. This mapping of LDI events to contract events is defined by
 * the ldi_ev_cookies[] array above.
 *
 * NDI events: These are events which are serviced by the NDI event subsystem.
 * The LDI subsystem just provides a thin wrapper around the NDI event
 * interfaces. These events are therefore *not* native events.
2912  */
2913 
2914 static int
2915 ldi_native_event(const char *evname)
2916 {
2917 	int i;
2918 
2919 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2920 
2921 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2922 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2923 			return (1);
2924 	}
2925 
2926 	return (0);
2927 }
2928 
2929 static uint_t
2930 ldi_ev_sync_event(const char *evname)
2931 {
2932 	int i;
2933 
2934 	ASSERT(ldi_native_event(evname));
2935 
2936 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2937 
2938 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2939 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2940 			return (ldi_ev_cookies[i].ck_sync);
2941 	}
2942 
2943 	/*
2944 	 * This should never happen until non-contract based
2945 	 * LDI events are introduced. If that happens, we will
2946 	 * use a "special" token to indicate that there are no
2947 	 * contracts corresponding to this LDI event.
2948 	 */
2949 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2950 
2951 	return (0);
2952 }
2953 
2954 static uint_t
2955 ldi_contract_event(const char *evname)
2956 {
2957 	int i;
2958 
2959 	ASSERT(ldi_native_event(evname));
2960 
2961 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2962 
2963 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2964 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2965 			return (ldi_ev_cookies[i].ck_ctype);
2966 	}
2967 
2968 	/*
2969 	 * This should never happen until non-contract based
2970 	 * LDI events are introduced. If that happens, we will
2971 	 * use a "special" token to indicate that there are no
2972 	 * contracts corresponding to this LDI event.
2973 	 */
2974 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2975 
2976 	return (0);
2977 }
2978 
2979 char *
2980 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2981 {
2982 	int i;
2983 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2984 
2985 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2986 		if (&ldi_ev_cookies[i] == cookie_impl) {
2987 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2988 			    ldi_ev_cookies[i].ck_evname));
2989 			return (ldi_ev_cookies[i].ck_evname);
2990 		}
2991 	}
2992 
2993 	/*
2994 	 * Not an LDI native event. Must be NDI event service.
2995 	 * Just return a generic string
2996 	 */
2997 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2998 	return (NDI_EVENT_SERVICE);
2999 }
3000 
3001 static int
3002 ldi_native_cookie(ldi_ev_cookie_t cookie)
3003 {
3004 	int i;
3005 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
3006 
3007 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3008 		if (&ldi_ev_cookies[i] == cookie_impl) {
3009 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
3010 			return (1);
3011 		}
3012 	}
3013 
3014 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3015 	return (0);
3016 }
3017 
3018 static ldi_ev_cookie_t
3019 ldi_get_native_cookie(const char *evname)
3020 {
3021 	int i;
3022 
3023 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3024 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3025 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3026 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3027 		}
3028 	}
3029 
3030 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3031 	return (NULL);
3032 }
3033 
3034 /*
 * ldi_ev_lock() needs to be recursive, since layered drivers may call
 * other LDI interfaces (such as ldi_close()) from within the context of
 * a notify callback. Since the notify callback is called with the
3038  * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3039  * to be recursive.
3040  */
/*
 * Acquire the LDI event callback list.  le_busy acts as a recursion
 * count and le_thread records the owning thread, so the owner may
 * re-enter; other threads block on le_cv until le_busy drops to 0.
 */
static void
ldi_ev_lock(void)
{
	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));

	mutex_enter(&ldi_ev_callback_list.le_lock);
	if (ldi_ev_callback_list.le_thread == curthread) {
		/* recursive entry by the current owner */
		ASSERT(ldi_ev_callback_list.le_busy >= 1);
		ldi_ev_callback_list.le_busy++;
	} else {
		/* wait until any current owner fully releases the list */
		while (ldi_ev_callback_list.le_busy)
			cv_wait(&ldi_ev_callback_list.le_cv,
			    &ldi_ev_callback_list.le_lock);
		ASSERT(ldi_ev_callback_list.le_thread == NULL);
		ldi_ev_callback_list.le_busy = 1;
		ldi_ev_callback_list.le_thread = curthread;
	}
	mutex_exit(&ldi_ev_callback_list.le_lock);

	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
}
3062 
/*
 * Release one level of the (recursive) LDI event callback list lock.
 * Only when the recursion count drops to zero is ownership cleared
 * and a waiter woken.
 */
static void
ldi_ev_unlock(void)
{
	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
	mutex_enter(&ldi_ev_callback_list.le_lock);
	ASSERT(ldi_ev_callback_list.le_thread == curthread);
	ASSERT(ldi_ev_callback_list.le_busy >= 1);

	ldi_ev_callback_list.le_busy--;
	if (ldi_ev_callback_list.le_busy == 0) {
		/* fully released: hand the list to the next waiter */
		ldi_ev_callback_list.le_thread = NULL;
		cv_signal(&ldi_ev_callback_list.le_cv);
	}
	mutex_exit(&ldi_ev_callback_list.le_lock);
	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
}
3079 
/*
 * ldi_ev_get_cookie: return an event cookie for 'evname' through
 * 'cookiep'.  Native LDI events are resolved from the internal cookie
 * table; all other names are passed to the NDI event services of the
 * device underlying the handle.
 */
int
ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
{
	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
	dev_info_t		*dip;
	dev_t			dev;
	int			res;
	struct snode		*csp;
	ddi_eventcookie_t	ddi_cookie;
	ldi_ev_cookie_t		tcookie;

	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
	    evname ? evname : "<NULL>"));

	if (lh == NULL || evname == NULL ||
	    strlen(evname) == 0 || cookiep == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
		return (LDI_EV_FAILURE);
	}

	*cookiep = NULL;

	/*
	 * First check if it is a LDI native event
	 */
	tcookie = ldi_get_native_cookie(evname);
	if (tcookie) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
		*cookiep = tcookie;
		return (LDI_EV_SUCCESS);
	}

	/*
	 * Not a LDI native event. Try NDI event services
	 */

	dev = handlep->lh_vp->v_rdev;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	csp = VTOCS(handlep->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
		    "handle: %p", (void *)handlep);
		return (LDI_EV_FAILURE);
	}

	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
	    (void *)dip, evname));

	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);

	ddi_release_devi(dip);

	if (res == DDI_SUCCESS) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
		return (LDI_EV_SUCCESS);
	} else {
		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
		return (LDI_EV_FAILURE);
	}
}
3148 
/*
 * NDI event dispatch trampoline: invoked by the NDI event service,
 * this unwraps the ldi_ev_callback_impl_t registered by
 * ldi_ev_register_callbacks() and calls the layered driver's
 * finalize callback (NDI-backed registrations carry no notify).
 */
/*ARGSUSED*/
static void
i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
    void *arg, void *ev_data)
{
	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;

	ASSERT(lecp != NULL);
	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
	ASSERT(lecp->lec_lhp);
	ASSERT(lecp->lec_notify == NULL);
	ASSERT(lecp->lec_finalize);

	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
	    (void *)lecp->lec_arg, (void *)ev_data));

	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
	    lecp->lec_arg, ev_data);
}
3169 
/*
 * ldi_ev_register_callbacks: register notify/finalize callbacks for
 * the event identified by 'cookie' on the device underlying the
 * handle.  Native LDI events are tracked on the global
 * ldi_ev_callback_list; NDI-backed cookies are additionally
 * registered with the NDI event service (finalize only).  A callback
 * id for ldi_ev_remove_callbacks() is returned through 'id'.
 */
int
ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
    ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
{
	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
	ldi_ev_callback_impl_t	*lecp;
	dev_t			dev;
	struct snode		*csp;
	dev_info_t		*dip;
	int			ddi_event;

	ASSERT(!servicing_interrupt());

	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
		return (LDI_EV_FAILURE);
	}

	if (callb->cb_vers != LDI_EV_CB_VERS) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
		return (LDI_EV_FAILURE);
	}

	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
		return (LDI_EV_FAILURE);
	}

	*id = 0;

	/*
	 * Hold the devinfo node for this device: prefer the dip cached
	 * on the common snode, otherwise look it up (and hold it) by dev_t.
	 */
	dev = lhp->lh_vp->v_rdev;
	csp = VTOCS(lhp->lh_vp);
	mutex_enter(&csp->s_lock);
	if ((dip = csp->s_dip) != NULL)
		e_ddi_hold_devi(dip);
	mutex_exit(&csp->s_lock);
	if (dip == NULL)
		dip = e_ddi_hold_devi_by_dev(dev, 0);

	if (dip == NULL) {
		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
		    "LDI handle: %p", (void *)lhp);
		return (LDI_EV_FAILURE);
	}

	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);

	ddi_event = 0;
	if (!ldi_native_cookie(cookie)) {
		if (callb->cb_notify || callb->cb_finalize == NULL) {
			/*
			 * NDI event services only accept finalize
			 */
			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
			    "Only finalize"
			    " callback supported with this cookie",
			    "ldi_ev_register_callbacks",
			    lhp->lh_ident->li_modname);
			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
			ddi_release_devi(dip);
			return (LDI_EV_FAILURE);
		}

		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
		    i_ldi_ev_callback, (void *)lecp,
		    (ddi_callback_id_t *)&lecp->lec_id)
		    != DDI_SUCCESS) {
			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
			ddi_release_devi(dip);
			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
			    "ddi_add_event_handler failed"));
			return (LDI_EV_FAILURE);
		}
		ddi_event = 1;
		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
		    "ddi_add_event_handler success"));
	}



	ldi_ev_lock();

	/*
	 * Add the notify/finalize callback to the LDI's list of callbacks.
	 */
	lecp->lec_lhp = lhp;
	lecp->lec_dev = lhp->lh_vp->v_rdev;
	lecp->lec_spec = (lhp->lh_vp->v_type == VCHR) ?
	    S_IFCHR : S_IFBLK;
	lecp->lec_notify = callb->cb_notify;
	lecp->lec_finalize = callb->cb_finalize;
	lecp->lec_arg = arg;
	lecp->lec_cookie = cookie;
	/* native events get a synthetic id; NDI registration set one above */
	if (!ddi_event)
		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
	else
		ASSERT(lecp->lec_id);
	lecp->lec_dip = dip;
	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);

	*id = (ldi_callback_id_t)lecp->lec_id;

	ldi_ev_unlock();

	ddi_release_devi(dip);

	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
	    "notify/finalize"));

	return (LDI_EV_SUCCESS);
}
3281 
3282 static int
3283 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3284     dev_t dev, int spec_type)
3285 {
3286 	ASSERT(lecp);
3287 	ASSERT(dip);
3288 	ASSERT(dev != DDI_DEV_T_NONE);
3289 	ASSERT(dev != NODEV);
3290 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3291 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3292 	ASSERT(lecp->lec_dip);
3293 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3294 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3295 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3296 	ASSERT(lecp->lec_dev != NODEV);
3297 
3298 	if (dip != lecp->lec_dip)
3299 		return (0);
3300 
3301 	if (dev != DDI_DEV_T_ANY) {
3302 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3303 			return (0);
3304 	}
3305 
3306 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3307 
3308 	return (1);
3309 }
3310 
3311 /*
3312  * LDI framework function to post a "notify" event to all layered drivers
3313  * that have registered for that event
3314  *
3315  * Returns:
3316  *		LDI_EV_SUCCESS - registered callbacks allow event
3317  *		LDI_EV_FAILURE - registered callbacks block event
3318  *		LDI_EV_NONE    - No matching LDI callbacks
3319  *
3320  * This function is *not* to be called by layered drivers. It is for I/O
3321  * framework code in Solaris, such as the I/O retire code and DR code
3322  * to call while servicing a device event such as offline or degraded.
3323  */
int
ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
    void *ev_data)
{
	ldi_ev_callback_impl_t *lecp;	/* callback under examination */
	list_t	*listp;			/* global LDI callback list */
	int	ret;			/* LDI_EV_{NONE,SUCCESS,FAILURE} */
	char	*lec_event;		/* event name callback registered for */

	/* A fully-specified device and a negotiatable event are required */
	ASSERT(dip);
	ASSERT(dev != DDI_DEV_T_NONE);
	ASSERT(dev != NODEV);
	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
	ASSERT(event);
	ASSERT(ldi_native_event(event));
	ASSERT(ldi_ev_sync_event(event));

	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
	    (void *)dip, event));

	/* LDI_EV_NONE until at least one matching callback allows the event */
	ret = LDI_EV_NONE;
	ldi_ev_lock();
	listp = &ldi_ev_callback_list.le_head;
	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {

		/* Check if matching device */
		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
			continue;

		if (lecp->lec_lhp == NULL) {
			/*
			 * Consumer has unregistered the handle and so
			 * is no longer interested in notify events.
			 */
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
			    "handle, skipping"));
			continue;
		}

		if (lecp->lec_notify == NULL) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
			    "callback. skipping"));
			continue;	/* not interested in notify */
		}

		/*
		 * Check if matching event
		 */
		lec_event = ldi_ev_get_type(lecp->lec_cookie);
		if (strcmp(event, lec_event) != 0) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
			    " event {%s,%s}. skipping", event, lec_event));
			continue;
		}

		/*
		 * Flag the handle before calling out so it is known that a
		 * notify was delivered on it.
		 */
		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
			/*
			 * A single veto blocks the event.  Stop here; the
			 * undo pass below relies on lecp still pointing at
			 * the vetoing entry.
			 */
			ret = LDI_EV_FAILURE;
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
			    " FAILURE"));
			break;
		}

		/* We have a matching callback that allows the event to occur */
		ret = LDI_EV_SUCCESS;

		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
	}

	if (ret != LDI_EV_FAILURE)
		goto out;

	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));

	/*
	 * Undo notifies already sent
	 *
	 * Walk backwards from the entry just before the vetoing callback,
	 * delivering an LDI_EV_FAILURE finalize to each matching consumer
	 * that was notified above.
	 */
	lecp = list_prev(listp, lecp);
	for (; lecp; lecp = list_prev(listp, lecp)) {

		/*
		 * Check if matching device
		 */
		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
			continue;


		if (lecp->lec_finalize == NULL) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
			    "skipping"));
			continue;	/* not interested in finalize */
		}

		/*
		 * it is possible that in response to a notify event a
		 * layered driver closed its LDI handle so it is ok
		 * to have a NULL LDI handle for finalize. The layered
		 * driver is expected to maintain state in its "arg"
		 * parameter to keep track of the closed device.
		 */

		/* Check if matching event */
		lec_event = ldi_ev_get_type(lecp->lec_cookie);
		if (strcmp(event, lec_event) != 0) {
			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
			    "event: %s,%s, skipping", event, lec_event));
			continue;
		}

		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));

		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);

		/*
		 * If LDI native event and LDI handle closed in context
		 * of notify, NULL out the finalize callback as we have
		 * already called the 1 finalize above allowed in this situation
		 */
		if (lecp->lec_lhp == NULL &&
		    ldi_native_cookie(lecp->lec_cookie)) {
			LDI_EVDBG((CE_NOTE,
			    "ldi_invoke_notify(): NULL-ing finalize after "
			    "calling 1 finalize following ldi_close"));
			lecp->lec_finalize = NULL;
		}
	}

out:
	ldi_ev_unlock();

	if (ret == LDI_EV_NONE) {
		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
		    "LDI callbacks"));
	}

	return (ret);
}
3464 
3465 /*
3466  * Framework function to be called from a layered driver to propagate
3467  * LDI "notify" events to exported minors.
3468  *
3469  * This function is a public interface exported by the LDI framework
3470  * for use by layered drivers to propagate device events up the software
3471  * stack.
3472  */
3473 int
3474 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3475     ldi_ev_cookie_t cookie, void *ev_data)
3476 {
3477 	char		*evname = ldi_ev_get_type(cookie);
3478 	uint_t		ct_evtype;
3479 	dev_t		dev;
3480 	major_t		major;
3481 	int		retc;
3482 	int		retl;
3483 
3484 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3485 	ASSERT(dip);
3486 	ASSERT(ldi_native_cookie(cookie));
3487 
3488 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3489 	    evname, (void *)dip));
3490 
3491 	if (!ldi_ev_sync_event(evname)) {
3492 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3493 		    "negotiatable event", evname);
3494 		return (LDI_EV_SUCCESS);
3495 	}
3496 
3497 	major = ddi_driver_major(dip);
3498 	if (major == DDI_MAJOR_T_NONE) {
3499 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3500 		(void) ddi_pathname(dip, path);
3501 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3502 		    "for device %s", path);
3503 		kmem_free(path, MAXPATHLEN);
3504 		return (LDI_EV_FAILURE);
3505 	}
3506 	dev = makedevice(major, minor);
3507 
3508 	/*
3509 	 * Generate negotiation contract events on contracts (if any) associated
3510 	 * with this minor.
3511 	 */
3512 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3513 	ct_evtype = ldi_contract_event(evname);
3514 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3515 	if (retc == CT_NACK) {
3516 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3517 		return (LDI_EV_FAILURE);
3518 	}
3519 
3520 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3521 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3522 	if (retl == LDI_EV_FAILURE) {
3523 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3524 		    "returned FAILURE. Calling contract negend"));
3525 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3526 		return (LDI_EV_FAILURE);
3527 	}
3528 
3529 	/*
3530 	 * The very fact that we are here indicates that there is a
3531 	 * LDI callback (and hence a constraint) for the retire of the
3532 	 * HW device. So we just return success even if there are no
3533 	 * contracts or LDI callbacks against the minors layered on top
3534 	 * of the HW minors
3535 	 */
3536 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3537 	return (LDI_EV_SUCCESS);
3538 }
3539 
3540 /*
3541  * LDI framework function to invoke "finalize" callbacks for all layered
3542  * drivers that have registered callbacks for that event.
3543  *
3544  * This function is *not* to be called by layered drivers. It is for I/O
3545  * framework code in Solaris, such as the I/O retire code and DR code
3546  * to call while servicing a device event such as offline or degraded.
3547  */
3548 void
3549 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3550     int ldi_result, void *ev_data)
3551 {
3552 	ldi_ev_callback_impl_t *lecp;
3553 	list_t	*listp;
3554 	char	*lec_event;
3555 	int	found = 0;
3556 
3557 	ASSERT(dip);
3558 	ASSERT(dev != DDI_DEV_T_NONE);
3559 	ASSERT(dev != NODEV);
3560 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3561 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3562 	ASSERT(event);
3563 	ASSERT(ldi_native_event(event));
3564 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3565 
3566 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3567 	    " event=%s", (void *)dip, ldi_result, event));
3568 
3569 	ldi_ev_lock();
3570 	listp = &ldi_ev_callback_list.le_head;
3571 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3572 
3573 		if (lecp->lec_finalize == NULL) {
3574 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3575 			    "finalize. Skipping"));
3576 			continue;	/* Not interested in finalize */
3577 		}
3578 
3579 		/*
3580 		 * Check if matching device
3581 		 */
3582 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3583 			continue;
3584 
3585 		/*
3586 		 * It is valid for the LDI handle to be NULL during finalize.
3587 		 * The layered driver may have done an LDI close in the notify
3588 		 * callback.
3589 		 */
3590 
3591 		/*
3592 		 * Check if matching event
3593 		 */
3594 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3595 		if (strcmp(event, lec_event) != 0) {
3596 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3597 			    "matching event {%s,%s}. Skipping",
3598 			    event, lec_event));
3599 			continue;
3600 		}
3601 
3602 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3603 
3604 		found = 1;
3605 
3606 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3607 		    ldi_result, lecp->lec_arg, ev_data);
3608 
3609 		/*
3610 		 * If LDI native event and LDI handle closed in context
3611 		 * of notify, NULL out the finalize callback as we have
3612 		 * already called the 1 finalize above allowed in this situation
3613 		 */
3614 		if (lecp->lec_lhp == NULL &&
3615 		    ldi_native_cookie(lecp->lec_cookie)) {
3616 			LDI_EVDBG((CE_NOTE,
3617 			    "ldi_invoke_finalize(): NULLing finalize after "
3618 			    "calling 1 finalize following ldi_close"));
3619 			lecp->lec_finalize = NULL;
3620 		}
3621 	}
3622 	ldi_ev_unlock();
3623 
3624 	if (found)
3625 		return;
3626 
3627 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3628 }
3629 
3630 /*
3631  * Framework function to be called from a layered driver to propagate
3632  * LDI "finalize" events to exported minors.
3633  *
3634  * This function is a public interface exported by the LDI framework
3635  * for use by layered drivers to propagate device events up the software
3636  * stack.
3637  */
3638 void
3639 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3640     ldi_ev_cookie_t cookie, void *ev_data)
3641 {
3642 	dev_t dev;
3643 	major_t major;
3644 	char *evname;
3645 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3646 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3647 	uint_t ct_evtype;
3648 
3649 	ASSERT(dip);
3650 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3651 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3652 	ASSERT(ldi_native_cookie(cookie));
3653 
3654 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3655 
3656 	major = ddi_driver_major(dip);
3657 	if (major == DDI_MAJOR_T_NONE) {
3658 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3659 		(void) ddi_pathname(dip, path);
3660 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3661 		    "for device %s", path);
3662 		kmem_free(path, MAXPATHLEN);
3663 		return;
3664 	}
3665 	dev = makedevice(major, minor);
3666 
3667 	evname = ldi_ev_get_type(cookie);
3668 
3669 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3670 	ct_evtype = ldi_contract_event(evname);
3671 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3672 
3673 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3674 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3675 }
3676 
3677 int
3678 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3679 {
3680 	ldi_ev_callback_impl_t	*lecp;
3681 	ldi_ev_callback_impl_t	*next;
3682 	ldi_ev_callback_impl_t	*found;
3683 	list_t			*listp;
3684 
3685 	ASSERT(!servicing_interrupt());
3686 
3687 	if (id == 0) {
3688 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3689 		return (LDI_EV_FAILURE);
3690 	}
3691 
3692 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3693 	    (void *)id));
3694 
3695 	ldi_ev_lock();
3696 
3697 	listp = &ldi_ev_callback_list.le_head;
3698 	next = found = NULL;
3699 	for (lecp = list_head(listp); lecp; lecp = next) {
3700 		next = list_next(listp, lecp);
3701 		if (lecp->lec_id == id) {
3702 			ASSERT(found == NULL);
3703 			list_remove(listp, lecp);
3704 			found = lecp;
3705 		}
3706 	}
3707 	ldi_ev_unlock();
3708 
3709 	if (found == NULL) {
3710 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3711 		    (void *)id);
3712 		return (LDI_EV_SUCCESS);
3713 	}
3714 
3715 	if (!ldi_native_cookie(found->lec_cookie)) {
3716 		ASSERT(found->lec_notify == NULL);
3717 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3718 		    != DDI_SUCCESS) {
3719 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3720 			    "for id (%p)", (void *)id);
3721 			ldi_ev_lock();
3722 			list_insert_tail(listp, found);
3723 			ldi_ev_unlock();
3724 			return (LDI_EV_FAILURE);
3725 		}
3726 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3727 		    "service removal succeeded"));
3728 	} else {
3729 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3730 		    "LDI native callbacks"));
3731 	}
3732 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3733 
3734 	return (LDI_EV_SUCCESS);
3735 }
3736