xref: /titanic_50/usr/src/uts/common/os/driver_lyr.c (revision 5c51f1241dbbdf2656d0e10011981411ed0c9673)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Layered driver support.
28  */
29 
30 #include <sys/atomic.h>
31 #include <sys/types.h>
32 #include <sys/t_lock.h>
33 #include <sys/param.h>
34 #include <sys/conf.h>
35 #include <sys/systm.h>
36 #include <sys/sysmacros.h>
37 #include <sys/buf.h>
38 #include <sys/cred.h>
39 #include <sys/uio.h>
40 #include <sys/vnode.h>
41 #include <sys/fs/snode.h>
42 #include <sys/open.h>
43 #include <sys/kmem.h>
44 #include <sys/file.h>
45 #include <sys/bootconf.h>
46 #include <sys/pathname.h>
47 #include <sys/bitmap.h>
48 #include <sys/stat.h>
49 #include <sys/dditypes.h>
50 #include <sys/ddi_impldefs.h>
51 #include <sys/ddi.h>
52 #include <sys/sunddi.h>
53 #include <sys/sunndi.h>
54 #include <sys/esunddi.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunldi.h>
57 #include <sys/sunldi_impl.h>
58 #include <sys/errno.h>
59 #include <sys/debug.h>
60 #include <sys/modctl.h>
61 #include <sys/var.h>
62 #include <vm/seg_vn.h>
63 
64 #include <sys/stropts.h>
65 #include <sys/strsubr.h>
66 #include <sys/socket.h>
67 #include <sys/socketvar.h>
68 #include <sys/kstr.h>
69 
70 /*
71  * Device contract related
72  */
73 #include <sys/contract_impl.h>
74 #include <sys/contract/device_impl.h>
75 
76 /*
77  * Define macros to manipulate snode, vnode, and open device flags
78  */
79 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
80 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
81 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
82 
83 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
84 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
85 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
86 
87 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
88 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
89 
90 /*
91  * Define macros for accessing layered driver hash structures
92  */
93 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
94 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
95 
96 /*
97  * Define layered handle flags used in the lh_type field
98  */
99 #define	LH_STREAM	(0x1)	/* handle to a streams device */
100 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
101 
102 /*
103  * Define macro for devid property lookups
104  */
105 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
106 				DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
107 
108 /*
109  * Dummy string for NDI events
110  */
111 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
112 
113 static void ldi_ev_lock(void);
114 static void ldi_ev_unlock(void);
115 
116 #ifdef	LDI_OBSOLETE_EVENT
117 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
118 #endif
119 
120 
121 /*
122  * globals
123  */
124 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
125 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
126 
127 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
128 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
129 static size_t			ldi_handle_hash_count;
130 
131 static struct ldi_ev_callback_list ldi_ev_callback_list;
132 
133 static uint32_t ldi_ev_id_pool = 0;
134 
135 struct ldi_ev_cookie {
136 	char *ck_evname;
137 	uint_t ck_sync;
138 	uint_t ck_ctype;
139 };
140 
141 static struct ldi_ev_cookie ldi_ev_cookies[] = {
142 	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
143 	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
144 	{ NULL}			/* must terminate list */
145 };
146 
147 void
148 ldi_init(void)
149 {
150 	int i;
151 
152 	ldi_handle_hash_count = 0;
153 	for (i = 0; i < LH_HASH_SZ; i++) {
154 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
155 		ldi_handle_hash[i] = NULL;
156 	}
157 	for (i = 0; i < LI_HASH_SZ; i++) {
158 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
159 		ldi_ident_hash[i] = NULL;
160 	}
161 
162 	/*
163 	 * Initialize the LDI event subsystem
164 	 */
165 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
166 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
167 	ldi_ev_callback_list.le_busy = 0;
168 	ldi_ev_callback_list.le_thread = NULL;
169 	list_create(&ldi_ev_callback_list.le_head,
170 	    sizeof (ldi_ev_callback_impl_t),
171 	    offsetof(ldi_ev_callback_impl_t, lec_list));
172 }
173 
174 /*
175  * LDI ident manipulation functions
176  */
177 static uint_t
178 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
179 {
180 	if (dip != NULL) {
181 		uintptr_t k = (uintptr_t)dip;
182 		k >>= (int)highbit(sizeof (struct dev_info));
183 		return ((uint_t)k);
184 	} else if (dev != DDI_DEV_T_NONE) {
185 		return (modid + getminor(dev) + getmajor(dev));
186 	} else {
187 		return (modid);
188 	}
189 }
190 
191 static struct ldi_ident **
192 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
193 {
194 	struct ldi_ident	**lipp = NULL;
195 	uint_t			index = LI_HASH(modid, dip, dev);
196 
197 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
198 
199 	for (lipp = &(ldi_ident_hash[index]);
200 	    (*lipp != NULL);
201 	    lipp = &((*lipp)->li_next)) {
202 		if (((*lipp)->li_modid == modid) &&
203 		    ((*lipp)->li_major == major) &&
204 		    ((*lipp)->li_dip == dip) &&
205 		    ((*lipp)->li_dev == dev))
206 			break;
207 	}
208 
209 	ASSERT(lipp != NULL);
210 	return (lipp);
211 }
212 
213 static struct ldi_ident *
214 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
215 {
216 	struct ldi_ident	*lip, **lipp;
217 	modid_t			modid;
218 	uint_t			index;
219 
220 	ASSERT(mod_name != NULL);
221 
222 	/* get the module id */
223 	modid = mod_name_to_modid(mod_name);
224 	ASSERT(modid != -1);
225 
226 	/* allocate a new ident in case we need it */
227 	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
228 
229 	/* search the hash for a matching ident */
230 	index = LI_HASH(modid, dip, dev);
231 	mutex_enter(&ldi_ident_hash_lock[index]);
232 	lipp = ident_find_ref_nolock(modid, dip, dev, major);
233 
234 	if (*lipp != NULL) {
235 		/* we found an ident in the hash */
236 		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
237 		(*lipp)->li_ref++;
238 		mutex_exit(&ldi_ident_hash_lock[index]);
239 		kmem_free(lip, sizeof (struct ldi_ident));
240 		return (*lipp);
241 	}
242 
243 	/* initialize the new ident */
244 	lip->li_next = NULL;
245 	lip->li_ref = 1;
246 	lip->li_modid = modid;
247 	lip->li_major = major;
248 	lip->li_dip = dip;
249 	lip->li_dev = dev;
250 	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
251 
252 	/* add it to the ident hash */
253 	lip->li_next = ldi_ident_hash[index];
254 	ldi_ident_hash[index] = lip;
255 
256 	mutex_exit(&ldi_ident_hash_lock[index]);
257 	return (lip);
258 }
259 
260 static void
261 ident_hold(struct ldi_ident *lip)
262 {
263 	uint_t			index;
264 
265 	ASSERT(lip != NULL);
266 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
267 	mutex_enter(&ldi_ident_hash_lock[index]);
268 	ASSERT(lip->li_ref > 0);
269 	lip->li_ref++;
270 	mutex_exit(&ldi_ident_hash_lock[index]);
271 }
272 
273 static void
274 ident_release(struct ldi_ident *lip)
275 {
276 	struct ldi_ident	**lipp;
277 	uint_t			index;
278 
279 	ASSERT(lip != NULL);
280 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
281 	mutex_enter(&ldi_ident_hash_lock[index]);
282 
283 	ASSERT(lip->li_ref > 0);
284 	if (--lip->li_ref > 0) {
285 		/* there are more references to this ident */
286 		mutex_exit(&ldi_ident_hash_lock[index]);
287 		return;
288 	}
289 
290 	/* this was the last reference/open for this ident.  free it. */
291 	lipp = ident_find_ref_nolock(
292 	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
293 
294 	ASSERT((lipp != NULL) && (*lipp != NULL));
295 	*lipp = lip->li_next;
296 	mutex_exit(&ldi_ident_hash_lock[index]);
297 	kmem_free(lip, sizeof (struct ldi_ident));
298 }
299 
300 /*
301  * LDI handle manipulation functions
302  */
303 static uint_t
304 handle_hash_func(void *vp)
305 {
306 	uintptr_t k = (uintptr_t)vp;
307 	k >>= (int)highbit(sizeof (vnode_t));
308 	return ((uint_t)k);
309 }
310 
311 static struct ldi_handle **
312 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
313 {
314 	struct ldi_handle	**lhpp = NULL;
315 	uint_t			index = LH_HASH(vp);
316 
317 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
318 
319 	for (lhpp = &(ldi_handle_hash[index]);
320 	    (*lhpp != NULL);
321 	    lhpp = &((*lhpp)->lh_next)) {
322 		if (((*lhpp)->lh_ident == ident) &&
323 		    ((*lhpp)->lh_vp == vp))
324 			break;
325 	}
326 
327 	ASSERT(lhpp != NULL);
328 	return (lhpp);
329 }
330 
331 static struct ldi_handle *
332 handle_find(vnode_t *vp, struct ldi_ident *ident)
333 {
334 	struct ldi_handle	**lhpp;
335 	int			index = LH_HASH(vp);
336 
337 	mutex_enter(&ldi_handle_hash_lock[index]);
338 	lhpp = handle_find_ref_nolock(vp, ident);
339 	mutex_exit(&ldi_handle_hash_lock[index]);
340 	ASSERT(lhpp != NULL);
341 	return (*lhpp);
342 }
343 
344 static struct ldi_handle *
345 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
346 {
347 	struct ldi_handle	*lhp, **lhpp;
348 	uint_t			index;
349 
350 	ASSERT((vp != NULL) && (ident != NULL));
351 
352 	/* allocate a new handle in case we need it */
353 	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
354 
355 	/* search the hash for a matching handle */
356 	index = LH_HASH(vp);
357 	mutex_enter(&ldi_handle_hash_lock[index]);
358 	lhpp = handle_find_ref_nolock(vp, ident);
359 
360 	if (*lhpp != NULL) {
361 		/* we found a handle in the hash */
362 		(*lhpp)->lh_ref++;
363 		mutex_exit(&ldi_handle_hash_lock[index]);
364 
365 		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
366 		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
367 		    (void *)*lhpp, (void *)ident, (void *)vp,
368 		    mod_major_to_name(getmajor(vp->v_rdev)),
369 		    getminor(vp->v_rdev)));
370 
371 		kmem_free(lhp, sizeof (struct ldi_handle));
372 		return (*lhpp);
373 	}
374 
375 	/* initialize the new handle */
376 	lhp->lh_ref = 1;
377 	lhp->lh_vp = vp;
378 	lhp->lh_ident = ident;
379 #ifdef	LDI_OBSOLETE_EVENT
380 	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
381 #endif
382 
383 	/* set the device type for this handle */
384 	lhp->lh_type = 0;
385 	if (STREAMSTAB(getmajor(vp->v_rdev))) {
386 		ASSERT(vp->v_type == VCHR);
387 		lhp->lh_type |= LH_STREAM;
388 	} else {
389 		lhp->lh_type |= LH_CBDEV;
390 	}
391 
392 	/* get holds on other objects */
393 	ident_hold(ident);
394 	ASSERT(vp->v_count >= 1);
395 	VN_HOLD(vp);
396 
397 	/* add it to the handle hash */
398 	lhp->lh_next = ldi_handle_hash[index];
399 	ldi_handle_hash[index] = lhp;
400 	atomic_add_long(&ldi_handle_hash_count, 1);
401 
402 	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
403 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
404 	    (void *)lhp, (void *)ident, (void *)vp,
405 	    mod_major_to_name(getmajor(vp->v_rdev)),
406 	    getminor(vp->v_rdev)));
407 
408 	mutex_exit(&ldi_handle_hash_lock[index]);
409 	return (lhp);
410 }
411 
412 static void
413 handle_release(struct ldi_handle *lhp)
414 {
415 	struct ldi_handle	**lhpp;
416 	uint_t			index;
417 
418 	ASSERT(lhp != NULL);
419 
420 	index = LH_HASH(lhp->lh_vp);
421 	mutex_enter(&ldi_handle_hash_lock[index]);
422 
423 	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
424 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
425 	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
426 	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
427 	    getminor(lhp->lh_vp->v_rdev)));
428 
429 	ASSERT(lhp->lh_ref > 0);
430 	if (--lhp->lh_ref > 0) {
431 		/* there are more references to this handle */
432 		mutex_exit(&ldi_handle_hash_lock[index]);
433 		return;
434 	}
435 
436 	/* this was the last reference/open for this handle.  free it. */
437 	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
438 	ASSERT((lhpp != NULL) && (*lhpp != NULL));
439 	*lhpp = lhp->lh_next;
440 	atomic_add_long(&ldi_handle_hash_count, -1);
441 	mutex_exit(&ldi_handle_hash_lock[index]);
442 
443 	VN_RELE(lhp->lh_vp);
444 	ident_release(lhp->lh_ident);
445 #ifdef	LDI_OBSOLETE_EVENT
446 	mutex_destroy(lhp->lh_lock);
447 #endif
448 	kmem_free(lhp, sizeof (struct ldi_handle));
449 }
450 
451 #ifdef	LDI_OBSOLETE_EVENT
452 /*
453  * LDI event manipulation functions
454  */
455 static void
456 handle_event_add(ldi_event_t *lep)
457 {
458 	struct ldi_handle *lhp = lep->le_lhp;
459 
460 	ASSERT(lhp != NULL);
461 
462 	mutex_enter(lhp->lh_lock);
463 	if (lhp->lh_events == NULL) {
464 		lhp->lh_events = lep;
465 		mutex_exit(lhp->lh_lock);
466 		return;
467 	}
468 
469 	lep->le_next = lhp->lh_events;
470 	lhp->lh_events->le_prev = lep;
471 	lhp->lh_events = lep;
472 	mutex_exit(lhp->lh_lock);
473 }
474 
475 static void
476 handle_event_remove(ldi_event_t *lep)
477 {
478 	struct ldi_handle *lhp = lep->le_lhp;
479 
480 	ASSERT(lhp != NULL);
481 
482 	mutex_enter(lhp->lh_lock);
483 	if (lep->le_prev)
484 		lep->le_prev->le_next = lep->le_next;
485 	if (lep->le_next)
486 		lep->le_next->le_prev = lep->le_prev;
487 	if (lhp->lh_events == lep)
488 		lhp->lh_events = lep->le_next;
489 	mutex_exit(lhp->lh_lock);
490 
491 }
492 
493 static void
494 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
495     void *arg, void *bus_impldata)
496 {
497 	ldi_event_t *lep = (ldi_event_t *)arg;
498 
499 	ASSERT(lep != NULL);
500 
501 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
502 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
503 	    (void *)dip, (void *)event_cookie, (void *)lep));
504 
505 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
506 }
507 #endif
508 
509 /*
510  * LDI open helper functions
511  */
512 
513 /* get a vnode to a device by dev_t and otyp */
514 static int
515 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
516 {
517 	dev_info_t		*dip;
518 	vnode_t			*vp;
519 
520 	/* sanity check required input parameters */
521 	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
522 		return (EINVAL);
523 
524 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
525 		return (ENODEV);
526 
527 	if (STREAMSTAB(getmajor(dev)) && (otyp != OTYP_CHR)) {
528 		ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
529 		return (ENXIO);
530 	}
531 
532 	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
533 	spec_assoc_vp_with_devi(vp, dip);
534 	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
535 
536 	*vpp = vp;
537 	return (0);
538 }
539 
540 /* get a vnode to a device by pathname */
541 static int
542 ldi_vp_from_name(char *path, vnode_t **vpp)
543 {
544 	vnode_t			*vp = NULL;
545 	int			ret;
546 
547 	/* sanity check required input parameters */
548 	if ((path == NULL) || (vpp == NULL))
549 		return (EINVAL);
550 
551 	if (modrootloaded) {
552 		cred_t *saved_cred = curthread->t_cred;
553 
554 		/* we don't want lookupname to fail because of credentials */
555 		curthread->t_cred = kcred;
556 
557 		/*
558 		 * all lookups should be done in the global zone.  but
559 		 * lookupnameat() won't actually do this if an absolute
560 		 * path is passed in.  since the ldi interfaces require an
561 		 * absolute path we pass lookupnameat() a pointer to
562 		 * the character after the leading '/' and tell it to
563 		 * start searching at the current system root directory.
564 		 */
565 		ASSERT(*path == '/');
566 		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
567 		    &vp, rootdir);
568 
569 		/* restore this thread's credentials */
570 		curthread->t_cred = saved_cred;
571 
572 		if (ret == 0) {
573 			if (!vn_matchops(vp, spec_getvnodeops()) ||
574 			    !VTYP_VALID(vp->v_type)) {
575 				VN_RELE(vp);
576 				return (ENXIO);
577 			}
578 		}
579 	}
580 
581 	if (vp == NULL) {
582 		dev_info_t	*dip;
583 		dev_t		dev;
584 		int		spec_type;
585 
586 		/*
587 		 * Root is not mounted, the minor node is not specified,
588 		 * or an OBP path has been specified.
589 		 */
590 
591 		/*
592 		 * Determine if path can be pruned to produce an
593 		 * OBP or devfs path for resolve_pathname.
594 		 */
595 		if (strncmp(path, "/devices/", 9) == 0)
596 			path += strlen("/devices");
597 
598 		/*
599 		 * if no minor node was specified the DEFAULT minor node
600 		 * will be returned.  if there is no DEFAULT minor node
601 		 * one will be fabricated of type S_IFCHR with the minor
602 		 * number equal to the instance number.
603 		 */
604 		ret = resolve_pathname(path, &dip, &dev, &spec_type);
605 		if (ret != 0)
606 			return (ENODEV);
607 
608 		ASSERT(STYP_VALID(spec_type));
609 		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
610 		spec_assoc_vp_with_devi(vp, dip);
611 		ddi_release_devi(dip);
612 	}
613 
614 	*vpp = vp;
615 	return (0);
616 }
617 
618 static int
619 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
620 {
621 	char		*devidstr;
622 	ddi_prop_t	*propp;
623 
624 	/* convert the devid to a string */
625 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
626 		return (0);
627 
628 	/*
629 	 * Search for the devid.  For speed and ease in locking this
630 	 * code directly uses the property implementation.  See
631 	 * ddi_common_devid_to_devlist() for a comment as to why.
632 	 */
633 	mutex_enter(&(DEVI(dip)->devi_lock));
634 
635 	/* check if there is a DDI_DEV_T_NONE devid property */
636 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
637 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
638 	if (propp != NULL) {
639 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
640 			/* a DDI_DEV_T_NONE devid exists and matches */
641 			mutex_exit(&(DEVI(dip)->devi_lock));
642 			ddi_devid_str_free(devidstr);
643 			return (1);
644 		} else {
645 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
646 			mutex_exit(&(DEVI(dip)->devi_lock));
647 			ddi_devid_str_free(devidstr);
648 			return (0);
649 		}
650 	}
651 
652 	/* check if there is a devt specific devid property */
653 	propp = i_ddi_prop_search(dev,
654 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
655 	if (propp != NULL) {
656 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
657 			/* a devt specific devid exists and matches */
658 			mutex_exit(&(DEVI(dip)->devi_lock));
659 			ddi_devid_str_free(devidstr);
660 			return (1);
661 		} else {
662 			/* a devt specific devid exists and doesn't match */
663 			mutex_exit(&(DEVI(dip)->devi_lock));
664 			ddi_devid_str_free(devidstr);
665 			return (0);
666 		}
667 	}
668 
669 	/* we didn't find any devids associated with the device */
670 	mutex_exit(&(DEVI(dip)->devi_lock));
671 	ddi_devid_str_free(devidstr);
672 	return (0);
673 }
674 
675 /* get a handle to a device by devid and minor name */
676 static int
677 ldi_vp_from_devid(ddi_devid_t devid, char *minor_name, vnode_t **vpp)
678 {
679 	dev_info_t		*dip;
680 	vnode_t			*vp;
681 	int			ret, i, ndevs, styp;
682 	dev_t			dev, *devs;
683 
684 	/* sanity check required input parameters */
685 	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
686 		return (EINVAL);
687 
688 	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
689 	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
690 		return (ENODEV);
691 
692 	for (i = 0; i < ndevs; i++) {
693 		dev = devs[i];
694 
695 		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
696 			continue;
697 
698 		/*
699 		 * now we have to verify that the devid of the disk
700 		 * still matches what was requested.
701 		 *
702 		 * we have to do this because the devid could have
703 		 * changed between the call to ddi_lyr_devid_to_devlist()
704 		 * and e_ddi_hold_devi_by_dev().  this is because when
705 		 * ddi_lyr_devid_to_devlist() returns a list of devts
706 		 * there is no kind of hold on those devts so a device
707 		 * could have been replaced out from under us in the
708 		 * interim.
709 		 */
710 		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
711 		    NULL, &styp) == DDI_SUCCESS) &&
712 		    ldi_devid_match(devid, dip, dev))
713 			break;
714 
715 		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
716 	}
717 
718 	ddi_lyr_free_devlist(devs, ndevs);
719 
720 	if (i == ndevs)
721 		return (ENODEV);
722 
723 	ASSERT(STYP_VALID(styp));
724 	vp = makespecvp(dev, STYP_TO_VTYP(styp));
725 	spec_assoc_vp_with_devi(vp, dip);
726 	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */
727 
728 	*vpp = vp;
729 	return (0);
730 }
731 
732 /* given a vnode, open a device */
733 static int
734 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
735     ldi_handle_t *lhp, struct ldi_ident *li)
736 {
737 	struct ldi_handle	*nlhp;
738 	vnode_t			*vp;
739 	int			err;
740 
741 	ASSERT((vpp != NULL) && (*vpp != NULL));
742 	ASSERT((lhp != NULL) && (li != NULL));
743 
744 	vp = *vpp;
745 	/* if the vnode passed in is not a device, then bail */
746 	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
747 		return (ENXIO);
748 
749 	/*
750 	 * the caller may have specified a node that
751 	 * doesn't have cb_ops defined.  the ldi doesn't yet
752 	 * support opening devices without a valid cb_ops.
753 	 */
754 	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
755 		return (ENXIO);
756 
757 	/* open the device */
758 	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
759 		return (err);
760 
761 	/* possible clone open, make sure that we still have a spec node */
762 	ASSERT(vn_matchops(vp, spec_getvnodeops()));
763 
764 	nlhp = handle_alloc(vp, li);
765 
766 	if (vp != *vpp) {
767 		/*
768 		 * allocating the layered handle took a new hold on the vnode
769 		 * so we can release the hold that was returned by the clone
770 		 * open
771 		 */
772 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
773 		    "ldi clone open", (void *)nlhp));
774 	} else {
775 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
776 		    "ldi open", (void *)nlhp));
777 	}
778 
779 	*vpp = vp;
780 	*lhp = (ldi_handle_t)nlhp;
781 	return (0);
782 }
783 
784 /* Call a driver's prop_op(9E) interface */
785 static int
786 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
787     int flags, char *name, caddr_t valuep, int *lengthp)
788 {
789 	struct dev_ops	*ops = NULL;
790 	int		res;
791 
792 	ASSERT((dip != NULL) && (name != NULL));
793 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
794 	ASSERT(lengthp != NULL);
795 
796 	/*
797 	 * we can only be invoked after a driver has been opened and
798 	 * someone has a layered handle to it, so there had better be
799 	 * a valid ops vector.
800 	 */
801 	ops = DEVI(dip)->devi_ops;
802 	ASSERT(ops && ops->devo_cb_ops);
803 
804 	/*
805 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
806 	 * nulldev or even NULL.
807 	 */
808 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
809 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
810 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
811 		return (DDI_PROP_NOT_FOUND);
812 	}
813 
814 	/* check if this is actually DDI_DEV_T_ANY query */
815 	/* check if this is actually a DDI_DEV_T_ANY query */
816 		flags &= ~LDI_DEV_T_ANY;
817 		dev = DDI_DEV_T_ANY;
818 	}
819 
820 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
821 	return (res);
822 }
823 
824 static void
825 i_ldi_prop_op_free(struct prop_driver_data *pdd)
826 {
827 	kmem_free(pdd, pdd->pdd_size);
828 }
829 
830 static caddr_t
831 i_ldi_prop_op_alloc(int prop_len)
832 {
833 	struct prop_driver_data	*pdd;
834 	int			pdd_size;
835 
836 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
837 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
838 	pdd->pdd_size = pdd_size;
839 	pdd->pdd_prop_free = i_ldi_prop_op_free;
840 	return ((caddr_t)&pdd[1]);
841 }
842 
843 /*
844  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
845  * by the typed ldi property lookup interfaces.
846  */
847 static int
848 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
849     caddr_t *datap, int *lengthp, int elem_size)
850 {
851 	caddr_t	prop_val;
852 	int	prop_len, res;
853 
854 	ASSERT((dip != NULL) && (name != NULL));
855 	ASSERT((datap != NULL) && (lengthp != NULL));
856 
857 	/*
858 	 * first call the driver's prop_op() interface to allow it
859 	 * to override default property values.
860 	 */
861 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
862 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
863 	if (res != DDI_PROP_SUCCESS)
864 		return (DDI_PROP_NOT_FOUND);
865 
866 	/* sanity check the property length */
867 	if (prop_len == 0) {
868 		/*
869 		 * the ddi typed interfaces don't allow drivers to
870 		 * create properties with a length of 0.  so we should
871 		 * prevent drivers from returning 0 length dynamic
872 		 * properties for typed property lookups.
873 		 */
874 		return (DDI_PROP_NOT_FOUND);
875 	}
876 
877 	/* sanity check the property length against the element size */
878 	if (elem_size && ((prop_len % elem_size) != 0))
879 		return (DDI_PROP_NOT_FOUND);
880 
881 	/*
882 	 * got it.  now allocate a prop_driver_data struct so that the
883 	 * user can free the property via ddi_prop_free().
884 	 */
885 	prop_val = i_ldi_prop_op_alloc(prop_len);
886 
887 	/* lookup the property again, this time get the value */
888 	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
889 	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
890 	if (res != DDI_PROP_SUCCESS) {
891 		ddi_prop_free(prop_val);
892 		return (DDI_PROP_NOT_FOUND);
893 	}
894 
895 	/* sanity check the property length */
896 	if (prop_len == 0) {
897 		ddi_prop_free(prop_val);
898 		return (DDI_PROP_NOT_FOUND);
899 	}
900 
901 	/* sanity check the property length against the element size */
902 	if (elem_size && ((prop_len % elem_size) != 0)) {
903 		ddi_prop_free(prop_val);
904 		return (DDI_PROP_NOT_FOUND);
905 	}
906 
907 	/*
908 	 * return the prop_driver_data struct and, optionally, the length
909 	 * of the data.
910 	 */
911 	*datap = prop_val;
912 	*lengthp = prop_len;
913 
914 	return (DDI_PROP_SUCCESS);
915 }
916 
917 /*
918  * i_check_string looks at a string property and makes sure it's
919  * a valid null-terminated string
920  */
921 static int
922 i_check_string(char *str, int prop_len)
923 {
924 	int i;
925 
926 	ASSERT(str != NULL);
927 
928 	for (i = 0; i < prop_len; i++) {
929 		if (str[i] == '\0')
930 			return (0);
931 	}
932 	return (1);
933 }
934 
935 /*
936  * i_pack_string_array takes a string array property that is represented
937  * as a concatenation of strings (with the NULL character included for
938  * each string) and converts it into a format that can be returned by
939  * ldi_prop_lookup_string_array.
940  */
941 static int
942 i_pack_string_array(char *str_concat, int prop_len,
943     char ***str_arrayp, int *nelemp)
944 {
945 	int i, nelem, pack_size;
946 	char **str_array, *strptr;
947 
948 	/*
949 	 * first we need to sanity check the input string array.
950 	 * in essence this can be done by making sure that the last
951 	 * character of the array passed in is null.  (meaning the last
952 	 * string in the array is NULL terminated.)
953 	 */
954 	if (str_concat[prop_len - 1] != '\0')
955 		return (1);
956 
957 	/* now let's count the number of strings in the array */
958 	for (nelem = i = 0; i < prop_len; i++)
959 		if (str_concat[i] == '\0')
960 			nelem++;
961 	ASSERT(nelem >= 1);
962 
963 	/* now let's allocate memory for the new packed property */
964 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
965 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
966 
967 	/* let's copy the actual string data into the new property */
968 	strptr = (char *)&(str_array[nelem + 1]);
969 	bcopy(str_concat, strptr, prop_len);
970 
971 	/* now initialize the string array pointers */
972 	for (i = 0; i < nelem; i++) {
973 		str_array[i] = strptr;
974 		strptr += strlen(strptr) + 1;
975 	}
976 	str_array[nelem] = NULL;
977 
978 	/* set the return values */
979 	*str_arrayp = str_array;
980 	*nelemp = nelem;
981 
982 	return (0);
983 }
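/*
 * Illustrative sketch (not part of the original source): for a hypothetical
 * concatenated input of "fast\0safe\0" (prop_len == 10, nelem == 2),
 * i_pack_string_array() builds a single allocation laid out roughly as:
 *
 *	str_array[0]  -> "fast"		(points into the data area below)
 *	str_array[1]  -> "safe"
 *	str_array[2]  == NULL		(array terminator)
 *	data area:    'f','a','s','t','\0','s','a','f','e','\0'
 *
 * Because the backing memory comes from i_ldi_prop_op_alloc(), a caller of
 * ldi_prop_lookup_string_array() can release it with ddi_prop_free().
 */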
984 
985 
986 /*
987  * LDI Project private device usage interfaces
988  */
989 
990 /*
991  * Get a count of how many devices are currently open by different consumers
992  */
993 int
994 ldi_usage_count()
995 {
996 	return (ldi_handle_hash_count);
997 }
998 
999 static void
1000 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
1001 {
1002 	dev_info_t	*dip;
1003 	dev_t		dev;
1004 
1005 	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1006 
1007 	/* get the target devt */
1008 	dev = vp->v_rdev;
1009 
1010 	/* try to get the target dip */
1011 	dip = VTOCS(vp)->s_dip;
1012 	if (dip != NULL) {
1013 		e_ddi_hold_devi(dip);
1014 	} else if (dev != DDI_DEV_T_NONE) {
1015 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1016 	}
1017 
1018 	/* set the target information */
1019 	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1020 	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1021 	ldi_usage->tgt_devt = dev;
1022 	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1023 	ldi_usage->tgt_dip = dip;
1024 }
1025 
1026 
1027 static int
1028 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1029     void *arg, int (*callback)(const ldi_usage_t *, void *))
1030 {
1031 	ldi_usage_t	ldi_usage;
1032 	struct devnames	*dnp;
1033 	dev_info_t	*dip;
1034 	major_t		major;
1035 	dev_t		dev;
1036 	int		ret = LDI_USAGE_CONTINUE;
1037 
1038 	/* set the target device information */
1039 	ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1040 
1041 	/* get the source devt */
1042 	dev = lip->li_dev;
1043 
1044 	/* try to get the source dip */
1045 	dip = lip->li_dip;
1046 	if (dip != NULL) {
1047 		e_ddi_hold_devi(dip);
1048 	} else if (dev != DDI_DEV_T_NONE) {
1049 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1050 	}
1051 
1052 	/* set the valid source information */
1053 	ldi_usage.src_modid = lip->li_modid;
1054 	ldi_usage.src_name = lip->li_modname;
1055 	ldi_usage.src_devt = dev;
1056 	ldi_usage.src_dip = dip;
1057 
1058 	/*
1059 	 * if the source ident represents either:
1060 	 *
1061 	 * - a kernel module (and not a device or device driver)
1062 	 * - a device node
1063 	 *
1064 	 * then we currently have all the info we need to report the
1065 	 * usage information so invoke the callback function.
1066 	 */
1067 	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1068 	    (dip != NULL)) {
1069 		ret = callback(&ldi_usage, arg);
1070 		if (dip != NULL)
1071 			ddi_release_devi(dip);
1072 		if (ldi_usage.tgt_dip != NULL)
1073 			ddi_release_devi(ldi_usage.tgt_dip);
1074 		return (ret);
1075 	}
1076 
1077 	/*
1078 	 * now this is kinda gross.
1079 	 *
1080 	 * what we do here is attempt to associate every device instance
1081 	 * of the source driver on the system with the open target driver.
1082 	 * we do this because we don't know which instance of the device
1083 	 * could potentially access the lower device so we assume that all
1084 	 * the instances could access it.
1085 	 *
1086 	 * there are two ways we could have gotten here:
1087 	 *
1088 	 * 1) this layered ident represents one created using only a
1089 	 *    major number or a driver module name.  this means that when
1090 	 *    it was created we could not associate it with a particular
1091 	 *    dev_t or device instance.
1092 	 *
1093 	 *    when could this possibly happen you ask?
1094 	 *
1095 	 *    a perfect example of this is streams persistent links.
1096 	 *    when a persistent streams link is formed we can't associate
1097 	 *    the lower device stream with any particular upper device
1098 	 *    stream or instance.  this is because any particular upper
1099 	 *    device stream could be closed, then another could be
1100 	 *    opened with a different dev_t and device instance, and it
1101 	 *    would still have access to the lower linked stream.
1102 	 *
1103 	 *    since any instance of the upper streams driver could
1104 	 *    potentially access the lower stream whenever it wants,
1105 	 *    we represent that here by associating the opened lower
1106 	 *    device with every existing device instance of the upper
1107 	 *    streams driver.
1108 	 *
1109 	 * 2) This case should really never happen but we'll include it
1110 	 *    for completeness.
1111 	 *
1112 	 *    it's possible that we could have gotten here because we
1113 	 *    have a dev_t for the upper device but we couldn't find a
1114 	 *    dip associated with that dev_t.
1115 	 *
1116 	 *    the only types of devices that have dev_t without an
1117 	 *    associated dip are unbound DLPIv2 network devices.  These
1118 	 *    types of devices exist to be able to attach a stream to any
1119 	 *    instance of a hardware network device.  since these types of
1120 	 *    devices are usually hardware devices they should never
1121 	 *    really have other devices open.
1122 	 */
1123 	if (dev != DDI_DEV_T_NONE)
1124 		major = getmajor(dev);
1125 	else
1126 		major = lip->li_major;
1127 
1128 	ASSERT((major >= 0) && (major < devcnt));
1129 
1130 	dnp = &devnamesp[major];
1131 	LOCK_DEV_OPS(&dnp->dn_lock);
1132 	dip = dnp->dn_head;
1133 	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1134 		e_ddi_hold_devi(dip);
1135 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1136 
1137 		/* set the source dip */
1138 		ldi_usage.src_dip = dip;
1139 
1140 		/* invoke the callback function */
1141 		ret = callback(&ldi_usage, arg);
1142 
1143 		LOCK_DEV_OPS(&dnp->dn_lock);
1144 		ddi_release_devi(dip);
1145 		dip = ddi_get_next(dip);
1146 	}
1147 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1148 
1149 	/* if there was a target dip, release it */
1150 	if (ldi_usage.tgt_dip != NULL)
1151 		ddi_release_devi(ldi_usage.tgt_dip);
1152 
1153 	return (ret);
1154 }
1155 
1156 /*
1157  * ldi_usage_walker() - this walker reports LDI kernel device usage
1158  * information via the callback() callback function.  the LDI keeps track
1159  * of what devices are being accessed in its own internal data structures.
1160  * this function walks those data structures to determine device usage.
1161  */
1162 void
1163 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1164 {
1165 	struct ldi_handle	*lhp;
1166 	struct ldi_ident	*lip;
1167 	vnode_t			*vp;
1168 	int			i;
1169 	int			ret = LDI_USAGE_CONTINUE;
1170 
1171 	for (i = 0; i < LH_HASH_SZ; i++) {
1172 		mutex_enter(&ldi_handle_hash_lock[i]);
1173 
1174 		lhp = ldi_handle_hash[i];
1175 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1176 			lip = lhp->lh_ident;
1177 			vp = lhp->lh_vp;
1178 
1179 			/* invoke the devinfo callback function */
1180 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1181 
1182 			lhp = lhp->lh_next;
1183 		}
1184 		mutex_exit(&ldi_handle_hash_lock[i]);
1185 
1186 		if (ret != LDI_USAGE_CONTINUE)
1187 			break;
1188 	}
1189 }
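/*
 * A minimal sketch of an ldi_usage_walker() consumer (the callback name and
 * message are hypothetical, not taken from this file):
 *
 *	static int
 *	my_usage_cb(const ldi_usage_t *ldi_usage, void *arg)
 *	{
 *		cmn_err(CE_CONT, "%s is holding %s open\n",
 *		    ldi_usage->src_name, ldi_usage->tgt_name);
 *		return (LDI_USAGE_CONTINUE);
 *	}
 *
 *	...
 *	ldi_usage_walker(NULL, my_usage_cb);
 *
 * Returning any value other than LDI_USAGE_CONTINUE stops the walk early.
 */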
1190 
1191 /*
1192  * LDI Project private interfaces (streams linking interfaces)
1193  *
1194  * Streams supports a type of built in device layering via linking.
1195  * Certain types of streams drivers can be streams multiplexors.
1196  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1197  * These operations allow other streams devices to be linked under the
1198  * multiplexor.  By definition all streams multiplexors are devices
1199  * so this linking is a type of device layering where the multiplexor
1200  * device is layered on top of the device linked below it.
1201  */
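/*
 * Rough sketch of a kernel consumer forming a persistent link with layered
 * handles instead of file descriptors (handle names and open flags below
 * are illustrative).  ldi_ioctl() translates the I_PLINK into _I_PLINK_LH,
 * which is ultimately serviced by ldi_mlink_lh() below:
 *
 *	ldi_handle_t	mux_lh;		(open handle to the multiplexor)
 *	ldi_handle_t	low_lh;		(open handle to the lower device)
 *	int		rval, err;
 *
 *	err = ldi_ioctl(mux_lh, I_PLINK, (intptr_t)low_lh,
 *	    FKIOCTL | FREAD | FWRITE, kcred, &rval);
 */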
1202 
1203 /*
1204  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1205  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1206  *
1207  * The streams framework keeps track of links via the file_t of the lower
1208  * stream.  The LDI keeps track of devices using a vnode.  In the case
1209  * of a streams link created via an LDI handle, ldi_mlink_lh() allocates
1210  * a file_t that the streams framework can use to track the linkage.
1211  */
1212 int
1213 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1214 {
1215 	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
1216 	vnode_t			*vpdown;
1217 	file_t			*fpdown;
1218 	int			err;
1219 
1220 	if (lhp == NULL)
1221 		return (EINVAL);
1222 
1223 	vpdown = lhp->lh_vp;
1224 	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1225 	ASSERT(cmd == _I_PLINK_LH);
1226 
1227 	/*
1228 	 * create a new lower vnode and a file_t that points to it;
1229 	 * streams linking requires a file_t.  falloc() returns with
1230 	 * fpdown locked.
1231 	 */
1232 	VN_HOLD(vpdown);
1233 	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1234 	mutex_exit(&fpdown->f_tlock);
1235 
1236 	/* try to establish the link */
1237 	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1238 
1239 	if (err != 0) {
1240 		/* the link failed, free the file_t and release the vnode */
1241 		mutex_enter(&fpdown->f_tlock);
1242 		unfalloc(fpdown);
1243 		VN_RELE(vpdown);
1244 	}
1245 
1246 	return (err);
1247 }
1248 
1249 /*
1250  * ldi_mlink_fp() is invoked for all successful streams linkages created
1251  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1252  * in its internal state so that the devinfo snapshot code has some
1253  * observability into streams device linkage information.
1254  */
1255 void
1256 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1257 {
1258 	vnode_t			*vp = fpdown->f_vnode;
1259 	struct snode		*sp, *csp;
1260 	ldi_ident_t		li;
1261 	major_t			major;
1262 	int			ret;
1263 
1264 	/* if the lower stream is not a device then return */
1265 	if (!vn_matchops(vp, spec_getvnodeops()))
1266 		return;
1267 
1268 	ASSERT(!servicing_interrupt());
1269 
1270 	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1271 	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1272 	    (void *)stp, (void *)fpdown));
1273 
1274 	sp = VTOS(vp);
1275 	csp = VTOS(sp->s_commonvp);
1276 
1277 	/* check if this was a plink via a layered handle */
1278 	if (lhlink) {
1279 		/*
1280 		 * increment the common snode s_count.
1281 		 *
1282 		 * this is done because after the link operation there
1283 		 * are two ways that s_count can be decremented.
1284 		 *
1285 		 * when the layered handle used to create the link is
1286 		 * closed, spec_close() is called and it will decrement
1287 		 * s_count in the common snode.  if we don't increment
1288 		 * s_count here then this could cause spec_close() to
1289 		 * actually close the device while it's still linked
1290 		 * under a multiplexer.
1291 		 *
1292 		 * also, when the lower stream is unlinked, closef() is
1293 		 * called for the file_t associated with this snode.
1294 		 * closef() will call spec_close(), which will decrement
1295 		 * s_count.  if we don't increment s_count here then this
1296 		 * could cause spec_close() to actually close the device
1297 		 * while there may still be valid layered handles
1298 		 * pointing to it.
1299 		 */
1300 		mutex_enter(&csp->s_lock);
1301 		ASSERT(csp->s_count >= 1);
1302 		csp->s_count++;
1303 		mutex_exit(&csp->s_lock);
1304 
1305 		/*
1306 		 * decrement the f_count.
1307 		 * this is done because the layered driver framework does
1308 		 * not actually cache a copy of the file_t allocated to
1309 		 * do the link.  this is done here instead of in ldi_mlink_lh()
1310 		 * because there is a window in ldi_mlink_lh() between where
1311 		 * mlink_file() returns and where we would decrement the
1312 		 * f_count, during which the stream could be unlinked.
1313 		 */
1314 		mutex_enter(&fpdown->f_tlock);
1315 		fpdown->f_count--;
1316 		mutex_exit(&fpdown->f_tlock);
1317 	}
1318 
1319 	/*
1320 	 * NOTE: here we rely on the streams subsystem not allowing
1321 	 * a stream to be multiplexed more than once.  if this
1322 	 * changes, we break.
1323 	 *
1324 	 * mark the snode/stream as multiplexed
1325 	 */
1326 	mutex_enter(&sp->s_lock);
1327 	ASSERT(!(sp->s_flag & SMUXED));
1328 	sp->s_flag |= SMUXED;
1329 	mutex_exit(&sp->s_lock);
1330 
1331 	/* get a layered ident for the upper stream */
1332 	if (type == LINKNORMAL) {
1333 		/*
1334 		 * if the link is not persistent then we can associate
1335 		 * the upper stream with a dev_t.  this is because the
1336 		 * upper stream is associated with a vnode, which is
1337 		 * associated with a dev_t and this binding can't change
1338 		 * during the life of the stream.  since the link isn't
1339 		 * persistent, once the stream is destroyed the link is
1340 		 * destroyed too.  so the dev_t will be valid for the life
1341 		 * of the link.
1342 		 */
1343 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1344 	} else {
1345 		/*
1346 		 * if the link is persistent we can only associate the
1347 		 * link with a driver (and not a dev_t).  this is
1348 		 * because subsequent opens of the upper device may result
1349 		 * in a different stream (and dev_t) having access to
1350 		 * the lower stream.
1351 		 *
1352 		 * for example, if the upper stream is closed after the
1353 		 * persistent link operation is completed, a subsequent
1354 		 * open of the upper device will create a new stream which
1355 		 * may have a different dev_t and an unlink operation
1356 		 * can be performed using this new upper stream.
1357 		 */
1358 		ASSERT(type == LINKPERSIST);
1359 		major = getmajor(stp->sd_vnode->v_rdev);
1360 		ret = ldi_ident_from_major(major, &li);
1361 	}
1362 
1363 	ASSERT(ret == 0);
1364 	(void) handle_alloc(vp, (struct ldi_ident *)li);
1365 	ldi_ident_release(li);
1366 }
1367 
1368 void
1369 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1370 {
1371 	struct ldi_handle	*lhp;
1372 	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
1373 	struct snode		*sp;
1374 	ldi_ident_t		li;
1375 	major_t			major;
1376 	int			ret;
1377 
1378 	/* if the lower stream is not a device then return */
1379 	if (!vn_matchops(vp, spec_getvnodeops()))
1380 		return;
1381 
1382 	ASSERT(!servicing_interrupt());
1383 	ASSERT((type == LINKNORMAL) || (type == LINKPERSIST));
1384 
1385 	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1386 	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1387 	    (void *)stp, (void *)fpdown));
1388 
1389 	/*
1390 	 * NOTE: here we rely on the streams subsystem not allowing
1391 	 * a stream to be multiplexed more than once.  if this
1392 	 * changes, we break.
1393 	 *
1394 	 * mark the snode/stream as not multiplexed
1395 	 */
1396 	sp = VTOS(vp);
1397 	mutex_enter(&sp->s_lock);
1398 	ASSERT(sp->s_flag & SMUXED);
1399 	sp->s_flag &= ~SMUXED;
1400 	mutex_exit(&sp->s_lock);
1401 
1402 	/*
1403 	 * clear the owner for this snode
1404 	 * see the comment in ldi_mlink_fp() for information about how
1405 	 * the ident is allocated
1406 	 */
1407 	if (type == LINKNORMAL) {
1408 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1409 	} else {
1410 		ASSERT(type == LINKPERSIST);
1411 		major = getmajor(stp->sd_vnode->v_rdev);
1412 		ret = ldi_ident_from_major(major, &li);
1413 	}
1414 
1415 	ASSERT(ret == 0);
1416 	lhp = handle_find(vp, (struct ldi_ident *)li);
1417 	handle_release(lhp);
1418 	ldi_ident_release(li);
1419 }
1420 
1421 /*
1422  * LDI Consolidation private interfaces
1423  */
1424 int
1425 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1426 {
1427 	struct modctl		*modp;
1428 	major_t			major;
1429 	char			*name;
1430 
1431 	if ((modlp == NULL) || (lip == NULL))
1432 		return (EINVAL);
1433 
1434 	ASSERT(!servicing_interrupt());
1435 
1436 	modp = mod_getctl(modlp);
1437 	if (modp == NULL)
1438 		return (EINVAL);
1439 	name = modp->mod_modname;
1440 	if (name == NULL)
1441 		return (EINVAL);
1442 	major = mod_name_to_major(name);
1443 
1444 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1445 
1446 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1447 	    "ldi_ident_from_mod", (void *)*lip, name));
1448 
1449 	return (0);
1450 }
1451 
1452 ldi_ident_t
1453 ldi_ident_from_anon()
1454 {
1455 	ldi_ident_t	lip;
1456 
1457 	ASSERT(!servicing_interrupt());
1458 
1459 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1460 
1461 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1462 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1463 
1464 	return (lip);
1465 }
1466 
1467 
1468 /*
1469  * LDI Public interfaces
1470  */
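/*
 * A condensed, illustrative example of typical LDI consumer usage (the
 * device path and open flags are hypothetical, error handling is elided,
 * and uiop is a struct uio set up by the caller):
 *
 *	ldi_ident_t	li;
 *	ldi_handle_t	lh;
 *
 *	(void) ldi_ident_from_dip(dip, &li);
 *	(void) ldi_open_by_name("/dev/dsk/c0t0d0s0", FREAD, kcred, &lh, li);
 *	(void) ldi_read(lh, uiop, kcred);
 *	(void) ldi_close(lh, FREAD, kcred);
 *	ldi_ident_release(li);
 */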
1471 int
1472 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1473 {
1474 	struct stdata		*stp;
1475 	dev_t			dev;
1476 	char			*name;
1477 
1478 	if ((sq == NULL) || (lip == NULL))
1479 		return (EINVAL);
1480 
1481 	ASSERT(!servicing_interrupt());
1482 
1483 	stp = sq->q_stream;
1484 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1485 		return (EINVAL);
1486 
1487 	dev = stp->sd_vnode->v_rdev;
1488 	name = mod_major_to_name(getmajor(dev));
1489 	if (name == NULL)
1490 		return (EINVAL);
1491 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1492 
1493 	LDI_ALLOCFREE((CE_WARN,
1494 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1495 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1496 	    (void *)stp));
1497 
1498 	return (0);
1499 }
1500 
1501 int
1502 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1503 {
1504 	char			*name;
1505 
1506 	if (lip == NULL)
1507 		return (EINVAL);
1508 
1509 	ASSERT(!servicing_interrupt());
1510 
1511 	name = mod_major_to_name(getmajor(dev));
1512 	if (name == NULL)
1513 		return (EINVAL);
1514 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1515 
1516 	LDI_ALLOCFREE((CE_WARN,
1517 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1518 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1519 
1520 	return (0);
1521 }
1522 
1523 int
1524 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1525 {
1526 	struct dev_info		*devi = (struct dev_info *)dip;
1527 	char			*name;
1528 
1529 	if ((dip == NULL) || (lip == NULL))
1530 		return (EINVAL);
1531 
1532 	ASSERT(!servicing_interrupt());
1533 
1534 	name = mod_major_to_name(devi->devi_major);
1535 	if (name == NULL)
1536 		return (EINVAL);
1537 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1538 
1539 	LDI_ALLOCFREE((CE_WARN,
1540 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1541 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1542 
1543 	return (0);
1544 }
1545 
1546 int
1547 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1548 {
1549 	char			*name;
1550 
1551 	if (lip == NULL)
1552 		return (EINVAL);
1553 
1554 	ASSERT(!servicing_interrupt());
1555 
1556 	name = mod_major_to_name(major);
1557 	if (name == NULL)
1558 		return (EINVAL);
1559 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1560 
1561 	LDI_ALLOCFREE((CE_WARN,
1562 	    "%s: li=0x%p, mod=%s",
1563 	    "ldi_ident_from_major", (void *)*lip, name));
1564 
1565 	return (0);
1566 }
1567 
1568 void
1569 ldi_ident_release(ldi_ident_t li)
1570 {
1571 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1572 	char			*name;
1573 
1574 	if (li == NULL)
1575 		return;
1576 
1577 	ASSERT(!servicing_interrupt());
1578 
1579 	name = ident->li_modname;
1580 
1581 	LDI_ALLOCFREE((CE_WARN,
1582 	    "%s: li=0x%p, mod=%s",
1583 	    "ldi_ident_release", (void *)li, name));
1584 
1585 	ident_release((struct ldi_ident *)li);
1586 }
1587 
1588 /* get a handle to a device by dev_t and otyp */
1589 int
1590 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1591     ldi_handle_t *lhp, ldi_ident_t li)
1592 {
1593 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1594 	int			ret;
1595 	vnode_t			*vp;
1596 
1597 	/* sanity check required input parameters */
1598 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1599 	    (lhp == NULL) || (lip == NULL))
1600 		return (EINVAL);
1601 
1602 	ASSERT(!servicing_interrupt());
1603 
1604 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1605 		return (ret);
1606 
1607 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1608 		*devp = vp->v_rdev;
1609 	}
1610 	VN_RELE(vp);
1611 
1612 	return (ret);
1613 }
1614 
1615 /* get a handle to a device by pathname */
1616 int
1617 ldi_open_by_name(char *pathname, int flag, cred_t *cr,
1618     ldi_handle_t *lhp, ldi_ident_t li)
1619 {
1620 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1621 	int			ret;
1622 	vnode_t			*vp;
1623 
1624 	/* sanity check required input parameters */
1625 	if ((pathname == NULL) || (*pathname != '/') ||
1626 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1627 		return (EINVAL);
1628 
1629 	ASSERT(!servicing_interrupt());
1630 
1631 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1632 		return (ret);
1633 
1634 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1635 	VN_RELE(vp);
1636 
1637 	return (ret);
1638 }
1639 
1640 /* get a handle to a device by devid and minor_name */
1641 int
1642 ldi_open_by_devid(ddi_devid_t devid, char *minor_name,
1643     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1644 {
1645 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1646 	int			ret;
1647 	vnode_t			*vp;
1648 
1649 	/* sanity check required input parameters */
1650 	if ((minor_name == NULL) || (cr == NULL) ||
1651 	    (lhp == NULL) || (lip == NULL))
1652 		return (EINVAL);
1653 
1654 	ASSERT(!servicing_interrupt());
1655 
1656 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1657 		return (ret);
1658 
1659 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1660 	VN_RELE(vp);
1661 
1662 	return (ret);
1663 }
1664 
1665 int
1666 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1667 {
1668 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1669 	struct ldi_event	*lep;
1670 	int			err = 0;
1671 	int			notify = 0;
1672 	list_t			*listp;
1673 	ldi_ev_callback_impl_t	*lecp;
1674 
1675 	if (lh == NULL)
1676 		return (EINVAL);
1677 
1678 	ASSERT(!servicing_interrupt());
1679 
1680 #ifdef	LDI_OBSOLETE_EVENT
1681 
1682 	/*
1683 	 * Any event handlers should have been unregistered by the
1684 	 * time ldi_close() is called.  If they haven't then it's a
1685 	 * bug.
1686 	 *
1687 	 * In a debug kernel we'll panic to make the problem obvious.
1688 	 */
1689 	ASSERT(handlep->lh_events == NULL);
1690 
1691 	/*
1692 	 * On a production kernel we'll "do the right thing" (unregister
1693 	 * the event handlers) and then complain about having to do the
1694 	 * work ourselves.
1695 	 */
1696 	while ((lep = handlep->lh_events) != NULL) {
1697 		err = 1;
1698 		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1699 	}
1700 	if (err) {
1701 		struct ldi_ident *lip = handlep->lh_ident;
1702 		ASSERT(lip != NULL);
1703 		cmn_err(CE_NOTE, "ldi err: %s "
1704 		    "failed to unregister layered event handlers before "
1705 		    "closing devices", lip->li_modname);
1706 	}
1707 #endif
1708 
1709 	/* do a layered close on the device */
1710 	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1711 
1712 	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1713 
1714 	/*
1715 	 * Search the event callback list for callbacks with this
1716 	 * handle. There are 2 cases
1717 	 * 1. Called in the context of a notify. The handle consumer
1718 	 *    is releasing its hold on the device to allow a reconfiguration
1719 	 *    of the device. Simply NULL out the handle and the notify callback.
1720 	 *    The finalize callback is still available so that the consumer
1721 	 *    knows of the final disposition of the device.
1722 	 * 2. Not called in the context of notify. NULL out the handle as well
1723 	 *    as the notify and finalize callbacks. Since the consumer has
1724 	 *    closed the handle, we assume it is not interested in the
1725 	 *    notify and finalize callbacks.
1726 	 */
1727 	ldi_ev_lock();
1728 
1729 	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1730 		notify = 1;
1731 	listp = &ldi_ev_callback_list.le_head;
1732 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1733 		if (lecp->lec_lhp != handlep)
1734 			continue;
1735 		lecp->lec_lhp = NULL;
1736 		lecp->lec_notify = NULL;
1737 		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1738 		if (!notify) {
1739 			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1740 			lecp->lec_finalize = NULL;
1741 		}
1742 	}
1743 
1744 	if (notify)
1745 		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1746 	ldi_ev_unlock();
1747 
1748 	/*
1749 	 * Free the handle even if the device close failed.  why?
1750 	 *
1751 	 * If the device close failed we can't really make assumptions
1752 	 * about the device's state so we shouldn't allow access to the
1753 	 * device via this handle any more.  If the device consumer wants
1754 	 * to access the device again they should open it again.
1755 	 *
1756 	 * This is the same way file/device close failures are handled
1757 	 * in other places like spec_close() and closeandsetf().
1758 	 */
1759 	handle_release(handlep);
1760 	return (err);
1761 }
1762 
1763 int
1764 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1765 {
1766 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1767 	vnode_t			*vp;
1768 	dev_t			dev;
1769 	int			ret;
1770 
1771 	if (lh == NULL)
1772 		return (EINVAL);
1773 
1774 	vp = handlep->lh_vp;
1775 	dev = vp->v_rdev;
1776 	if (handlep->lh_type & LH_CBDEV) {
1777 		ret = cdev_read(dev, uiop, credp);
1778 	} else if (handlep->lh_type & LH_STREAM) {
1779 		ret = strread(vp, uiop, credp);
1780 	} else {
1781 		return (ENOTSUP);
1782 	}
1783 	return (ret);
1784 }
1785 
1786 int
1787 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1788 {
1789 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1790 	vnode_t			*vp;
1791 	dev_t			dev;
1792 	int			ret;
1793 
1794 	if (lh == NULL)
1795 		return (EINVAL);
1796 
1797 	vp = handlep->lh_vp;
1798 	dev = vp->v_rdev;
1799 	if (handlep->lh_type & LH_CBDEV) {
1800 		ret = cdev_write(dev, uiop, credp);
1801 	} else if (handlep->lh_type & LH_STREAM) {
1802 		ret = strwrite(vp, uiop, credp);
1803 	} else {
1804 		return (ENOTSUP);
1805 	}
1806 	return (ret);
1807 }
1808 
1809 int
1810 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1811 {
1812 	int			otyp;
1813 	uint_t			value;
1814 	int64_t			drv_prop64;
1815 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1816 	uint_t			blksize;
1817 	int			blkshift;
1818 
1819 
1820 	if ((lh == NULL) || (sizep == NULL))
1821 		return (DDI_FAILURE);
1822 
1823 	if (handlep->lh_type & LH_STREAM)
1824 		return (DDI_FAILURE);
1825 
1826 	/*
1827 	 * Determine device type (char or block).
1828 	 * Character devices support the Size/size
1829 	 * properties.  Block devices may support the
1830 	 * Nblocks/nblocks or Size/size properties.
1831 	 */
1832 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1833 		return (DDI_FAILURE);
1834 
1835 	if (otyp == OTYP_BLK) {
1836 		if (ldi_prop_exists(lh,
1837 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1838 
1839 			drv_prop64 = ldi_prop_get_int64(lh,
1840 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1841 			    "Nblocks", 0);
1842 			blksize = ldi_prop_get_int(lh,
1843 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1844 			    "blksize", DEV_BSIZE);
1845 			if (blksize == DEV_BSIZE)
1846 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1847 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1848 				    "device-blksize", DEV_BSIZE);
1849 
1850 			/* blksize must be a power of two */
1851 			ASSERT(BIT_ONLYONESET(blksize));
1852 			blkshift = highbit(blksize) - 1;
1853 
1854 			/*
1855 			 * We don't support Nblocks values that don't have
1856 			 * an accurate uint64_t byte count representation.
1857 			 */
1858 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1859 				return (DDI_FAILURE);
1860 
1861 			*sizep = (uint64_t)
1862 			    (((u_offset_t)drv_prop64) << blkshift);
1863 			return (DDI_SUCCESS);
1864 		}
1865 
1866 		if (ldi_prop_exists(lh,
1867 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1868 
1869 			value = ldi_prop_get_int(lh,
1870 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 			    "nblocks", 0);
1872 			blksize = ldi_prop_get_int(lh,
1873 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1874 			    "blksize", DEV_BSIZE);
1875 			if (blksize == DEV_BSIZE)
1876 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1877 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1878 				    "device-blksize", DEV_BSIZE);
1879 
1880 			/* blksize must be a power of two */
1881 			ASSERT(BIT_ONLYONESET(blksize));
1882 			blkshift = highbit(blksize) - 1;
1883 
1884 			/*
1885 			 * We don't support nblocks values that don't have an
1886 			 * accurate uint64_t byte count representation.
1887 			 */
1888 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1889 				return (DDI_FAILURE);
1890 
1891 			*sizep = (uint64_t)
1892 			    (((u_offset_t)value) << blkshift);
1893 			return (DDI_SUCCESS);
1894 		}
1895 	}
1896 
1897 	if (ldi_prop_exists(lh,
1898 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1899 
1900 		drv_prop64 = ldi_prop_get_int64(lh,
1901 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1902 		*sizep = (uint64_t)drv_prop64;
1903 		return (DDI_SUCCESS);
1904 	}
1905 
1906 	if (ldi_prop_exists(lh,
1907 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1908 
1909 		value = ldi_prop_get_int(lh,
1910 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1911 		*sizep = (uint64_t)value;
1912 		return (DDI_SUCCESS);
1913 	}
1914 
1915 	/* unable to determine device size */
1916 	return (DDI_FAILURE);
1917 }
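
/*
 * Illustrative sketch (not part of this file): unlike most LDI entry points,
 * ldi_get_size() returns DDI_SUCCESS/DDI_FAILURE rather than an errno, so a
 * hypothetical consumer would check it as follows.
 *
 *	uint64_t size;
 *
 *	if (ldi_get_size(lh, &size) != DDI_SUCCESS)
 *		return (EIO);
 */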
1918 
1919 int
1920 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1921 	cred_t *cr, int *rvalp)
1922 {
1923 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1924 	vnode_t			*vp;
1925 	dev_t			dev;
1926 	int			ret, copymode;
1927 
1928 	if (lh == NULL)
1929 		return (EINVAL);
1930 
1931 	/*
1932 	 * if the data pointed to by arg is located in the kernel then
1933 	 * make sure the FNATIVE flag is set.
1934 	 */
1935 	if (mode & FKIOCTL)
1936 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1937 
1938 	vp = handlep->lh_vp;
1939 	dev = vp->v_rdev;
1940 	if (handlep->lh_type & LH_CBDEV) {
1941 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1942 	} else if (handlep->lh_type & LH_STREAM) {
1943 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1944 
1945 		/*
1946 		 * if we get an I_PLINK from within the kernel, the
1947 		 * arg is a layered handle pointer instead of
1948 		 * a file descriptor, so we translate this ioctl
1949 		 * into a private one that can handle this.
1950 		 */
1951 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1952 			cmd = _I_PLINK_LH;
1953 
1954 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1955 	} else {
1956 		return (ENOTSUP);
1957 	}
1958 
1959 	return (ret);
1960 }
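
/*
 * Illustrative sketch (not part of this file): a kernel caller passing a
 * kernel-resident argument must set FKIOCTL so the target driver performs
 * kernel-to-kernel copying.  The DKIOCGMEDIAINFO ioctl and struct dk_minfo
 * (from <sys/dkio.h>) are used purely as an example target here.
 *
 *	struct dk_minfo	minfo;
 *	int		rval, err;
 *
 *	err = ldi_ioctl(lh, DKIOCGMEDIAINFO, (intptr_t)&minfo,
 *	    FKIOCTL | FREAD, kcred, &rval);
 */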
1961 
1962 int
1963 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1964     struct pollhead **phpp)
1965 {
1966 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1967 	vnode_t			*vp;
1968 	dev_t			dev;
1969 	int			ret;
1970 
1971 	if (lh == NULL)
1972 		return (EINVAL);
1973 
1974 	vp = handlep->lh_vp;
1975 	dev = vp->v_rdev;
1976 	if (handlep->lh_type & LH_CBDEV) {
1977 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
1978 	} else if (handlep->lh_type & LH_STREAM) {
1979 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
1980 	} else {
1981 		return (ENOTSUP);
1982 	}
1983 
1984 	return (ret);
1985 }
1986 
1987 int
1988 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
1989 	int flags, char *name, caddr_t valuep, int *length)
1990 {
1991 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1992 	dev_t			dev;
1993 	dev_info_t		*dip;
1994 	int			ret;
1995 	struct snode		*csp;
1996 
1997 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
1998 		return (DDI_PROP_INVAL_ARG);
1999 
2000 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2001 		return (DDI_PROP_INVAL_ARG);
2002 
2003 	if (length == NULL)
2004 		return (DDI_PROP_INVAL_ARG);
2005 
2006 	/*
2007 	 * try to find the associated dip,
2008 	 * this places a hold on the driver
2009 	 */
2010 	dev = handlep->lh_vp->v_rdev;
2011 
2012 	csp = VTOCS(handlep->lh_vp);
2013 	mutex_enter(&csp->s_lock);
2014 	if ((dip = csp->s_dip) != NULL)
2015 		e_ddi_hold_devi(dip);
2016 	mutex_exit(&csp->s_lock);
2017 	if (dip == NULL)
2018 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2019 
2020 	if (dip == NULL)
2021 		return (DDI_PROP_NOT_FOUND);
2022 
2023 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2024 	ddi_release_devi(dip);
2025 
2026 	return (ret);
2027 }
2028 
2029 int
2030 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2031 {
2032 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2033 	dev_t			dev;
2034 
2035 	if ((lh == NULL) || (bp == NULL))
2036 		return (EINVAL);
2037 
2038 	/* this entry point is only supported for cb devices */
2039 	dev = handlep->lh_vp->v_rdev;
2040 	if (!(handlep->lh_type & LH_CBDEV))
2041 		return (ENOTSUP);
2042 
2043 	bp->b_edev = dev;
2044 	bp->b_dev = cmpdev(dev);
2045 	return (bdev_strategy(bp));
2046 }
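
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * issuing a synchronous read through ldi_strategy().  Note that b_edev and
 * b_dev are filled in by ldi_strategy() itself; "buf" and "lblkno" are
 * assumptions supplied by the caller.
 *
 *	struct buf	*bp = getrbuf(KM_SLEEP);
 *	int		err;
 *
 *	bp->b_flags = B_READ;
 *	bp->b_bcount = DEV_BSIZE;
 *	bp->b_un.b_addr = buf;
 *	bp->b_lblkno = lblkno;
 *	(void) ldi_strategy(lh, bp);
 *	err = biowait(bp);
 *	freerbuf(bp);
 */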
2047 
2048 int
2049 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2050 {
2051 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2052 	dev_t			dev;
2053 
2054 	if (lh == NULL)
2055 		return (EINVAL);
2056 
2057 	/* this entry point is only supported for cb devices */
2058 	dev = handlep->lh_vp->v_rdev;
2059 	if (!(handlep->lh_type & LH_CBDEV))
2060 		return (ENOTSUP);
2061 
2062 	return (bdev_dump(dev, addr, blkno, nblk));
2063 }
2064 
2065 int
2066 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2067     size_t len, size_t *maplen, uint_t model)
2068 {
2069 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2070 	dev_t			dev;
2071 
2072 	if (lh == NULL)
2073 		return (EINVAL);
2074 
2075 	/* this entry point is only supported for cb devices */
2076 	dev = handlep->lh_vp->v_rdev;
2077 	if (!(handlep->lh_type & LH_CBDEV))
2078 		return (ENOTSUP);
2079 
2080 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2081 }
2082 
2083 int
2084 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2085 {
2086 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2087 	dev_t			dev;
2088 	struct cb_ops		*cb;
2089 
2090 	if (lh == NULL)
2091 		return (EINVAL);
2092 
2093 	/* this entry point is only supported for cb devices */
2094 	if (!(handlep->lh_type & LH_CBDEV))
2095 		return (ENOTSUP);
2096 
2097 	/*
2098 	 * Kaio is only supported on block devices.
2099 	 */
2100 	dev = handlep->lh_vp->v_rdev;
2101 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2102 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2103 		return (ENOTSUP);
2104 
2105 	if (cb->cb_aread == NULL)
2106 		return (ENOTSUP);
2107 
2108 	return (cb->cb_aread(dev, aio_reqp, cr));
2109 }
2110 
2111 int
2112 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2113 {
2114 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2115 	struct cb_ops		*cb;
2116 	dev_t			dev;
2117 
2118 	if (lh == NULL)
2119 		return (EINVAL);
2120 
2121 	/* this entry point is only supported for cb devices */
2122 	if (!(handlep->lh_type & LH_CBDEV))
2123 		return (ENOTSUP);
2124 
2125 	/*
2126 	 * Kaio is only supported on block devices.
2127 	 */
2128 	dev = handlep->lh_vp->v_rdev;
2129 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2130 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2131 		return (ENOTSUP);
2132 
2133 	if (cb->cb_awrite == NULL)
2134 		return (ENOTSUP);
2135 
2136 	return (cb->cb_awrite(dev, aio_reqp, cr));
2137 }
2138 
2139 int
2140 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2141 {
2142 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2143 	int			ret;
2144 
2145 	if ((lh == NULL) || (smp == NULL))
2146 		return (EINVAL);
2147 
2148 	if (!(handlep->lh_type & LH_STREAM)) {
2149 		freemsg(smp);
2150 		return (ENOTSUP);
2151 	}
2152 
2153 	/*
2154 	 * If we don't have db_credp, set it. Note that we cannot be called
2155 	 * from interrupt context.
2156 	 */
2157 	if (msg_getcred(smp, NULL) == NULL)
2158 		mblk_setcred(smp, CRED(), curproc->p_pid);
2159 
2160 	/* Send message while honoring flow control */
2161 	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2162 	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2163 
2164 	return (ret);
2165 }
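
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * sending "len" bytes of "data" downstream with ldi_putmsg().  Ownership of
 * the message passes with the call (note the freemsg() in the ENOTSUP path
 * above), so the caller should not reference it afterwards.
 *
 *	mblk_t	*mp;
 *	int	err;
 *
 *	if ((mp = allocb(len, BPRI_MED)) == NULL)
 *		return (ENOMEM);
 *	bcopy(data, mp->b_wptr, len);
 *	mp->b_wptr += len;
 *	err = ldi_putmsg(lh, mp);
 */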
2166 
2167 int
2168 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2169 {
2170 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2171 	clock_t			timout; /* milliseconds */
2172 	uchar_t			pri;
2173 	rval_t			rval;
2174 	int			ret, pflag;
2175 
2176 
2177 	if (lh == NULL)
2178 		return (EINVAL);
2179 
2180 	if (!(handlep->lh_type & LH_STREAM))
2181 		return (ENOTSUP);
2182 
2183 	/* Convert the timestruc_t timeout to milliseconds */
2184 	if (timeo != NULL) {
2185 		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2186 		if (timout > INT_MAX)
2187 			return (EINVAL);
2188 	} else
2189 		timout = -1;
2190 
2191 	/* Wait for up to timout milliseconds for a message */
2192 	pflag = MSG_ANY;
2193 	pri = 0;
2194 	*rmp = NULL;
2195 	ret = kstrgetmsg(handlep->lh_vp,
2196 	    rmp, NULL, &pri, &pflag, timout, &rval);
2197 	return (ret);
2198 }
2199 
2200 int
2201 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2202 {
2203 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2204 
2205 	if ((lh == NULL) || (devp == NULL))
2206 		return (EINVAL);
2207 
2208 	*devp = handlep->lh_vp->v_rdev;
2209 	return (0);
2210 }
2211 
2212 int
2213 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2214 {
2215 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2216 
2217 	if ((lh == NULL) || (otyp == NULL))
2218 		return (EINVAL);
2219 
2220 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2221 	return (0);
2222 }
2223 
2224 int
2225 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2226 {
2227 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2228 	int			ret;
2229 	dev_t			dev;
2230 
2231 	if ((lh == NULL) || (devid == NULL))
2232 		return (EINVAL);
2233 
2234 	dev = handlep->lh_vp->v_rdev;
2235 
2236 	ret = ddi_lyr_get_devid(dev, devid);
2237 	if (ret != DDI_SUCCESS)
2238 		return (ENOTSUP);
2239 
2240 	return (0);
2241 }
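
/*
 * Illustrative sketch (not part of this file): a consumer that obtains the
 * devid is responsible for releasing it with ddi_devid_free() when done.
 *
 *	ddi_devid_t devid;
 *
 *	if (ldi_get_devid(lh, &devid) == 0) {
 *		... use devid ...
 *		ddi_devid_free(devid);
 *	}
 */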
2242 
2243 int
2244 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2245 {
2246 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2247 	int			ret, otyp;
2248 	dev_t			dev;
2249 
2250 	if ((lh == NULL) || (minor_name == NULL))
2251 		return (EINVAL);
2252 
2253 	dev = handlep->lh_vp->v_rdev;
2254 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2255 
2256 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2257 	if (ret != DDI_SUCCESS)
2258 		return (ENOTSUP);
2259 
2260 	return (0);
2261 }
2262 
2263 int
2264 ldi_prop_lookup_int_array(ldi_handle_t lh,
2265     uint_t flags, char *name, int **data, uint_t *nelements)
2266 {
2267 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2268 	dev_info_t		*dip;
2269 	dev_t			dev;
2270 	int			res;
2271 	struct snode		*csp;
2272 
2273 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2274 		return (DDI_PROP_INVAL_ARG);
2275 
2276 	dev = handlep->lh_vp->v_rdev;
2277 
2278 	csp = VTOCS(handlep->lh_vp);
2279 	mutex_enter(&csp->s_lock);
2280 	if ((dip = csp->s_dip) != NULL)
2281 		e_ddi_hold_devi(dip);
2282 	mutex_exit(&csp->s_lock);
2283 	if (dip == NULL)
2284 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2285 
2286 	if (dip == NULL) {
2287 		flags |= DDI_UNBND_DLPI2;
2288 	} else if (flags & LDI_DEV_T_ANY) {
2289 		flags &= ~LDI_DEV_T_ANY;
2290 		dev = DDI_DEV_T_ANY;
2291 	}
2292 
2293 	if (dip != NULL) {
2294 		int *prop_val, prop_len;
2295 
2296 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2297 		    (caddr_t *)&prop_val, &prop_len, sizeof (int));
2298 
2299 		/* if we got it then return it */
2300 		if (res == DDI_PROP_SUCCESS) {
2301 			*nelements = prop_len / sizeof (int);
2302 			*data = prop_val;
2303 
2304 			ddi_release_devi(dip);
2305 			return (res);
2306 		}
2307 	}
2308 
2309 	/* call the normal property interfaces */
2310 	res = ddi_prop_lookup_int_array(dev, dip, flags,
2311 	    name, data, nelements);
2312 
2313 	if (dip != NULL)
2314 		ddi_release_devi(dip);
2315 
2316 	return (res);
2317 }
2318 
2319 int
2320 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2321     uint_t flags, char *name, int64_t **data, uint_t *nelements)
2322 {
2323 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2324 	dev_info_t		*dip;
2325 	dev_t			dev;
2326 	int			res;
2327 	struct snode		*csp;
2328 
2329 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2330 		return (DDI_PROP_INVAL_ARG);
2331 
2332 	dev = handlep->lh_vp->v_rdev;
2333 
2334 	csp = VTOCS(handlep->lh_vp);
2335 	mutex_enter(&csp->s_lock);
2336 	if ((dip = csp->s_dip) != NULL)
2337 		e_ddi_hold_devi(dip);
2338 	mutex_exit(&csp->s_lock);
2339 	if (dip == NULL)
2340 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2341 
2342 	if (dip == NULL) {
2343 		flags |= DDI_UNBND_DLPI2;
2344 	} else if (flags & LDI_DEV_T_ANY) {
2345 		flags &= ~LDI_DEV_T_ANY;
2346 		dev = DDI_DEV_T_ANY;
2347 	}
2348 
2349 	if (dip != NULL) {
2350 		int64_t	*prop_val;
2351 		int	prop_len;
2352 
2353 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2354 		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2355 
2356 		/* if we got it then return it */
2357 		if (res == DDI_PROP_SUCCESS) {
2358 			*nelements = prop_len / sizeof (int64_t);
2359 			*data = prop_val;
2360 
2361 			ddi_release_devi(dip);
2362 			return (res);
2363 		}
2364 	}
2365 
2366 	/* call the normal property interfaces */
2367 	res = ddi_prop_lookup_int64_array(dev, dip, flags,
2368 	    name, data, nelements);
2369 
2370 	if (dip != NULL)
2371 		ddi_release_devi(dip);
2372 
2373 	return (res);
2374 }
2375 
2376 int
2377 ldi_prop_lookup_string_array(ldi_handle_t lh,
2378     uint_t flags, char *name, char ***data, uint_t *nelements)
2379 {
2380 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2381 	dev_info_t		*dip;
2382 	dev_t			dev;
2383 	int			res;
2384 	struct snode		*csp;
2385 
2386 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2387 		return (DDI_PROP_INVAL_ARG);
2388 
2389 	dev = handlep->lh_vp->v_rdev;
2390 
2391 	csp = VTOCS(handlep->lh_vp);
2392 	mutex_enter(&csp->s_lock);
2393 	if ((dip = csp->s_dip) != NULL)
2394 		e_ddi_hold_devi(dip);
2395 	mutex_exit(&csp->s_lock);
2396 	if (dip == NULL)
2397 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2398 
2399 	if (dip == NULL) {
2400 		flags |= DDI_UNBND_DLPI2;
2401 	} else if (flags & LDI_DEV_T_ANY) {
2402 		flags &= ~LDI_DEV_T_ANY;
2403 		dev = DDI_DEV_T_ANY;
2404 	}
2405 
2406 	if (dip != NULL) {
2407 		char	*prop_val;
2408 		int	prop_len;
2409 
2410 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2411 		    (caddr_t *)&prop_val, &prop_len, 0);
2412 
2413 		/* if we got it then return it */
2414 		if (res == DDI_PROP_SUCCESS) {
2415 			char	**str_array;
2416 			int	nelem;
2417 
2418 			/*
2419 			 * pack the returned string array into the format
2420 			 * our callers expect
2421 			 */
2422 			if (i_pack_string_array(prop_val, prop_len,
2423 			    &str_array, &nelem) == 0) {
2424 
2425 				*data = str_array;
2426 				*nelements = nelem;
2427 
2428 				ddi_prop_free(prop_val);
2429 				ddi_release_devi(dip);
2430 				return (res);
2431 			}
2432 
2433 			/*
2434 			 * the format of the returned property must have
2435 			 * been bad so throw it out
2436 			 */
2437 			ddi_prop_free(prop_val);
2438 		}
2439 	}
2440 
2441 	/* call the normal property interfaces */
2442 	res = ddi_prop_lookup_string_array(dev, dip, flags,
2443 	    name, data, nelements);
2444 
2445 	if (dip != NULL)
2446 		ddi_release_devi(dip);
2447 
2448 	return (res);
2449 }
2450 
2451 int
2452 ldi_prop_lookup_string(ldi_handle_t lh,
2453     uint_t flags, char *name, char **data)
2454 {
2455 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2456 	dev_info_t		*dip;
2457 	dev_t			dev;
2458 	int			res;
2459 	struct snode		*csp;
2460 
2461 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2462 		return (DDI_PROP_INVAL_ARG);
2463 
2464 	dev = handlep->lh_vp->v_rdev;
2465 
2466 	csp = VTOCS(handlep->lh_vp);
2467 	mutex_enter(&csp->s_lock);
2468 	if ((dip = csp->s_dip) != NULL)
2469 		e_ddi_hold_devi(dip);
2470 	mutex_exit(&csp->s_lock);
2471 	if (dip == NULL)
2472 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2473 
2474 	if (dip == NULL) {
2475 		flags |= DDI_UNBND_DLPI2;
2476 	} else if (flags & LDI_DEV_T_ANY) {
2477 		flags &= ~LDI_DEV_T_ANY;
2478 		dev = DDI_DEV_T_ANY;
2479 	}
2480 
2481 	if (dip != NULL) {
2482 		char	*prop_val;
2483 		int	prop_len;
2484 
2485 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2486 		    (caddr_t *)&prop_val, &prop_len, 0);
2487 
2488 		/* if we got it then return it */
2489 		if (res == DDI_PROP_SUCCESS) {
2490 			/*
2491 			 * sanity check the value returned.
2492 			 */
2493 			if (i_check_string(prop_val, prop_len)) {
2494 				ddi_prop_free(prop_val);
2495 			} else {
2496 				*data = prop_val;
2497 				ddi_release_devi(dip);
2498 				return (res);
2499 			}
2500 		}
2501 	}
2502 
2503 	/* call the normal property interfaces */
2504 	res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2505 
2506 	if (dip != NULL)
2507 		ddi_release_devi(dip);
2508 
2509 #ifdef DEBUG
2510 	if (res == DDI_PROP_SUCCESS) {
2511 		/*
2512 		 * keep ourselves honest:
2513 		 * make sure the framework returns strings in the
2514 		 * same format as we're demanding from drivers.
2515 		 */
2516 		struct prop_driver_data	*pdd;
2517 		int			pdd_prop_size;
2518 
2519 		pdd = ((struct prop_driver_data *)(*data)) - 1;
2520 		pdd_prop_size = pdd->pdd_size -
2521 		    sizeof (struct prop_driver_data);
2522 		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2523 	}
2524 #endif /* DEBUG */
2525 
2526 	return (res);
2527 }
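
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * looking up a string property; the property name below is only an example.
 * Strings returned with DDI_PROP_SUCCESS must be released with
 * ddi_prop_free().
 *
 *	char *str;
 *
 *	if (ldi_prop_lookup_string(lh, LDI_DEV_T_ANY | DDI_PROP_DONTPASS,
 *	    "inquiry-product-id", &str) == DDI_PROP_SUCCESS) {
 *		... use str ...
 *		ddi_prop_free(str);
 *	}
 */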
2528 
2529 int
2530 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2531     uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2532 {
2533 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2534 	dev_info_t		*dip;
2535 	dev_t			dev;
2536 	int			res;
2537 	struct snode		*csp;
2538 
2539 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2540 		return (DDI_PROP_INVAL_ARG);
2541 
2542 	dev = handlep->lh_vp->v_rdev;
2543 
2544 	csp = VTOCS(handlep->lh_vp);
2545 	mutex_enter(&csp->s_lock);
2546 	if ((dip = csp->s_dip) != NULL)
2547 		e_ddi_hold_devi(dip);
2548 	mutex_exit(&csp->s_lock);
2549 	if (dip == NULL)
2550 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2551 
2552 	if (dip == NULL) {
2553 		flags |= DDI_UNBND_DLPI2;
2554 	} else if (flags & LDI_DEV_T_ANY) {
2555 		flags &= ~LDI_DEV_T_ANY;
2556 		dev = DDI_DEV_T_ANY;
2557 	}
2558 
2559 	if (dip != NULL) {
2560 		uchar_t	*prop_val;
2561 		int	prop_len;
2562 
2563 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2564 		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2565 
2566 		/* if we got it then return it */
2567 		if (res == DDI_PROP_SUCCESS) {
2568 			*nelements = prop_len / sizeof (uchar_t);
2569 			*data = prop_val;
2570 
2571 			ddi_release_devi(dip);
2572 			return (res);
2573 		}
2574 	}
2575 
2576 	/* call the normal property interfaces */
2577 	res = ddi_prop_lookup_byte_array(dev, dip, flags,
2578 	    name, data, nelements);
2579 
2580 	if (dip != NULL)
2581 		ddi_release_devi(dip);
2582 
2583 	return (res);
2584 }
2585 
2586 int
2587 ldi_prop_get_int(ldi_handle_t lh,
2588     uint_t flags, char *name, int defvalue)
2589 {
2590 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2591 	dev_info_t		*dip;
2592 	dev_t			dev;
2593 	int			res;
2594 	struct snode		*csp;
2595 
2596 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2597 		return (defvalue);
2598 
2599 	dev = handlep->lh_vp->v_rdev;
2600 
2601 	csp = VTOCS(handlep->lh_vp);
2602 	mutex_enter(&csp->s_lock);
2603 	if ((dip = csp->s_dip) != NULL)
2604 		e_ddi_hold_devi(dip);
2605 	mutex_exit(&csp->s_lock);
2606 	if (dip == NULL)
2607 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2608 
2609 	if (dip == NULL) {
2610 		flags |= DDI_UNBND_DLPI2;
2611 	} else if (flags & LDI_DEV_T_ANY) {
2612 		flags &= ~LDI_DEV_T_ANY;
2613 		dev = DDI_DEV_T_ANY;
2614 	}
2615 
2616 	if (dip != NULL) {
2617 		int	prop_val;
2618 		int	prop_len;
2619 
2620 		/*
2621 		 * first call the driver's prop_op interface to allow it
2622 		 * to override default property values.
2623 		 */
2624 		prop_len = sizeof (int);
2625 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2626 		    flags | DDI_PROP_DYNAMIC, name,
2627 		    (caddr_t)&prop_val, &prop_len);
2628 
2629 		/* if we got it then return it */
2630 		if ((res == DDI_PROP_SUCCESS) &&
2631 		    (prop_len == sizeof (int))) {
2632 			res = prop_val;
2633 			ddi_release_devi(dip);
2634 			return (res);
2635 		}
2636 	}
2637 
2638 	/* call the normal property interfaces */
2639 	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2640 
2641 	if (dip != NULL)
2642 		ddi_release_devi(dip);
2643 
2644 	return (res);
2645 }
2646 
2647 int64_t
2648 ldi_prop_get_int64(ldi_handle_t lh,
2649     uint_t flags, char *name, int64_t defvalue)
2650 {
2651 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2652 	dev_info_t		*dip;
2653 	dev_t			dev;
2654 	int64_t			res;
2655 	struct snode		*csp;
2656 
2657 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2658 		return (defvalue);
2659 
2660 	dev = handlep->lh_vp->v_rdev;
2661 
2662 	csp = VTOCS(handlep->lh_vp);
2663 	mutex_enter(&csp->s_lock);
2664 	if ((dip = csp->s_dip) != NULL)
2665 		e_ddi_hold_devi(dip);
2666 	mutex_exit(&csp->s_lock);
2667 	if (dip == NULL)
2668 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2669 
2670 	if (dip == NULL) {
2671 		flags |= DDI_UNBND_DLPI2;
2672 	} else if (flags & LDI_DEV_T_ANY) {
2673 		flags &= ~LDI_DEV_T_ANY;
2674 		dev = DDI_DEV_T_ANY;
2675 	}
2676 
2677 	if (dip != NULL) {
2678 		int64_t	prop_val;
2679 		int	prop_len;
2680 
2681 		/*
2682 		 * first call the driver's prop_op interface to allow it
2683 		 * to override default property values.
2684 		 */
2685 		prop_len = sizeof (int64_t);
2686 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2687 		    flags | DDI_PROP_DYNAMIC, name,
2688 		    (caddr_t)&prop_val, &prop_len);
2689 
2690 		/* if we got it then return it */
2691 		if ((res == DDI_PROP_SUCCESS) &&
2692 		    (prop_len == sizeof (int64_t))) {
2693 			res = prop_val;
2694 			ddi_release_devi(dip);
2695 			return (res);
2696 		}
2697 	}
2698 
2699 	/* call the normal property interfaces */
2700 	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2701 
2702 	if (dip != NULL)
2703 		ddi_release_devi(dip);
2704 
2705 	return (res);
2706 }
2707 
2708 int
2709 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2710 {
2711 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2712 	dev_info_t		*dip;
2713 	dev_t			dev;
2714 	int			res, prop_len;
2715 	struct snode		*csp;
2716 
2717 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2718 		return (0);
2719 
2720 	dev = handlep->lh_vp->v_rdev;
2721 
2722 	csp = VTOCS(handlep->lh_vp);
2723 	mutex_enter(&csp->s_lock);
2724 	if ((dip = csp->s_dip) != NULL)
2725 		e_ddi_hold_devi(dip);
2726 	mutex_exit(&csp->s_lock);
2727 	if (dip == NULL)
2728 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2729 
2730 	/* if NULL dip, prop does NOT exist */
2731 	if (dip == NULL)
2732 		return (0);
2733 
2734 	if (flags & LDI_DEV_T_ANY) {
2735 		flags &= ~LDI_DEV_T_ANY;
2736 		dev = DDI_DEV_T_ANY;
2737 	}
2738 
2739 	/*
2740 	 * first call the driver's prop_op interface to allow it
2741 	 * to override default property values.
2742 	 */
2743 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
2744 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2745 
2746 	if (res == DDI_PROP_SUCCESS) {
2747 		ddi_release_devi(dip);
2748 		return (1);
2749 	}
2750 
2751 	/* call the normal property interfaces */
2752 	res = ddi_prop_exists(dev, dip, flags, name);
2753 
2754 	ddi_release_devi(dip);
2755 	return (res);
2756 }
2757 
2758 #ifdef	LDI_OBSOLETE_EVENT
2759 
2760 int
2761 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2762 {
2763 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2764 	dev_info_t		*dip;
2765 	dev_t			dev;
2766 	int			res;
2767 	struct snode		*csp;
2768 
2769 	if ((lh == NULL) || (name == NULL) ||
2770 	    (strlen(name) == 0) || (ecp == NULL)) {
2771 		return (DDI_FAILURE);
2772 	}
2773 
2774 	ASSERT(!servicing_interrupt());
2775 
2776 	dev = handlep->lh_vp->v_rdev;
2777 
2778 	csp = VTOCS(handlep->lh_vp);
2779 	mutex_enter(&csp->s_lock);
2780 	if ((dip = csp->s_dip) != NULL)
2781 		e_ddi_hold_devi(dip);
2782 	mutex_exit(&csp->s_lock);
2783 	if (dip == NULL)
2784 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2785 
2786 	if (dip == NULL)
2787 		return (DDI_FAILURE);
2788 
2789 	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2790 	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2791 	    name, (void *)dip, (void *)ecp));
2792 
2793 	res = ddi_get_eventcookie(dip, name, ecp);
2794 
2795 	ddi_release_devi(dip);
2796 	return (res);
2797 }
2798 
2799 int
2800 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2801     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2802     void *arg, ldi_callback_id_t *id)
2803 {
2804 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2805 	struct ldi_event	*lep;
2806 	dev_info_t		*dip;
2807 	dev_t			dev;
2808 	int			res;
2809 	struct snode		*csp;
2810 
2811 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2812 		return (DDI_FAILURE);
2813 
2814 	ASSERT(!servicing_interrupt());
2815 
2816 	dev = handlep->lh_vp->v_rdev;
2817 
2818 	csp = VTOCS(handlep->lh_vp);
2819 	mutex_enter(&csp->s_lock);
2820 	if ((dip = csp->s_dip) != NULL)
2821 		e_ddi_hold_devi(dip);
2822 	mutex_exit(&csp->s_lock);
2823 	if (dip == NULL)
2824 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2825 
2826 	if (dip == NULL)
2827 		return (DDI_FAILURE);
2828 
2829 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2830 	lep->le_lhp = handlep;
2831 	lep->le_arg = arg;
2832 	lep->le_handler = handler;
2833 
2834 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2835 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2836 		LDI_EVENTCB((CE_WARN, "%s: unable to add "
2837 		    "event callback", "ldi_add_event_handler"));
2838 		ddi_release_devi(dip);
2839 		kmem_free(lep, sizeof (struct ldi_event));
2840 		return (res);
2841 	}
2842 
2843 	*id = (ldi_callback_id_t)lep;
2844 
2845 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2846 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2847 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2848 
2849 	handle_event_add(lep);
2850 	ddi_release_devi(dip);
2851 	return (res);
2852 }
2853 
2854 int
2855 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2856 {
2857 	ldi_event_t		*lep = (ldi_event_t *)id;
2858 	int			res;
2859 
2860 	if ((lh == NULL) || (id == NULL))
2861 		return (DDI_FAILURE);
2862 
2863 	ASSERT(!servicing_interrupt());
2864 
2865 	if ((res = ddi_remove_event_handler(lep->le_id))
2866 	    != DDI_SUCCESS) {
2867 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2868 		    "event callback", "ldi_remove_event_handler"));
2869 		return (res);
2870 	}
2871 
2872 	handle_event_remove(lep);
2873 	kmem_free(lep, sizeof (struct ldi_event));
2874 	return (res);
2875 }
2876 
2877 #endif
2878 
2879 /*
2880  * Here are some definitions of terms used in the following LDI events
2881  * code:
2882  *
2883  * "LDI events" AKA "native events": These are events defined by the
2884  * "new" LDI event framework. These events are serviced by the LDI event
2885  * framework itself and thus are native to it.
2886  *
2887  * "LDI contract events": These are contract events that correspond to the
2888  * LDI events. This mapping of LDI events to contract events is defined by
2889  * the ldi_ev_cookies[] array above.
2890  *
2891  * NDI events: These are events which are serviced by the NDI event
2892  * subsystem. The LDI subsystem just provides a thin wrapper around the
2893  * NDI event interfaces. These events are therefore *not* native events.
2894  */
2895 
2896 static int
2897 ldi_native_event(const char *evname)
2898 {
2899 	int i;
2900 
2901 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2902 
2903 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2904 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2905 			return (1);
2906 	}
2907 
2908 	return (0);
2909 }
2910 
2911 static uint_t
2912 ldi_ev_sync_event(const char *evname)
2913 {
2914 	int i;
2915 
2916 	ASSERT(ldi_native_event(evname));
2917 
2918 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2919 
2920 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2921 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2922 			return (ldi_ev_cookies[i].ck_sync);
2923 	}
2924 
2925 	/*
2926 	 * This should never happen until non-contract based
2927 	 * LDI events are introduced. If that happens, we will
2928 	 * use a "special" token to indicate that there are no
2929 	 * contracts corresponding to this LDI event.
2930 	 */
2931 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2932 
2933 	return (0);
2934 }
2935 
2936 static uint_t
2937 ldi_contract_event(const char *evname)
2938 {
2939 	int i;
2940 
2941 	ASSERT(ldi_native_event(evname));
2942 
2943 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2944 
2945 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2946 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2947 			return (ldi_ev_cookies[i].ck_ctype);
2948 	}
2949 
2950 	/*
2951 	 * This should never happen until non-contract based
2952 	 * LDI events are introduced. If that happens, we will
2953 	 * use a "special" token to indicate that there are no
2954 	 * contracts corresponding to this LDI event.
2955 	 */
2956 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2957 
2958 	return (0);
2959 }
2960 
2961 char *
2962 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2963 {
2964 	int i;
2965 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2966 
2967 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2968 		if (&ldi_ev_cookies[i] == cookie_impl) {
2969 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
2970 			    ldi_ev_cookies[i].ck_evname));
2971 			return (ldi_ev_cookies[i].ck_evname);
2972 		}
2973 	}
2974 
2975 	/*
2976 	 * Not an LDI native event. Must be NDI event service.
2977 	 * Just return a generic string
2978 	 */
2979 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
2980 	return (NDI_EVENT_SERVICE);
2981 }
2982 
2983 static int
2984 ldi_native_cookie(ldi_ev_cookie_t cookie)
2985 {
2986 	int i;
2987 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2988 
2989 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2990 		if (&ldi_ev_cookies[i] == cookie_impl) {
2991 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
2992 			return (1);
2993 		}
2994 	}
2995 
2996 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
2997 	return (0);
2998 }
2999 
3000 static ldi_ev_cookie_t
3001 ldi_get_native_cookie(const char *evname)
3002 {
3003 	int i;
3004 
3005 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3006 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3007 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3008 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3009 		}
3010 	}
3011 
3012 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3013 	return (NULL);
3014 }
3015 
3016 /*
3017  * ldi_ev_lock() needs to be recursive, since layered drivers may call
3018  * other LDI interfaces (such as ldi_close()) from within the context of
3019  * a notify callback. Since the notify callback is called with the
3020  * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3021  * to be recursive.
3022  */
3023 static void
3024 ldi_ev_lock(void)
3025 {
3026 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3027 
3028 	mutex_enter(&ldi_ev_callback_list.le_lock);
3029 	if (ldi_ev_callback_list.le_thread == curthread) {
3030 		ASSERT(ldi_ev_callback_list.le_busy >= 1);
3031 		ldi_ev_callback_list.le_busy++;
3032 	} else {
3033 		while (ldi_ev_callback_list.le_busy)
3034 			cv_wait(&ldi_ev_callback_list.le_cv,
3035 			    &ldi_ev_callback_list.le_lock);
3036 		ASSERT(ldi_ev_callback_list.le_thread == NULL);
3037 		ldi_ev_callback_list.le_busy = 1;
3038 		ldi_ev_callback_list.le_thread = curthread;
3039 	}
3040 	mutex_exit(&ldi_ev_callback_list.le_lock);
3041 
3042 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3043 }
3044 
3045 static void
3046 ldi_ev_unlock(void)
3047 {
3048 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3049 	mutex_enter(&ldi_ev_callback_list.le_lock);
3050 	ASSERT(ldi_ev_callback_list.le_thread == curthread);
3051 	ASSERT(ldi_ev_callback_list.le_busy >= 1);
3052 
3053 	ldi_ev_callback_list.le_busy--;
3054 	if (ldi_ev_callback_list.le_busy == 0) {
3055 		ldi_ev_callback_list.le_thread = NULL;
3056 		cv_signal(&ldi_ev_callback_list.le_cv);
3057 	}
3058 	mutex_exit(&ldi_ev_callback_list.le_lock);
3059 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3060 }
3061 
3062 int
3063 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3064 {
3065 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
3066 	dev_info_t		*dip;
3067 	dev_t			dev;
3068 	int			res;
3069 	struct snode		*csp;
3070 	ddi_eventcookie_t	ddi_cookie;
3071 	ldi_ev_cookie_t		tcookie;
3072 
3073 	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3074 	    evname ? evname : "<NULL>"));
3075 
3076 	if (lh == NULL || evname == NULL ||
3077 	    strlen(evname) == 0 || cookiep == NULL) {
3078 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3079 		return (LDI_EV_FAILURE);
3080 	}
3081 
3082 	*cookiep = NULL;
3083 
3084 	/*
3085 	 * First check if it is an LDI native event
3086 	 */
3087 	tcookie = ldi_get_native_cookie(evname);
3088 	if (tcookie) {
3089 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3090 		*cookiep = tcookie;
3091 		return (LDI_EV_SUCCESS);
3092 	}
3093 
3094 	/*
3095 	 * Not an LDI native event. Try NDI event services.
3096 	 */
3097 
3098 	dev = handlep->lh_vp->v_rdev;
3099 
3100 	csp = VTOCS(handlep->lh_vp);
3101 	mutex_enter(&csp->s_lock);
3102 	if ((dip = csp->s_dip) != NULL)
3103 		e_ddi_hold_devi(dip);
3104 	mutex_exit(&csp->s_lock);
3105 	if (dip == NULL)
3106 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3107 
3108 	if (dip == NULL) {
3109 		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3110 		    "handle: %p", (void *)handlep);
3111 		return (LDI_EV_FAILURE);
3112 	}
3113 
3114 	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3115 	    (void *)dip, evname));
3116 
3117 	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3118 
3119 	ddi_release_devi(dip);
3120 
3121 	if (res == DDI_SUCCESS) {
3122 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3123 		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
3124 		return (LDI_EV_SUCCESS);
3125 	} else {
3126 		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3127 		return (LDI_EV_FAILURE);
3128 	}
3129 }
3130 
3131 /*ARGSUSED*/
3132 static void
3133 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3134     void *arg, void *ev_data)
3135 {
3136 	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3137 
3138 	ASSERT(lecp != NULL);
3139 	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3140 	ASSERT(lecp->lec_lhp);
3141 	ASSERT(lecp->lec_notify == NULL);
3142 	ASSERT(lecp->lec_finalize);
3143 
3144 	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3145 	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3146 	    (void *)lecp->lec_arg, (void *)ev_data));
3147 
3148 	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3149 	    lecp->lec_arg, ev_data);
3150 }
3151 
3152 int
3153 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3154     ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3155 {
3156 	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
3157 	ldi_ev_callback_impl_t	*lecp;
3158 	dev_t			dev;
3159 	struct snode		*csp;
3160 	dev_info_t		*dip;
3161 	int			ddi_event;
3162 
3163 	ASSERT(!servicing_interrupt());
3164 
3165 	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3166 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3167 		return (LDI_EV_FAILURE);
3168 	}
3169 
3170 	if (callb->cb_vers != LDI_EV_CB_VERS) {
3171 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3172 		return (LDI_EV_FAILURE);
3173 	}
3174 
3175 	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3176 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3177 		return (LDI_EV_FAILURE);
3178 	}
3179 
3180 	*id = 0;
3181 
3182 	dev = lhp->lh_vp->v_rdev;
3183 	csp = VTOCS(lhp->lh_vp);
3184 	mutex_enter(&csp->s_lock);
3185 	if ((dip = csp->s_dip) != NULL)
3186 		e_ddi_hold_devi(dip);
3187 	mutex_exit(&csp->s_lock);
3188 	if (dip == NULL)
3189 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3190 
3191 	if (dip == NULL) {
3192 		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3193 		    "LDI handle: %p", (void *)lhp);
3194 		return (LDI_EV_FAILURE);
3195 	}
3196 
3197 	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3198 
3199 	ddi_event = 0;
3200 	if (!ldi_native_cookie(cookie)) {
3201 		if (callb->cb_notify || callb->cb_finalize == NULL) {
3202 			/*
3203 			 * NDI event services only accept finalize
3204 			 */
3205 			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3206 			    "Only finalize"
3207 			    " callback supported with this cookie",
3208 			    "ldi_ev_register_callbacks",
3209 			    lhp->lh_ident->li_modname);
3210 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3211 			ddi_release_devi(dip);
3212 			return (LDI_EV_FAILURE);
3213 		}
3214 
3215 		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3216 		    i_ldi_ev_callback, (void *)lecp,
3217 		    (ddi_callback_id_t *)&lecp->lec_id)
3218 		    != DDI_SUCCESS) {
3219 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3220 			ddi_release_devi(dip);
3221 			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3222 			    "ddi_add_event_handler failed"));
3223 			return (LDI_EV_FAILURE);
3224 		}
3225 		ddi_event = 1;
3226 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3227 		    "ddi_add_event_handler success"));
3228 	}
3229 
3230 
3231 
3232 	ldi_ev_lock();
3233 
3234 	/*
3235 	 * Add the notify/finalize callback to the LDI's list of callbacks.
3236 	 */
3237 	lecp->lec_lhp = lhp;
3238 	lecp->lec_dev = lhp->lh_vp->v_rdev;
3239 	lecp->lec_spec = (lhp->lh_vp->v_type == VCHR) ?
3240 	    S_IFCHR : S_IFBLK;
3241 	lecp->lec_notify = callb->cb_notify;
3242 	lecp->lec_finalize = callb->cb_finalize;
3243 	lecp->lec_arg = arg;
3244 	lecp->lec_cookie = cookie;
3245 	if (!ddi_event)
3246 		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3247 	else
3248 		ASSERT(lecp->lec_id);
3249 	lecp->lec_dip = dip;
3250 	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3251 
3252 	*id = (ldi_callback_id_t)lecp->lec_id;
3253 
3254 	ldi_ev_unlock();
3255 
3256 	ddi_release_devi(dip);
3257 
3258 	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3259 	    "notify/finalize"));
3260 
3261 	return (LDI_EV_SUCCESS);
3262 }
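
/*
 * Illustrative sketch (not part of this file): a hypothetical layered driver
 * registering notify/finalize callbacks for the LDI_EV_OFFLINE native event.
 * xx_notify, xx_finalize and xx_state are assumptions supplied by the
 * consumer.
 *
 *	static ldi_ev_callback_t xx_ev_callb = {
 *		.cb_vers	= LDI_EV_CB_VERS,
 *		.cb_notify	= xx_notify,
 *		.cb_finalize	= xx_finalize
 *	};
 *	ldi_ev_cookie_t		cookie;
 *	ldi_callback_id_t	id;
 *
 *	if (ldi_ev_get_cookie(lh, LDI_EV_OFFLINE, &cookie) == LDI_EV_SUCCESS)
 *		(void) ldi_ev_register_callbacks(lh, cookie, &xx_ev_callb,
 *		    xx_state, &id);
 */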
3263 
3264 static int
3265 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3266     dev_t dev, int spec_type)
3267 {
3268 	ASSERT(lecp);
3269 	ASSERT(dip);
3270 	ASSERT(dev != DDI_DEV_T_NONE);
3271 	ASSERT(dev != NODEV);
3272 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3273 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3274 	ASSERT(lecp->lec_dip);
3275 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3276 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3277 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3278 	ASSERT(lecp->lec_dev != NODEV);
3279 
3280 	if (dip != lecp->lec_dip)
3281 		return (0);
3282 
3283 	if (dev != DDI_DEV_T_ANY) {
3284 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3285 			return (0);
3286 	}
3287 
3288 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3289 
3290 	return (1);
3291 }
3292 
3293 /*
3294  * LDI framework function to post a "notify" event to all layered drivers
3295  * that have registered for that event
3296  *
3297  * Returns:
3298  *		LDI_EV_SUCCESS - registered callbacks allow event
3299  *		LDI_EV_FAILURE - registered callbacks block event
3300  *		LDI_EV_NONE    - No matching LDI callbacks
3301  *
3302  * This function is *not* to be called by layered drivers. It is for I/O
3303  * framework code in Solaris, such as the I/O retire code and DR code
3304  * to call while servicing a device event such as offline or degraded.
3305  */
3306 int
3307 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3308     void *ev_data)
3309 {
3310 	ldi_ev_callback_impl_t *lecp;
3311 	list_t	*listp;
3312 	int	ret;
3313 	char	*lec_event;
3314 
3315 	ASSERT(dip);
3316 	ASSERT(dev != DDI_DEV_T_NONE);
3317 	ASSERT(dev != NODEV);
3318 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3319 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3320 	ASSERT(event);
3321 	ASSERT(ldi_native_event(event));
3322 	ASSERT(ldi_ev_sync_event(event));
3323 
3324 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3325 	    (void *)dip, event));
3326 
3327 	ret = LDI_EV_NONE;
3328 	ldi_ev_lock();
3329 	listp = &ldi_ev_callback_list.le_head;
3330 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3331 
3332 		/* Check if matching device */
3333 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3334 			continue;
3335 
3336 		if (lecp->lec_lhp == NULL) {
3337 			/*
3338 			 * Consumer has unregistered the handle and so
3339 			 * is no longer interested in notify events.
3340 			 */
3341 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3342 			    "handle, skipping"));
3343 			continue;
3344 		}
3345 
3346 		if (lecp->lec_notify == NULL) {
3347 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3348 			    "callback. skipping"));
3349 			continue;	/* not interested in notify */
3350 		}
3351 
3352 		/*
3353 		 * Check if matching event
3354 		 */
3355 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3356 		if (strcmp(event, lec_event) != 0) {
3357 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3358 			    " event {%s,%s}. skipping", event, lec_event));
3359 			continue;
3360 		}
3361 
3362 		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3363 		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3364 		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3365 			ret = LDI_EV_FAILURE;
3366 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3367 			    " FAILURE"));
3368 			break;
3369 		}
3370 
3371 		/* We have a matching callback that allows the event to occur */
3372 		ret = LDI_EV_SUCCESS;
3373 
3374 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3375 	}
3376 
3377 	if (ret != LDI_EV_FAILURE)
3378 		goto out;
3379 
3380 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3381 
3382 	/*
3383 	 * Undo notifies already sent
3384 	 */
3385 	lecp = list_prev(listp, lecp);
3386 	for (; lecp; lecp = list_prev(listp, lecp)) {
3387 
3388 		/*
3389 		 * Check if matching device
3390 		 */
3391 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3392 			continue;
3393 
3394 
3395 		if (lecp->lec_finalize == NULL) {
3396 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3397 			    "skipping"));
3398 			continue;	/* not interested in finalize */
3399 		}
3400 
3401 		/*
3402 		 * It is possible that, in response to a notify event, a
3403 		 * layered driver closed its LDI handle, so it is ok
3404 		 * to have a NULL LDI handle for finalize. The layered
3405 		 * driver is expected to maintain state in its "arg"
3406 		 * parameter to keep track of the closed device.
3407 		 */
3408 
3409 		/* Check if matching event */
3410 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3411 		if (strcmp(event, lec_event) != 0) {
3412 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3413 			    "event: %s,%s, skipping", event, lec_event));
3414 			continue;
3415 		}
3416 
3417 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3418 
3419 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3420 		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3421 
3422 		/*
3423 		 * If this is an LDI native event and the LDI handle was closed
3424 		 * in the context of notify, NULL out the finalize callback; we
3425 		 * have already made the one finalize call allowed here.
3426 		 */
3427 		if (lecp->lec_lhp == NULL &&
3428 		    ldi_native_cookie(lecp->lec_cookie)) {
3429 			LDI_EVDBG((CE_NOTE,
3430 			    "ldi_invoke_notify(): NULL-ing finalize after "
3431 			    "calling 1 finalize following ldi_close"));
3432 			lecp->lec_finalize = NULL;
3433 		}
3434 	}
3435 
3436 out:
3437 	ldi_ev_unlock();
3438 
3439 	if (ret == LDI_EV_NONE) {
3440 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3441 		    "LDI callbacks"));
3442 	}
3443 
3444 	return (ret);
3445 }
3446 
3447 /*
3448  * Framework function to be called from a layered driver to propagate
3449  * LDI "notify" events to exported minors.
3450  *
3451  * This function is a public interface exported by the LDI framework
3452  * for use by layered drivers to propagate device events up the software
3453  * stack.
3454  */
3455 int
3456 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3457     ldi_ev_cookie_t cookie, void *ev_data)
3458 {
3459 	char		*evname = ldi_ev_get_type(cookie);
3460 	uint_t		ct_evtype;
3461 	dev_t		dev;
3462 	major_t		major;
3463 	int		retc;
3464 	int		retl;
3465 
3466 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3467 	ASSERT(dip);
3468 	ASSERT(ldi_native_cookie(cookie));
3469 
3470 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3471 	    evname, (void *)dip));
3472 
3473 	if (!ldi_ev_sync_event(evname)) {
3474 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3475 		    "negotiatable event", evname);
3476 		return (LDI_EV_SUCCESS);
3477 	}
3478 
3479 	major = ddi_driver_major(dip);
3480 	if (major == DDI_MAJOR_T_NONE) {
3481 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3482 		(void) ddi_pathname(dip, path);
3483 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3484 		    "for device %s", path);
3485 		kmem_free(path, MAXPATHLEN);
3486 		return (LDI_EV_FAILURE);
3487 	}
3488 	dev = makedevice(major, minor);
3489 
3490 	/*
3491 	 * Generate negotiation contract events on contracts (if any) associated
3492 	 * with this minor.
3493 	 */
3494 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3495 	ct_evtype = ldi_contract_event(evname);
3496 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3497 	if (retc == CT_NACK) {
3498 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3499 		return (LDI_EV_FAILURE);
3500 	}
3501 
3502 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3503 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3504 	if (retl == LDI_EV_FAILURE) {
3505 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3506 		    "returned FAILURE. Calling contract negend"));
3507 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3508 		return (LDI_EV_FAILURE);
3509 	}
3510 
3511 	/*
3512 	 * The very fact that we are here indicates that there is an
3513 	 * LDI callback (and hence a constraint) for the retire of the
3514 	 * HW device. So we just return success even if there are no
3515 	 * contracts or LDI callbacks against the minors layered on top
3516 	 * of the HW minors.
3517 	 */
3518 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3519 	return (LDI_EV_SUCCESS);
3520 }
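
/*
 * Illustrative sketch (not part of this file): a hypothetical layered
 * driver's notify callback propagating the event to its own exported minor
 * via ldi_ev_notify().  xx_state_t and its members are assumptions.
 *
 *	static int
 *	xx_notify(ldi_handle_t lh, ldi_ev_cookie_t cookie, void *arg,
 *	    void *ev_data)
 *	{
 *		xx_state_t *xsp = arg;
 *
 *		return (ldi_ev_notify(xsp->xx_dip, xsp->xx_minor, S_IFCHR,
 *		    cookie, ev_data));
 *	}
 */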
3521 
3522 /*
3523  * LDI framework function to invoke "finalize" callbacks for all layered
3524  * drivers that have registered callbacks for that event.
3525  *
3526  * This function is *not* to be called by layered drivers. It is for I/O
3527  * framework code in Solaris, such as the I/O retire code and DR code
3528  * to call while servicing a device event such as offline or degraded.
3529  */
3530 void
3531 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3532     int ldi_result, void *ev_data)
3533 {
3534 	ldi_ev_callback_impl_t *lecp;
3535 	list_t	*listp;
3536 	char	*lec_event;
3537 	int	found = 0;
3538 
3539 	ASSERT(dip);
3540 	ASSERT(dev != DDI_DEV_T_NONE);
3541 	ASSERT(dev != NODEV);
3542 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3543 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3544 	ASSERT(event);
3545 	ASSERT(ldi_native_event(event));
3546 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3547 
3548 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3549 	    " event=%s", (void *)dip, ldi_result, event));
3550 
3551 	ldi_ev_lock();
3552 	listp = &ldi_ev_callback_list.le_head;
3553 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
3554 
3555 		if (lecp->lec_finalize == NULL) {
3556 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3557 			    "finalize. Skipping"));
3558 			continue;	/* Not interested in finalize */
3559 		}
3560 
3561 		/*
3562 		 * Check if matching device
3563 		 */
3564 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3565 			continue;
3566 
3567 		/*
3568 		 * It is valid for the LDI handle to be NULL during finalize.
3569 		 * The layered driver may have done an LDI close in the notify
3570 		 * callback.
3571 		 */
3572 
3573 		/*
3574 		 * Check if matching event
3575 		 */
3576 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3577 		if (strcmp(event, lec_event) != 0) {
3578 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3579 			    "matching event {%s,%s}. Skipping",
3580 			    event, lec_event));
3581 			continue;
3582 		}
3583 
3584 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3585 
3586 		found = 1;
3587 
3588 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3589 		    ldi_result, lecp->lec_arg, ev_data);
3590 
3591 		/*
3592 		 * If this is an LDI native event and the LDI handle was closed
3593 		 * in the context of notify, NULL out the finalize callback; we
3594 		 * have already made the one finalize call allowed here.
3595 		 */
3596 		if (lecp->lec_lhp == NULL &&
3597 		    ldi_native_cookie(lecp->lec_cookie)) {
3598 			LDI_EVDBG((CE_NOTE,
3599 			    "ldi_invoke_finalize(): NULLing finalize after "
3600 			    "calling 1 finalize following ldi_close"));
3601 			lecp->lec_finalize = NULL;
3602 		}
3603 	}
3604 	ldi_ev_unlock();
3605 
3606 	if (found)
3607 		return;
3608 
3609 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3610 }
3611 
3612 /*
3613  * Framework function to be called from a layered driver to propagate
3614  * LDI "finalize" events to exported minors.
3615  *
3616  * This function is a public interface exported by the LDI framework
3617  * for use by layered drivers to propagate device events up the software
3618  * stack.
3619  */
3620 void
3621 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3622     ldi_ev_cookie_t cookie, void *ev_data)
3623 {
3624 	dev_t dev;
3625 	major_t major;
3626 	char *evname;
3627 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3628 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3629 	uint_t ct_evtype;
3630 
3631 	ASSERT(dip);
3632 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3633 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3634 	ASSERT(ldi_native_cookie(cookie));
3635 
3636 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3637 
3638 	major = ddi_driver_major(dip);
3639 	if (major == DDI_MAJOR_T_NONE) {
3640 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3641 		(void) ddi_pathname(dip, path);
3642 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3643 		    "for device %s", path);
3644 		kmem_free(path, MAXPATHLEN);
3645 		return;
3646 	}
3647 	dev = makedevice(major, minor);
3648 
3649 	evname = ldi_ev_get_type(cookie);
3650 
3651 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3652 	ct_evtype = ldi_contract_event(evname);
3653 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3654 
3655 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3656 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3657 }
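
/*
 * Illustrative sketch (not part of this file): the matching finalize
 * callback for the notify sketch above, propagating the result to the
 * driver's own exported minor.  xx_state_t and its members are assumptions.
 *
 *	static void
 *	xx_finalize(ldi_handle_t lh, ldi_ev_cookie_t cookie, int result,
 *	    void *arg, void *ev_data)
 *	{
 *		xx_state_t *xsp = arg;
 *
 *		ldi_ev_finalize(xsp->xx_dip, xsp->xx_minor, S_IFCHR, result,
 *		    cookie, ev_data);
 *	}
 */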
3658 
3659 int
3660 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3661 {
3662 	ldi_ev_callback_impl_t	*lecp;
3663 	ldi_ev_callback_impl_t	*next;
3664 	ldi_ev_callback_impl_t	*found;
3665 	list_t			*listp;
3666 
3667 	ASSERT(!servicing_interrupt());
3668 
3669 	if (id == 0) {
3670 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3671 		return (LDI_EV_FAILURE);
3672 	}
3673 
3674 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3675 	    (void *)id));
3676 
3677 	ldi_ev_lock();
3678 
3679 	listp = &ldi_ev_callback_list.le_head;
3680 	next = found = NULL;
3681 	for (lecp = list_head(listp); lecp; lecp = next) {
3682 		next = list_next(listp, lecp);
3683 		if (lecp->lec_id == id) {
3684 			ASSERT(found == NULL);
3685 			list_remove(listp, lecp);
3686 			found = lecp;
3687 		}
3688 	}
3689 	ldi_ev_unlock();
3690 
3691 	if (found == NULL) {
3692 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3693 		    (void *)id);
3694 		return (LDI_EV_SUCCESS);
3695 	}
3696 
3697 	if (!ldi_native_cookie(found->lec_cookie)) {
3698 		ASSERT(found->lec_notify == NULL);
3699 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3700 		    != DDI_SUCCESS) {
3701 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3702 			    "for id (%p)", (void *)id);
3703 			ldi_ev_lock();
3704 			list_insert_tail(listp, found);
3705 			ldi_ev_unlock();
3706 			return (LDI_EV_FAILURE);
3707 		}
3708 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3709 		    "service removal succeeded"));
3710 	} else {
3711 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3712 		    "LDI native callbacks"));
3713 	}
3714 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3715 
3716 	return (LDI_EV_SUCCESS);
3717 }
3718