xref: /illumos-gate/usr/src/uts/common/os/driver_lyr.c (revision 5f61829ae051c3c632fdee24571c7e9eb5dcce8b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright 2019 Joyent, Inc.
26  */
27 
28 /*
29  * Layered driver support.
30  */
31 
32 #include <sys/atomic.h>
33 #include <sys/types.h>
34 #include <sys/t_lock.h>
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/systm.h>
38 #include <sys/sysmacros.h>
39 #include <sys/buf.h>
40 #include <sys/cred.h>
41 #include <sys/uio.h>
42 #include <sys/vnode.h>
43 #include <sys/fs/snode.h>
44 #include <sys/open.h>
45 #include <sys/kmem.h>
46 #include <sys/file.h>
47 #include <sys/bootconf.h>
48 #include <sys/pathname.h>
49 #include <sys/bitmap.h>
50 #include <sys/stat.h>
51 #include <sys/dditypes.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/sunndi.h>
56 #include <sys/esunddi.h>
57 #include <sys/autoconf.h>
58 #include <sys/sunldi.h>
59 #include <sys/sunldi_impl.h>
60 #include <sys/errno.h>
61 #include <sys/debug.h>
62 #include <sys/modctl.h>
63 #include <sys/var.h>
64 #include <vm/seg_vn.h>
65 
66 #include <sys/stropts.h>
67 #include <sys/strsubr.h>
68 #include <sys/socket.h>
69 #include <sys/socketvar.h>
70 #include <sys/kstr.h>
71 
72 /*
73  * Device contract related
74  */
75 #include <sys/contract_impl.h>
76 #include <sys/contract/device_impl.h>
77 
78 /*
79  * Define macros to manipulate snode, vnode, and open device flags
80  */
81 #define	VTYP_VALID(i)	(((i) == VCHR) || ((i) == VBLK))
82 #define	VTYP_TO_OTYP(i)	(((i) == VCHR) ? OTYP_CHR : OTYP_BLK)
83 #define	VTYP_TO_STYP(i)	(((i) == VCHR) ? S_IFCHR : S_IFBLK)
84 
85 #define	OTYP_VALID(i)	(((i) == OTYP_CHR) || ((i) == OTYP_BLK))
86 #define	OTYP_TO_VTYP(i)	(((i) == OTYP_CHR) ? VCHR : VBLK)
87 #define	OTYP_TO_STYP(i)	(((i) == OTYP_CHR) ? S_IFCHR : S_IFBLK)
88 
89 #define	STYP_VALID(i)	(((i) == S_IFCHR) || ((i) == S_IFBLK))
90 #define	STYP_TO_VTYP(i)	(((i) == S_IFCHR) ? VCHR : VBLK)
91 
92 /*
93  * Define macros for accessing layered driver hash structures
94  */
95 #define	LH_HASH(vp)		(handle_hash_func(vp) % LH_HASH_SZ)
96 #define	LI_HASH(mid, dip, dev)	(ident_hash_func(mid, dip, dev) % LI_HASH_SZ)
97 
98 /*
99  * Define layered handle flags used in the lh_type field
100  */
101 #define	LH_STREAM	(0x1)	/* handle to a streams device */
102 #define	LH_CBDEV	(0x2)	/* handle to a char/block device */
103 
104 /*
105  * Define macro for devid property lookups
106  */
107 #define	DEVID_PROP_FLAGS	(DDI_PROP_DONTPASS | \
108 				DDI_PROP_TYPE_STRING|DDI_PROP_CANSLEEP)
109 
110 /*
111  * Dummy string for NDI events
112  */
113 #define	NDI_EVENT_SERVICE	"NDI_EVENT_SERVICE"
114 
115 static void ldi_ev_lock(void);
116 static void ldi_ev_unlock(void);
117 
118 #ifdef	LDI_OBSOLETE_EVENT
119 int ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id);
120 #endif
121 
122 
123 /*
124  * globals
125  */
126 static kmutex_t			ldi_ident_hash_lock[LI_HASH_SZ];
127 static struct ldi_ident		*ldi_ident_hash[LI_HASH_SZ];
128 
129 static kmutex_t			ldi_handle_hash_lock[LH_HASH_SZ];
130 static struct ldi_handle	*ldi_handle_hash[LH_HASH_SZ];
131 static size_t			ldi_handle_hash_count;
132 
133 /*
134  * Use of "ldi_ev_callback_list" must be protected by ldi_ev_lock()
135  * and ldi_ev_unlock().
136  */
137 static struct ldi_ev_callback_list ldi_ev_callback_list;
138 
139 static uint32_t ldi_ev_id_pool = 0;
140 
141 struct ldi_ev_cookie {
142 	char *ck_evname;
143 	uint_t ck_sync;
144 	uint_t ck_ctype;
145 };
146 
147 static struct ldi_ev_cookie ldi_ev_cookies[] = {
148 	{ LDI_EV_OFFLINE, 1, CT_DEV_EV_OFFLINE},
149 	{ LDI_EV_DEGRADE, 0, CT_DEV_EV_DEGRADED},
150 	{ LDI_EV_DEVICE_REMOVE, 0, 0},
151 	{ NULL}			/* must terminate list */
152 };
153 
154 void
ldi_init(void)155 ldi_init(void)
156 {
157 	int i;
158 
159 	ldi_handle_hash_count = 0;
160 	for (i = 0; i < LH_HASH_SZ; i++) {
161 		mutex_init(&ldi_handle_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
162 		ldi_handle_hash[i] = NULL;
163 	}
164 	for (i = 0; i < LI_HASH_SZ; i++) {
165 		mutex_init(&ldi_ident_hash_lock[i], NULL, MUTEX_DEFAULT, NULL);
166 		ldi_ident_hash[i] = NULL;
167 	}
168 
169 	/*
170 	 * Initialize the LDI event subsystem
171 	 */
172 	mutex_init(&ldi_ev_callback_list.le_lock, NULL, MUTEX_DEFAULT, NULL);
173 	cv_init(&ldi_ev_callback_list.le_cv, NULL, CV_DEFAULT, NULL);
174 	ldi_ev_callback_list.le_busy = 0;
175 	ldi_ev_callback_list.le_thread = NULL;
176 	ldi_ev_callback_list.le_walker_next = NULL;
177 	ldi_ev_callback_list.le_walker_prev = NULL;
178 	list_create(&ldi_ev_callback_list.le_head,
179 	    sizeof (ldi_ev_callback_impl_t),
180 	    offsetof(ldi_ev_callback_impl_t, lec_list));
181 }
182 
183 /*
184  * LDI ident manipulation functions
185  */
186 static uint_t
ident_hash_func(modid_t modid,dev_info_t * dip,dev_t dev)187 ident_hash_func(modid_t modid, dev_info_t *dip, dev_t dev)
188 {
189 	if (dip != NULL) {
190 		uintptr_t k = (uintptr_t)dip;
191 		k >>= (int)highbit(sizeof (struct dev_info));
192 		return ((uint_t)k);
193 	} else if (dev != DDI_DEV_T_NONE) {
194 		return (modid + getminor(dev) + getmajor(dev));
195 	} else {
196 		return (modid);
197 	}
198 }
199 
200 static struct ldi_ident **
ident_find_ref_nolock(modid_t modid,dev_info_t * dip,dev_t dev,major_t major)201 ident_find_ref_nolock(modid_t modid, dev_info_t *dip, dev_t dev, major_t major)
202 {
203 	struct ldi_ident	**lipp = NULL;
204 	uint_t			index = LI_HASH(modid, dip, dev);
205 
206 	ASSERT(MUTEX_HELD(&ldi_ident_hash_lock[index]));
207 
208 	for (lipp = &(ldi_ident_hash[index]);
209 	    (*lipp != NULL);
210 	    lipp = &((*lipp)->li_next)) {
211 		if (((*lipp)->li_modid == modid) &&
212 		    ((*lipp)->li_major == major) &&
213 		    ((*lipp)->li_dip == dip) &&
214 		    ((*lipp)->li_dev == dev))
215 			break;
216 	}
217 
218 	ASSERT(lipp != NULL);
219 	return (lipp);
220 }
221 
222 static struct ldi_ident *
ident_alloc(char * mod_name,dev_info_t * dip,dev_t dev,major_t major)223 ident_alloc(char *mod_name, dev_info_t *dip, dev_t dev, major_t major)
224 {
225 	struct ldi_ident	*lip, **lipp, *retlip;
226 	modid_t			modid;
227 	uint_t			index;
228 
229 	ASSERT(mod_name != NULL);
230 
231 	/* get the module id */
232 	modid = mod_name_to_modid(mod_name);
233 	ASSERT(modid != -1);
234 
235 	/* allocate a new ident in case we need it */
236 	lip = kmem_zalloc(sizeof (*lip), KM_SLEEP);
237 
238 	/* search the hash for a matching ident */
239 	index = LI_HASH(modid, dip, dev);
240 	mutex_enter(&ldi_ident_hash_lock[index]);
241 	lipp = ident_find_ref_nolock(modid, dip, dev, major);
242 
243 	if (*lipp != NULL) {
244 		/* we found an ident in the hash */
245 		ASSERT(strcmp((*lipp)->li_modname, mod_name) == 0);
246 		(*lipp)->li_ref++;
247 		retlip = *lipp;
248 		mutex_exit(&ldi_ident_hash_lock[index]);
249 		kmem_free(lip, sizeof (struct ldi_ident));
250 		return (retlip);
251 	}
252 
253 	/* initialize the new ident */
254 	lip->li_next = NULL;
255 	lip->li_ref = 1;
256 	lip->li_modid = modid;
257 	lip->li_major = major;
258 	lip->li_dip = dip;
259 	lip->li_dev = dev;
260 	(void) strncpy(lip->li_modname, mod_name, sizeof (lip->li_modname) - 1);
261 
262 	/* add it to the ident hash */
263 	lip->li_next = ldi_ident_hash[index];
264 	ldi_ident_hash[index] = lip;
265 
266 	mutex_exit(&ldi_ident_hash_lock[index]);
267 	return (lip);
268 }
269 
270 static void
ident_hold(struct ldi_ident * lip)271 ident_hold(struct ldi_ident *lip)
272 {
273 	uint_t			index;
274 
275 	ASSERT(lip != NULL);
276 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
277 	mutex_enter(&ldi_ident_hash_lock[index]);
278 	ASSERT(lip->li_ref > 0);
279 	lip->li_ref++;
280 	mutex_exit(&ldi_ident_hash_lock[index]);
281 }
282 
283 static void
ident_release(struct ldi_ident * lip)284 ident_release(struct ldi_ident *lip)
285 {
286 	struct ldi_ident	**lipp;
287 	uint_t			index;
288 
289 	ASSERT(lip != NULL);
290 	index = LI_HASH(lip->li_modid, lip->li_dip, lip->li_dev);
291 	mutex_enter(&ldi_ident_hash_lock[index]);
292 
293 	ASSERT(lip->li_ref > 0);
294 	if (--lip->li_ref > 0) {
295 		/* there are more references to this ident */
296 		mutex_exit(&ldi_ident_hash_lock[index]);
297 		return;
298 	}
299 
300 	/* this was the last reference/open for this ident.  free it. */
301 	lipp = ident_find_ref_nolock(
302 	    lip->li_modid, lip->li_dip, lip->li_dev, lip->li_major);
303 
304 	ASSERT((lipp != NULL) && (*lipp != NULL));
305 	*lipp = lip->li_next;
306 	mutex_exit(&ldi_ident_hash_lock[index]);
307 	kmem_free(lip, sizeof (struct ldi_ident));
308 }
309 
310 /*
311  * LDI handle manipulation functions
312  */
313 static uint_t
handle_hash_func(void * vp)314 handle_hash_func(void *vp)
315 {
316 	uintptr_t k = (uintptr_t)vp;
317 	k >>= (int)highbit(sizeof (vnode_t));
318 	return ((uint_t)k);
319 }
320 
321 static struct ldi_handle **
handle_find_ref_nolock(vnode_t * vp,struct ldi_ident * ident)322 handle_find_ref_nolock(vnode_t *vp, struct ldi_ident *ident)
323 {
324 	struct ldi_handle	**lhpp = NULL;
325 	uint_t			index = LH_HASH(vp);
326 
327 	ASSERT(MUTEX_HELD(&ldi_handle_hash_lock[index]));
328 
329 	for (lhpp = &(ldi_handle_hash[index]);
330 	    (*lhpp != NULL);
331 	    lhpp = &((*lhpp)->lh_next)) {
332 		if (((*lhpp)->lh_ident == ident) &&
333 		    ((*lhpp)->lh_vp == vp))
334 			break;
335 	}
336 
337 	ASSERT(lhpp != NULL);
338 	return (lhpp);
339 }
340 
341 static struct ldi_handle *
handle_find(vnode_t * vp,struct ldi_ident * ident)342 handle_find(vnode_t *vp, struct ldi_ident *ident)
343 {
344 	struct ldi_handle	**lhpp, *retlhp;
345 	int			index = LH_HASH(vp);
346 
347 	mutex_enter(&ldi_handle_hash_lock[index]);
348 	lhpp = handle_find_ref_nolock(vp, ident);
349 	retlhp = *lhpp;
350 	mutex_exit(&ldi_handle_hash_lock[index]);
351 	return (retlhp);
352 }
353 
354 static struct ldi_handle *
handle_alloc(vnode_t * vp,struct ldi_ident * ident)355 handle_alloc(vnode_t *vp, struct ldi_ident *ident)
356 {
357 	struct ldi_handle	*lhp, **lhpp, *retlhp;
358 	uint_t			index;
359 
360 	ASSERT((vp != NULL) && (ident != NULL));
361 
362 	/* allocate a new handle in case we need it */
363 	lhp = kmem_zalloc(sizeof (*lhp), KM_SLEEP);
364 
365 	/* search the hash for a matching handle */
366 	index = LH_HASH(vp);
367 	mutex_enter(&ldi_handle_hash_lock[index]);
368 	lhpp = handle_find_ref_nolock(vp, ident);
369 
370 	if (*lhpp != NULL) {
371 		/* we found a handle in the hash */
372 		(*lhpp)->lh_ref++;
373 		retlhp = *lhpp;
374 		mutex_exit(&ldi_handle_hash_lock[index]);
375 
376 		LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: dup "
377 		    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
378 		    (void *)retlhp, (void *)ident, (void *)vp,
379 		    mod_major_to_name(getmajor(vp->v_rdev)),
380 		    getminor(vp->v_rdev)));
381 
382 		kmem_free(lhp, sizeof (struct ldi_handle));
383 		return (retlhp);
384 	}
385 
386 	/* initialize the new handle */
387 	lhp->lh_ref = 1;
388 	lhp->lh_vp = vp;
389 	lhp->lh_ident = ident;
390 #ifdef	LDI_OBSOLETE_EVENT
391 	mutex_init(lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
392 #endif
393 
394 	/* set the device type for this handle */
395 	lhp->lh_type = 0;
396 	if (vp->v_stream) {
397 		ASSERT(vp->v_type == VCHR);
398 		lhp->lh_type |= LH_STREAM;
399 	} else {
400 		lhp->lh_type |= LH_CBDEV;
401 	}
402 
403 	/* get holds on other objects */
404 	ident_hold(ident);
405 	ASSERT(vp->v_count >= 1);
406 	VN_HOLD(vp);
407 
408 	/* add it to the handle hash */
409 	lhp->lh_next = ldi_handle_hash[index];
410 	ldi_handle_hash[index] = lhp;
411 	atomic_inc_ulong(&ldi_handle_hash_count);
412 
413 	LDI_ALLOCFREE((CE_WARN, "ldi handle alloc: new "
414 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
415 	    (void *)lhp, (void *)ident, (void *)vp,
416 	    mod_major_to_name(getmajor(vp->v_rdev)),
417 	    getminor(vp->v_rdev)));
418 
419 	mutex_exit(&ldi_handle_hash_lock[index]);
420 	return (lhp);
421 }
422 
423 static void
handle_release(struct ldi_handle * lhp)424 handle_release(struct ldi_handle *lhp)
425 {
426 	struct ldi_handle	**lhpp;
427 	uint_t			index;
428 
429 	ASSERT(lhp != NULL);
430 
431 	index = LH_HASH(lhp->lh_vp);
432 	mutex_enter(&ldi_handle_hash_lock[index]);
433 
434 	LDI_ALLOCFREE((CE_WARN, "ldi handle release: "
435 	    "lh=0x%p, ident=0x%p, vp=0x%p, drv=%s, minor=0x%x",
436 	    (void *)lhp, (void *)lhp->lh_ident, (void *)lhp->lh_vp,
437 	    mod_major_to_name(getmajor(lhp->lh_vp->v_rdev)),
438 	    getminor(lhp->lh_vp->v_rdev)));
439 
440 	ASSERT(lhp->lh_ref > 0);
441 	if (--lhp->lh_ref > 0) {
442 		/* there are more references to this handle */
443 		mutex_exit(&ldi_handle_hash_lock[index]);
444 		return;
445 	}
446 
447 	/* this was the last reference/open for this handle.  free it. */
448 	lhpp = handle_find_ref_nolock(lhp->lh_vp, lhp->lh_ident);
449 	ASSERT((lhpp != NULL) && (*lhpp != NULL));
450 	*lhpp = lhp->lh_next;
451 	atomic_dec_ulong(&ldi_handle_hash_count);
452 	mutex_exit(&ldi_handle_hash_lock[index]);
453 
454 	VN_RELE(lhp->lh_vp);
455 	ident_release(lhp->lh_ident);
456 #ifdef	LDI_OBSOLETE_EVENT
457 	mutex_destroy(lhp->lh_lock);
458 #endif
459 	kmem_free(lhp, sizeof (struct ldi_handle));
460 }
461 
462 #ifdef	LDI_OBSOLETE_EVENT
463 /*
464  * LDI event manipulation functions
465  */
466 static void
handle_event_add(ldi_event_t * lep)467 handle_event_add(ldi_event_t *lep)
468 {
469 	struct ldi_handle *lhp = lep->le_lhp;
470 
471 	ASSERT(lhp != NULL);
472 
473 	mutex_enter(lhp->lh_lock);
474 	if (lhp->lh_events == NULL) {
475 		lhp->lh_events = lep;
476 		mutex_exit(lhp->lh_lock);
477 		return;
478 	}
479 
480 	lep->le_next = lhp->lh_events;
481 	lhp->lh_events->le_prev = lep;
482 	lhp->lh_events = lep;
483 	mutex_exit(lhp->lh_lock);
484 }
485 
486 static void
handle_event_remove(ldi_event_t * lep)487 handle_event_remove(ldi_event_t *lep)
488 {
489 	struct ldi_handle *lhp = lep->le_lhp;
490 
491 	ASSERT(lhp != NULL);
492 
493 	mutex_enter(lhp->lh_lock);
494 	if (lep->le_prev)
495 		lep->le_prev->le_next = lep->le_next;
496 	if (lep->le_next)
497 		lep->le_next->le_prev = lep->le_prev;
498 	if (lhp->lh_events == lep)
499 		lhp->lh_events = lep->le_next;
500 	mutex_exit(lhp->lh_lock);
501 
502 }
503 
504 static void
i_ldi_callback(dev_info_t * dip,ddi_eventcookie_t event_cookie,void * arg,void * bus_impldata)505 i_ldi_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
506     void *arg, void *bus_impldata)
507 {
508 	ldi_event_t *lep = (ldi_event_t *)arg;
509 
510 	ASSERT(lep != NULL);
511 
512 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, "
513 	    "event_cookie=0x%p, ldi_eventp=0x%p", "i_ldi_callback",
514 	    (void *)dip, (void *)event_cookie, (void *)lep));
515 
516 	lep->le_handler(lep->le_lhp, event_cookie, lep->le_arg, bus_impldata);
517 }
518 #endif
519 
520 /*
521  * LDI open helper functions
522  */
523 
524 /* get a vnode to a device by dev_t and otyp */
525 static int
ldi_vp_from_dev(dev_t dev,int otyp,vnode_t ** vpp)526 ldi_vp_from_dev(dev_t dev, int otyp, vnode_t **vpp)
527 {
528 	dev_info_t		*dip;
529 	vnode_t			*vp;
530 
531 	/* sanity check required input parameters */
532 	if ((dev == DDI_DEV_T_NONE) || (!OTYP_VALID(otyp)) || (vpp == NULL))
533 		return (EINVAL);
534 
535 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
536 		return (ENODEV);
537 
538 	vp = makespecvp(dev, OTYP_TO_VTYP(otyp));
539 	spec_assoc_vp_with_devi(vp, dip);
540 	ddi_release_devi(dip);  /* from e_ddi_hold_devi_by_dev */
541 
542 	*vpp = vp;
543 	return (0);
544 }
545 
546 /* get a vnode to a device by pathname */
547 int
ldi_vp_from_name(const char * path,vnode_t ** vpp)548 ldi_vp_from_name(const char *path, vnode_t **vpp)
549 {
550 	vnode_t			*vp = NULL;
551 	int			ret;
552 
553 	/* sanity check required input parameters */
554 	if ((path == NULL) || (vpp == NULL))
555 		return (EINVAL);
556 
557 	if (modrootloaded) {
558 		cred_t *saved_cred = curthread->t_cred;
559 
560 		/* we don't want lookupname to fail because of credentials */
561 		curthread->t_cred = kcred;
562 
563 		/*
564 		 * all lookups should be done in the global zone.  but
565 		 * lookupnameat() won't actually do this if an absolute
566 		 * path is passed in.  since the ldi interfaces require an
567 		 * absolute path we pass lookupnameat() a pointer to
568 		 * the character after the leading '/' and tell it to
569 		 * start searching at the current system root directory.
570 		 */
571 		ASSERT(*path == '/');
572 		ret = lookupnameat(path + 1, UIO_SYSSPACE, FOLLOW, NULLVPP,
573 		    &vp, rootdir);
574 
575 		/* restore this threads credentials */
576 		curthread->t_cred = saved_cred;
577 
578 		if (ret == 0) {
579 			if (!vn_matchops(vp, spec_getvnodeops()) ||
580 			    !VTYP_VALID(vp->v_type)) {
581 				VN_RELE(vp);
582 				return (ENXIO);
583 			}
584 		}
585 	}
586 
587 	if (vp == NULL) {
588 		dev_info_t	*dip;
589 		dev_t		dev;
590 		int		spec_type;
591 
592 		/*
593 		 * Root is not mounted, the minor node is not specified,
594 		 * or an OBP path has been specified.
595 		 */
596 
597 		/*
598 		 * Determine if path can be pruned to produce an
599 		 * OBP or devfs path for resolve_pathname.
600 		 */
601 		if (strncmp(path, "/devices/", 9) == 0)
602 			path += strlen("/devices");
603 
604 		/*
605 		 * if no minor node was specified the DEFAULT minor node
606 		 * will be returned.  if there is no DEFAULT minor node
607 		 * one will be fabricated of type S_IFCHR with the minor
608 		 * number equal to the instance number.
609 		 */
610 		ret = resolve_pathname(path, &dip, &dev, &spec_type);
611 		if (ret != 0)
612 			return (ENODEV);
613 
614 		ASSERT(STYP_VALID(spec_type));
615 		vp = makespecvp(dev, STYP_TO_VTYP(spec_type));
616 		spec_assoc_vp_with_devi(vp, dip);
617 		ddi_release_devi(dip);
618 	}
619 
620 	*vpp = vp;
621 	return (0);
622 }
623 
624 static int
ldi_devid_match(ddi_devid_t devid,dev_info_t * dip,dev_t dev)625 ldi_devid_match(ddi_devid_t devid, dev_info_t *dip, dev_t dev)
626 {
627 	char		*devidstr;
628 	ddi_prop_t	*propp;
629 
630 	/* convert devid as a string property */
631 	if ((devidstr = ddi_devid_str_encode(devid, NULL)) == NULL)
632 		return (0);
633 
634 	/*
635 	 * Search for the devid.  For speed and ease in locking this
636 	 * code directly uses the property implementation.  See
637 	 * ddi_common_devid_to_devlist() for a comment as to why.
638 	 */
639 	mutex_enter(&(DEVI(dip)->devi_lock));
640 
641 	/* check if there is a DDI_DEV_T_NONE devid property */
642 	propp = i_ddi_prop_search(DDI_DEV_T_NONE,
643 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &DEVI(dip)->devi_hw_prop_ptr);
644 	if (propp != NULL) {
645 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
646 			/* a DDI_DEV_T_NONE devid exists and matchs */
647 			mutex_exit(&(DEVI(dip)->devi_lock));
648 			ddi_devid_str_free(devidstr);
649 			return (1);
650 		} else {
651 			/* a DDI_DEV_T_NONE devid exists and doesn't match */
652 			mutex_exit(&(DEVI(dip)->devi_lock));
653 			ddi_devid_str_free(devidstr);
654 			return (0);
655 		}
656 	}
657 
658 	/* check if there is a devt specific devid property */
659 	propp = i_ddi_prop_search(dev,
660 	    DEVID_PROP_NAME, DEVID_PROP_FLAGS, &(DEVI(dip)->devi_hw_prop_ptr));
661 	if (propp != NULL) {
662 		if (ddi_devid_str_compare(propp->prop_val, devidstr) == 0) {
663 			/* a devt specific devid exists and matchs */
664 			mutex_exit(&(DEVI(dip)->devi_lock));
665 			ddi_devid_str_free(devidstr);
666 			return (1);
667 		} else {
668 			/* a devt specific devid exists and doesn't match */
669 			mutex_exit(&(DEVI(dip)->devi_lock));
670 			ddi_devid_str_free(devidstr);
671 			return (0);
672 		}
673 	}
674 
675 	/* we didn't find any devids associated with the device */
676 	mutex_exit(&(DEVI(dip)->devi_lock));
677 	ddi_devid_str_free(devidstr);
678 	return (0);
679 }
680 
681 /* get a handle to a device by devid and minor name */
682 int
ldi_vp_from_devid(ddi_devid_t devid,const char * minor_name,vnode_t ** vpp)683 ldi_vp_from_devid(ddi_devid_t devid, const char *minor_name, vnode_t **vpp)
684 {
685 	dev_info_t		*dip;
686 	vnode_t			*vp;
687 	int			ret, i, ndevs, styp;
688 	dev_t			dev, *devs;
689 
690 	/* sanity check required input parameters */
691 	if ((devid == NULL) || (minor_name == NULL) || (vpp == NULL))
692 		return (EINVAL);
693 
694 	ret = ddi_lyr_devid_to_devlist(devid, minor_name, &ndevs, &devs);
695 	if ((ret != DDI_SUCCESS) || (ndevs <= 0))
696 		return (ENODEV);
697 
698 	for (i = 0; i < ndevs; i++) {
699 		dev = devs[i];
700 
701 		if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
702 			continue;
703 
704 		/*
705 		 * now we have to verify that the devid of the disk
706 		 * still matches what was requested.
707 		 *
708 		 * we have to do this because the devid could have
709 		 * changed between the call to ddi_lyr_devid_to_devlist()
710 		 * and e_ddi_hold_devi_by_dev().  this is because when
711 		 * ddi_lyr_devid_to_devlist() returns a list of devts
712 		 * there is no kind of hold on those devts so a device
713 		 * could have been replaced out from under us in the
714 		 * interim.
715 		 */
716 		if ((i_ddi_minorname_to_devtspectype(dip, minor_name,
717 		    NULL, &styp) == DDI_SUCCESS) &&
718 		    ldi_devid_match(devid, dip, dev))
719 			break;
720 
721 		ddi_release_devi(dip);	/* from e_ddi_hold_devi_by_dev() */
722 	}
723 
724 	ddi_lyr_free_devlist(devs, ndevs);
725 
726 	if (i == ndevs)
727 		return (ENODEV);
728 
729 	ASSERT(STYP_VALID(styp));
730 	vp = makespecvp(dev, STYP_TO_VTYP(styp));
731 	spec_assoc_vp_with_devi(vp, dip);
732 	ddi_release_devi(dip);		/* from e_ddi_hold_devi_by_dev */
733 
734 	*vpp = vp;
735 	return (0);
736 }
737 
738 /* given a vnode, open a device */
739 static int
ldi_open_by_vp(vnode_t ** vpp,int flag,cred_t * cr,ldi_handle_t * lhp,struct ldi_ident * li)740 ldi_open_by_vp(vnode_t **vpp, int flag, cred_t *cr,
741     ldi_handle_t *lhp, struct ldi_ident *li)
742 {
743 	struct ldi_handle	*nlhp;
744 	vnode_t			*vp;
745 	int			err;
746 
747 	ASSERT((vpp != NULL) && (*vpp != NULL));
748 	ASSERT((lhp != NULL) && (li != NULL));
749 
750 	vp = *vpp;
751 	/* if the vnode passed in is not a device, then bail */
752 	if (!vn_matchops(vp, spec_getvnodeops()) || !VTYP_VALID(vp->v_type))
753 		return (ENXIO);
754 
755 	/*
756 	 * the caller may have specified a node that
757 	 * doesn't have cb_ops defined.  the ldi doesn't yet
758 	 * support opening devices without a valid cb_ops.
759 	 */
760 	if (devopsp[getmajor(vp->v_rdev)]->devo_cb_ops == NULL)
761 		return (ENXIO);
762 
763 	/* open the device */
764 	if ((err = VOP_OPEN(&vp, flag | FKLYR, cr, NULL)) != 0)
765 		return (err);
766 
767 	/* possible clone open, make sure that we still have a spec node */
768 	ASSERT(vn_matchops(vp, spec_getvnodeops()));
769 
770 	nlhp = handle_alloc(vp, li);
771 
772 	if (vp != *vpp) {
773 		/*
774 		 * allocating the layered handle took a new hold on the vnode
775 		 * so we can release the hold that was returned by the clone
776 		 * open
777 		 */
778 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
779 		    "ldi clone open", (void *)nlhp));
780 	} else {
781 		LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p",
782 		    "ldi open", (void *)nlhp));
783 	}
784 
785 	*vpp = vp;
786 	*lhp = (ldi_handle_t)nlhp;
787 	return (0);
788 }
789 
790 /* Call a drivers prop_op(9E) interface */
791 static int
i_ldi_prop_op(dev_t dev,dev_info_t * dip,ddi_prop_op_t prop_op,int flags,char * name,caddr_t valuep,int * lengthp)792 i_ldi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
793     int flags, char *name, caddr_t valuep, int *lengthp)
794 {
795 	struct dev_ops	*ops = NULL;
796 	int		res;
797 
798 	ASSERT((dip != NULL) && (name != NULL));
799 	ASSERT((prop_op == PROP_LEN) || (valuep != NULL));
800 	ASSERT(lengthp != NULL);
801 
802 	/*
803 	 * we can only be invoked after a driver has been opened and
804 	 * someone has a layered handle to it, so there had better be
805 	 * a valid ops vector.
806 	 */
807 	ops = DEVI(dip)->devi_ops;
808 	ASSERT(ops && ops->devo_cb_ops);
809 
810 	/*
811 	 * Some nexus drivers incorrectly set cb_prop_op to nodev,
812 	 * nulldev or even NULL.
813 	 */
814 	if ((ops->devo_cb_ops->cb_prop_op == nodev) ||
815 	    (ops->devo_cb_ops->cb_prop_op == nulldev) ||
816 	    (ops->devo_cb_ops->cb_prop_op == NULL)) {
817 		return (DDI_PROP_NOT_FOUND);
818 	}
819 
820 	/* check if this is actually DDI_DEV_T_ANY query */
821 	if (flags & LDI_DEV_T_ANY) {
822 		flags &= ~LDI_DEV_T_ANY;
823 		dev = DDI_DEV_T_ANY;
824 	}
825 
826 	res = cdev_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp);
827 	return (res);
828 }
829 
830 static void
i_ldi_prop_op_free(struct prop_driver_data * pdd)831 i_ldi_prop_op_free(struct prop_driver_data *pdd)
832 {
833 	kmem_free(pdd, pdd->pdd_size);
834 }
835 
836 static caddr_t
i_ldi_prop_op_alloc(int prop_len)837 i_ldi_prop_op_alloc(int prop_len)
838 {
839 	struct prop_driver_data	*pdd;
840 	int			pdd_size;
841 
842 	pdd_size = sizeof (struct prop_driver_data) + prop_len;
843 	pdd = kmem_alloc(pdd_size, KM_SLEEP);
844 	pdd->pdd_size = pdd_size;
845 	pdd->pdd_prop_free = i_ldi_prop_op_free;
846 	return ((caddr_t)&pdd[1]);
847 }
848 
849 /*
850  * i_ldi_prop_op_typed() is a wrapper for i_ldi_prop_op that is used
851  * by the typed ldi property lookup interfaces.
852  */
853 static int
i_ldi_prop_op_typed(dev_t dev,dev_info_t * dip,int flags,char * name,caddr_t * datap,int * lengthp,int elem_size)854 i_ldi_prop_op_typed(dev_t dev, dev_info_t *dip, int flags, char *name,
855     caddr_t *datap, int *lengthp, int elem_size)
856 {
857 	caddr_t	prop_val;
858 	int	prop_len, res;
859 
860 	ASSERT((dip != NULL) && (name != NULL));
861 	ASSERT((datap != NULL) && (lengthp != NULL));
862 
863 	/*
864 	 * first call the drivers prop_op() interface to allow it
865 	 * it to override default property values.
866 	 */
867 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
868 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
869 	if (res != DDI_PROP_SUCCESS)
870 		return (DDI_PROP_NOT_FOUND);
871 
872 	/* sanity check the property length */
873 	if (prop_len == 0) {
874 		/*
875 		 * the ddi typed interfaces don't allow a drivers to
876 		 * create properties with a length of 0.  so we should
877 		 * prevent drivers from returning 0 length dynamic
878 		 * properties for typed property lookups.
879 		 */
880 		return (DDI_PROP_NOT_FOUND);
881 	}
882 
883 	/* sanity check the property length against the element size */
884 	if (elem_size && ((prop_len % elem_size) != 0))
885 		return (DDI_PROP_NOT_FOUND);
886 
887 	/*
888 	 * got it.  now allocate a prop_driver_data struct so that the
889 	 * user can free the property via ddi_prop_free().
890 	 */
891 	prop_val = i_ldi_prop_op_alloc(prop_len);
892 
893 	/* lookup the property again, this time get the value */
894 	res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
895 	    flags | DDI_PROP_DYNAMIC, name, prop_val, &prop_len);
896 	if (res != DDI_PROP_SUCCESS) {
897 		ddi_prop_free(prop_val);
898 		return (DDI_PROP_NOT_FOUND);
899 	}
900 
901 	/* sanity check the property length */
902 	if (prop_len == 0) {
903 		ddi_prop_free(prop_val);
904 		return (DDI_PROP_NOT_FOUND);
905 	}
906 
907 	/* sanity check the property length against the element size */
908 	if (elem_size && ((prop_len % elem_size) != 0)) {
909 		ddi_prop_free(prop_val);
910 		return (DDI_PROP_NOT_FOUND);
911 	}
912 
913 	/*
914 	 * return the prop_driver_data struct and, optionally, the length
915 	 * of the data.
916 	 */
917 	*datap = prop_val;
918 	*lengthp = prop_len;
919 
920 	return (DDI_PROP_SUCCESS);
921 }
922 
923 /*
924  * i_check_string looks at a string property and makes sure its
925  * a valid null terminated string
926  */
927 static int
i_check_string(char * str,int prop_len)928 i_check_string(char *str, int prop_len)
929 {
930 	int i;
931 
932 	ASSERT(str != NULL);
933 
934 	for (i = 0; i < prop_len; i++) {
935 		if (str[i] == '\0')
936 			return (0);
937 	}
938 	return (1);
939 }
940 
941 /*
942  * i_pack_string_array takes a a string array property that is represented
943  * as a concatenation of strings (with the NULL character included for
944  * each string) and converts it into a format that can be returned by
945  * ldi_prop_lookup_string_array.
946  */
947 static int
i_pack_string_array(char * str_concat,int prop_len,char *** str_arrayp,int * nelemp)948 i_pack_string_array(char *str_concat, int prop_len,
949     char ***str_arrayp, int *nelemp)
950 {
951 	int i, nelem, pack_size;
952 	char **str_array, *strptr;
953 
954 	/*
955 	 * first we need to sanity check the input string array.
956 	 * in essence this can be done my making sure that the last
957 	 * character of the array passed in is null.  (meaning the last
958 	 * string in the array is NULL terminated.
959 	 */
960 	if (str_concat[prop_len - 1] != '\0')
961 		return (1);
962 
963 	/* now let's count the number of strings in the array */
964 	for (nelem = i = 0; i < prop_len; i++)
965 		if (str_concat[i] == '\0')
966 			nelem++;
967 	ASSERT(nelem >= 1);
968 
969 	/* now let's allocate memory for the new packed property */
970 	pack_size = (sizeof (char *) * (nelem + 1)) + prop_len;
971 	str_array = (char **)i_ldi_prop_op_alloc(pack_size);
972 
973 	/* let's copy the actual string data into the new property */
974 	strptr = (char *)&(str_array[nelem + 1]);
975 	bcopy(str_concat, strptr, prop_len);
976 
977 	/* now initialize the string array pointers */
978 	for (i = 0; i < nelem; i++) {
979 		str_array[i] = strptr;
980 		strptr += strlen(strptr) + 1;
981 	}
982 	str_array[nelem] = NULL;
983 
984 	/* set the return values */
985 	*str_arrayp = str_array;
986 	*nelemp = nelem;
987 
988 	return (0);
989 }
990 
991 
992 /*
993  * LDI Project private device usage interfaces
994  */
995 
996 /*
997  * Get a count of how many devices are currentl open by different consumers
998  */
999 int
ldi_usage_count()1000 ldi_usage_count()
1001 {
1002 	return (ldi_handle_hash_count);
1003 }
1004 
1005 static void
ldi_usage_walker_tgt_helper(ldi_usage_t * ldi_usage,vnode_t * vp)1006 ldi_usage_walker_tgt_helper(ldi_usage_t *ldi_usage, vnode_t *vp)
1007 {
1008 	dev_info_t	*dip;
1009 	dev_t		dev;
1010 
1011 	ASSERT(STYP_VALID(VTYP_TO_STYP(vp->v_type)));
1012 
1013 	/* get the target devt */
1014 	dev = vp->v_rdev;
1015 
1016 	/* try to get the target dip */
1017 	dip = VTOCS(vp)->s_dip;
1018 	if (dip != NULL) {
1019 		e_ddi_hold_devi(dip);
1020 	} else if (dev != DDI_DEV_T_NONE) {
1021 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1022 	}
1023 
1024 	/* set the target information */
1025 	ldi_usage->tgt_name = mod_major_to_name(getmajor(dev));
1026 	ldi_usage->tgt_modid = mod_name_to_modid(ldi_usage->tgt_name);
1027 	ldi_usage->tgt_devt = dev;
1028 	ldi_usage->tgt_spec_type = VTYP_TO_STYP(vp->v_type);
1029 	ldi_usage->tgt_dip = dip;
1030 }
1031 
1032 
1033 static int
ldi_usage_walker_helper(struct ldi_ident * lip,vnode_t * vp,void * arg,int (* callback)(const ldi_usage_t *,void *))1034 ldi_usage_walker_helper(struct ldi_ident *lip, vnode_t *vp,
1035     void *arg, int (*callback)(const ldi_usage_t *, void *))
1036 {
1037 	ldi_usage_t	ldi_usage;
1038 	struct devnames	*dnp;
1039 	dev_info_t	*dip;
1040 	major_t		major;
1041 	dev_t		dev;
1042 	int		ret = LDI_USAGE_CONTINUE;
1043 
1044 	/* set the target device information */
1045 	ldi_usage_walker_tgt_helper(&ldi_usage, vp);
1046 
1047 	/* get the source devt */
1048 	dev = lip->li_dev;
1049 
1050 	/* try to get the source dip */
1051 	dip = lip->li_dip;
1052 	if (dip != NULL) {
1053 		e_ddi_hold_devi(dip);
1054 	} else if (dev != DDI_DEV_T_NONE) {
1055 		dip = e_ddi_hold_devi_by_dev(dev, 0);
1056 	}
1057 
1058 	/* set the valid source information */
1059 	ldi_usage.src_modid = lip->li_modid;
1060 	ldi_usage.src_name = lip->li_modname;
1061 	ldi_usage.src_devt = dev;
1062 	ldi_usage.src_dip = dip;
1063 
1064 	/*
1065 	 * if the source ident represents either:
1066 	 *
1067 	 * - a kernel module (and not a device or device driver)
1068 	 * - a device node
1069 	 *
1070 	 * then we currently have all the info we need to report the
1071 	 * usage information so invoke the callback function.
1072 	 */
1073 	if (((lip->li_major == -1) && (dev == DDI_DEV_T_NONE)) ||
1074 	    (dip != NULL)) {
1075 		ret = callback(&ldi_usage, arg);
1076 		if (dip != NULL)
1077 			ddi_release_devi(dip);
1078 		if (ldi_usage.tgt_dip != NULL)
1079 			ddi_release_devi(ldi_usage.tgt_dip);
1080 		return (ret);
1081 	}
1082 
1083 	/*
1084 	 * now this is kinda gross.
1085 	 *
1086 	 * what we do here is attempt to associate every device instance
1087 	 * of the source driver on the system with the open target driver.
1088 	 * we do this because we don't know which instance of the device
1089 	 * could potentially access the lower device so we assume that all
1090 	 * the instances could access it.
1091 	 *
1092 	 * there are two ways we could have gotten here:
1093 	 *
1094 	 * 1) this layered ident represents one created using only a
1095 	 *    major number or a driver module name.  this means that when
1096 	 *    it was created we could not associate it with a particular
1097 	 *    dev_t or device instance.
1098 	 *
1099 	 *    when could this possibly happen you ask?
1100 	 *
1101 	 *    a perfect example of this is streams persistent links.
1102 	 *    when a persistant streams link is formed we can't associate
1103 	 *    the lower device stream with any particular upper device
1104 	 *    stream or instance.  this is because any particular upper
1105 	 *    device stream could be closed, then another could be
1106 	 *    opened with a different dev_t and device instance, and it
1107 	 *    would still have access to the lower linked stream.
1108 	 *
1109 	 *    since any instance of the upper streams driver could
1110 	 *    potentially access the lower stream whenever it wants,
1111 	 *    we represent that here by associating the opened lower
1112 	 *    device with every existing device instance of the upper
1113 	 *    streams driver.
1114 	 *
1115 	 * 2) This case should really never happen but we'll include it
1116 	 *    for completeness.
1117 	 *
1118 	 *    it's possible that we could have gotten here because we
1119 	 *    have a dev_t for the upper device but we couldn't find a
1120 	 *    dip associated with that dev_t.
1121 	 *
1122 	 *    the only types of devices that have dev_t without an
1123 	 *    associated dip are unbound DLPIv2 network devices.  These
1124 	 *    types of devices exist to be able to attach a stream to any
1125 	 *    instance of a hardware network device.  since these types of
1126 	 *    devices are usually hardware devices they should never
1127 	 *    really have other devices open.
1128 	 */
1129 	if (dev != DDI_DEV_T_NONE)
1130 		major = getmajor(dev);
1131 	else
1132 		major = lip->li_major;
1133 
1134 	ASSERT3U(major, <, devcnt);
1135 
1136 	dnp = &devnamesp[major];
1137 	LOCK_DEV_OPS(&dnp->dn_lock);
1138 	dip = dnp->dn_head;
1139 	while ((dip) && (ret == LDI_USAGE_CONTINUE)) {
1140 		e_ddi_hold_devi(dip);
1141 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1142 
1143 		/* set the source dip */
1144 		ldi_usage.src_dip = dip;
1145 
1146 		/* invoke the callback function */
1147 		ret = callback(&ldi_usage, arg);
1148 
1149 		LOCK_DEV_OPS(&dnp->dn_lock);
1150 		ddi_release_devi(dip);
1151 		dip = ddi_get_next(dip);
1152 	}
1153 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1154 
1155 	/* if there was a target dip, release it */
1156 	if (ldi_usage.tgt_dip != NULL)
1157 		ddi_release_devi(ldi_usage.tgt_dip);
1158 
1159 	return (ret);
1160 }
1161 
1162 /*
1163  * ldi_usage_walker() - this walker reports LDI kernel device usage
1164  * information via the callback() callback function.  the LDI keeps track
1165  * of what devices are being accessed in its own internal data structures.
1166  * this function walks those data structures to determine device usage.
1167  */
1168 void
ldi_usage_walker(void * arg,int (* callback)(const ldi_usage_t *,void *))1169 ldi_usage_walker(void *arg, int (*callback)(const ldi_usage_t *, void *))
1170 {
1171 	struct ldi_handle	*lhp;
1172 	struct ldi_ident	*lip;
1173 	vnode_t			*vp;
1174 	int			i;
1175 	int			ret = LDI_USAGE_CONTINUE;
1176 
1177 	for (i = 0; i < LH_HASH_SZ; i++) {
1178 		mutex_enter(&ldi_handle_hash_lock[i]);
1179 
1180 		lhp = ldi_handle_hash[i];
1181 		while ((lhp != NULL) && (ret == LDI_USAGE_CONTINUE)) {
1182 			lip = lhp->lh_ident;
1183 			vp = lhp->lh_vp;
1184 
1185 			/* invoke the devinfo callback function */
1186 			ret = ldi_usage_walker_helper(lip, vp, arg, callback);
1187 
1188 			lhp = lhp->lh_next;
1189 		}
1190 		mutex_exit(&ldi_handle_hash_lock[i]);
1191 
1192 		if (ret != LDI_USAGE_CONTINUE)
1193 			break;
1194 	}
1195 }
1196 
1197 /*
1198  * LDI Project private interfaces (streams linking interfaces)
1199  *
1200  * Streams supports a type of built in device layering via linking.
1201  * Certain types of streams drivers can be streams multiplexors.
1202  * A streams multiplexor supports the I_LINK/I_PLINK operation.
1203  * These operations allows other streams devices to be linked under the
1204  * multiplexor.  By definition all streams multiplexors are devices
1205  * so this linking is a type of device layering where the multiplexor
1206  * device is layered on top of the device linked below it.
1207  */
1208 
1209 /*
1210  * ldi_mlink_lh() is invoked when streams are linked using LDI handles.
1211  * It is not used for normal I_LINKs and I_PLINKs using file descriptors.
1212  *
1213  * The streams framework keeps track of links via the file_t of the lower
1214  * stream.  The LDI keeps track of devices using a vnode.  In the case
1215  * of a streams link created via an LDI handle, fnk_lh() allocates
1216  * a file_t that the streams framework can use to track the linkage.
1217  */
1218 int
ldi_mlink_lh(vnode_t * vp,int cmd,intptr_t arg,cred_t * crp,int * rvalp)1219 ldi_mlink_lh(vnode_t *vp, int cmd, intptr_t arg, cred_t *crp, int *rvalp)
1220 {
1221 	struct ldi_handle	*lhp = (struct ldi_handle *)arg;
1222 	vnode_t			*vpdown;
1223 	file_t			*fpdown;
1224 	int			err;
1225 
1226 	if (lhp == NULL)
1227 		return (EINVAL);
1228 
1229 	vpdown = lhp->lh_vp;
1230 	ASSERT(vn_matchops(vpdown, spec_getvnodeops()));
1231 	ASSERT(cmd == _I_PLINK_LH);
1232 
1233 	/*
1234 	 * create a new lower vnode and a file_t that points to it,
1235 	 * streams linking requires a file_t.  falloc() returns with
1236 	 * fpdown locked.
1237 	 */
1238 	VN_HOLD(vpdown);
1239 	(void) falloc(vpdown, FREAD|FWRITE, &fpdown, NULL);
1240 	mutex_exit(&fpdown->f_tlock);
1241 
1242 	/* try to establish the link */
1243 	err = mlink_file(vp, I_PLINK, fpdown, crp, rvalp, 1);
1244 
1245 	if (err != 0) {
1246 		/* the link failed, free the file_t and release the vnode */
1247 		mutex_enter(&fpdown->f_tlock);
1248 		unfalloc(fpdown);
1249 		VN_RELE(vpdown);
1250 	}
1251 
1252 	return (err);
1253 }
1254 
1255 /*
1256  * ldi_mlink_fp() is invoked for all successful streams linkages created
1257  * via I_LINK and I_PLINK.  ldi_mlink_fp() records the linkage information
1258  * in its internal state so that the devinfo snapshot code has some
1259  * observability into streams device linkage information.
1260  */
1261 int
ldi_mlink_fp(struct stdata * stp,file_t * fpdown,int lhlink,int type)1262 ldi_mlink_fp(struct stdata *stp, file_t *fpdown, int lhlink, int type)
1263 {
1264 	vnode_t			*vp = fpdown->f_vnode;
1265 	struct snode		*sp, *csp;
1266 	ldi_ident_t		li;
1267 	major_t			major;
1268 	int			ret;
1269 
1270 	/*
1271 	 * If the lower stream is not a device then return but claim to have
1272 	 * succeeded, which matches our historical behaviour of just not
1273 	 * setting up LDI in this case.
1274 	 */
1275 	if (!vn_matchops(vp, spec_getvnodeops()))
1276 		return (0);
1277 
1278 	ASSERT(!servicing_interrupt());
1279 
1280 	LDI_STREAMS_LNK((CE_NOTE, "%s: linking streams "
1281 	    "stp=0x%p, fpdown=0x%p", "ldi_mlink_fp",
1282 	    (void *)stp, (void *)fpdown));
1283 
1284 	sp = VTOS(vp);
1285 	csp = VTOS(sp->s_commonvp);
1286 
1287 	/* get a layered ident for the upper stream */
1288 	if (type == LINKNORMAL) {
1289 		/*
1290 		 * if the link is not persistant then we can associate
1291 		 * the upper stream with a dev_t.  this is because the
1292 		 * upper stream is associated with a vnode, which is
1293 		 * associated with a dev_t and this binding can't change
1294 		 * during the life of the stream.  since the link isn't
1295 		 * persistant once the stream is destroyed the link is
1296 		 * destroyed.  so the dev_t will be valid for the life
1297 		 * of the link.
1298 		 */
1299 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1300 	} else {
1301 		/*
1302 		 * if the link is persistant we can only associate the
1303 		 * link with a driver (and not a dev_t.)  this is
1304 		 * because subsequent opens of the upper device may result
1305 		 * in a different stream (and dev_t) having access to
1306 		 * the lower stream.
1307 		 *
1308 		 * for example, if the upper stream is closed after the
1309 		 * persistant link operation is completed, a subsequent
1310 		 * open of the upper device will create a new stream which
1311 		 * may have a different dev_t and an unlink operation
1312 		 * can be performed using this new upper stream.
1313 		 */
1314 		VERIFY3S(type, ==, LINKPERSIST);
1315 		major = getmajor(stp->sd_vnode->v_rdev);
1316 		ret = ldi_ident_from_major(major, &li);
1317 	}
1318 
1319 	if (ret != 0)
1320 		return (ret);
1321 
1322 	/* check if this was a plink via a layered handle */
1323 	if (lhlink) {
1324 		/*
1325 		 * increment the common snode s_count.
1326 		 *
1327 		 * this is done because after the link operation there
1328 		 * are two ways that s_count can be decremented.
1329 		 *
1330 		 * when the layered handle used to create the link is
1331 		 * closed, spec_close() is called and it will decrement
1332 		 * s_count in the common snode.  if we don't increment
1333 		 * s_count here then this could cause spec_close() to
1334 		 * actually close the device while it's still linked
1335 		 * under a multiplexer.
1336 		 *
1337 		 * also, when the lower stream is unlinked, closef() is
1338 		 * called for the file_t associated with this snode.
1339 		 * closef() will call spec_close(), which will decrement
1340 		 * s_count.  if we dont't increment s_count here then this
1341 		 * could cause spec_close() to actually close the device
1342 		 * while there may still be valid layered handles
1343 		 * pointing to it.
1344 		 */
1345 		VERIFY3S(type, ==, LINKPERSIST);
1346 
1347 		mutex_enter(&csp->s_lock);
1348 		VERIFY(csp->s_count >= 1);
1349 		csp->s_count++;
1350 		mutex_exit(&csp->s_lock);
1351 
1352 		/*
1353 		 * decrement the f_count.
1354 		 * this is done because the layered driver framework does
1355 		 * not actually cache a copy of the file_t allocated to
1356 		 * do the link.  this is done here instead of in ldi_mlink_lh()
1357 		 * because there is a window in ldi_mlink_lh() between where
1358 		 * milnk_file() returns and we would decrement the f_count
1359 		 * when the stream could be unlinked.
1360 		 */
1361 		mutex_enter(&fpdown->f_tlock);
1362 		fpdown->f_count--;
1363 		mutex_exit(&fpdown->f_tlock);
1364 	}
1365 
1366 	/*
1367 	 * NOTE: here we rely on the streams subsystem not allowing
1368 	 * a stream to be multiplexed more than once.  if this
1369 	 * changes, we break.
1370 	 *
1371 	 * mark the snode/stream as multiplexed
1372 	 */
1373 	mutex_enter(&sp->s_lock);
1374 	VERIFY(!(sp->s_flag & SMUXED));
1375 	sp->s_flag |= SMUXED;
1376 	mutex_exit(&sp->s_lock);
1377 
1378 	(void) handle_alloc(vp, (struct ldi_ident *)li);
1379 	ldi_ident_release(li);
1380 
1381 	return (0);
1382 }
1383 
1384 int
ldi_munlink_fp(struct stdata * stp,file_t * fpdown,int type)1385 ldi_munlink_fp(struct stdata *stp, file_t *fpdown, int type)
1386 {
1387 	struct ldi_handle	*lhp;
1388 	vnode_t			*vp = (vnode_t *)fpdown->f_vnode;
1389 	struct snode		*sp;
1390 	ldi_ident_t		li;
1391 	major_t			major;
1392 	int			ret;
1393 
1394 	/*
1395 	 * If the lower stream is not a device then return but claim to have
1396 	 * succeeded, which matches our historical behaviour of just not
1397 	 * setting up LDI in this case.
1398 	 */
1399 	if (!vn_matchops(vp, spec_getvnodeops()))
1400 		return (0);
1401 
1402 	ASSERT(!servicing_interrupt());
1403 
1404 	LDI_STREAMS_LNK((CE_NOTE, "%s: unlinking streams "
1405 	    "stp=0x%p, fpdown=0x%p", "ldi_munlink_fp",
1406 	    (void *)stp, (void *)fpdown));
1407 
1408 	/*
1409 	 * clear the owner for this snode
1410 	 * see the comment in ldi_mlink_fp() for information about how
1411 	 * the ident is allocated
1412 	 */
1413 	if (type == LINKNORMAL) {
1414 		ret = ldi_ident_from_stream(getendq(stp->sd_wrq), &li);
1415 	} else {
1416 		VERIFY3S(type, ==, LINKPERSIST);
1417 		major = getmajor(stp->sd_vnode->v_rdev);
1418 		ret = ldi_ident_from_major(major, &li);
1419 	}
1420 
1421 	if (ret != 0)
1422 		return (ret);
1423 
1424 	/*
1425 	 * NOTE: here we rely on the streams subsystem not allowing
1426 	 * a stream to be multiplexed more than once.  if this
1427 	 * changes, we break.
1428 	 *
1429 	 * mark the snode/stream as not multiplexed
1430 	 */
1431 	sp = VTOS(vp);
1432 	mutex_enter(&sp->s_lock);
1433 	VERIFY(sp->s_flag & SMUXED);
1434 	sp->s_flag &= ~SMUXED;
1435 	mutex_exit(&sp->s_lock);
1436 
1437 	lhp = handle_find(vp, (struct ldi_ident *)li);
1438 	handle_release(lhp);
1439 	ldi_ident_release(li);
1440 
1441 	return (0);
1442 }
1443 
1444 /*
1445  * LDI Consolidation private interfaces
1446  */
1447 int
ldi_ident_from_mod(struct modlinkage * modlp,ldi_ident_t * lip)1448 ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
1449 {
1450 	struct modctl		*modp;
1451 	major_t			major;
1452 	char			*name;
1453 
1454 	if ((modlp == NULL) || (lip == NULL))
1455 		return (EINVAL);
1456 
1457 	ASSERT(!servicing_interrupt());
1458 
1459 	modp = mod_getctl(modlp);
1460 	if (modp == NULL)
1461 		return (EINVAL);
1462 	name = modp->mod_modname;
1463 	if (name == NULL)
1464 		return (EINVAL);
1465 	major = mod_name_to_major(name);
1466 
1467 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1468 
1469 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1470 	    "ldi_ident_from_mod", (void *)*lip, name));
1471 
1472 	return (0);
1473 }
1474 
1475 ldi_ident_t
ldi_ident_from_anon()1476 ldi_ident_from_anon()
1477 {
1478 	ldi_ident_t	lip;
1479 
1480 	ASSERT(!servicing_interrupt());
1481 
1482 	lip = (ldi_ident_t)ident_alloc("genunix", NULL, DDI_DEV_T_NONE, -1);
1483 
1484 	LDI_ALLOCFREE((CE_WARN, "%s: li=0x%p, mod=%s",
1485 	    "ldi_ident_from_anon", (void *)lip, "genunix"));
1486 
1487 	return (lip);
1488 }
1489 
1490 
1491 /*
1492  * LDI Public interfaces
1493  */
1494 int
ldi_ident_from_stream(struct queue * sq,ldi_ident_t * lip)1495 ldi_ident_from_stream(struct queue *sq, ldi_ident_t *lip)
1496 {
1497 	struct stdata		*stp;
1498 	dev_t			dev;
1499 	char			*name;
1500 
1501 	if ((sq == NULL) || (lip == NULL))
1502 		return (EINVAL);
1503 
1504 	ASSERT(!servicing_interrupt());
1505 
1506 	stp = sq->q_stream;
1507 	if (!vn_matchops(stp->sd_vnode, spec_getvnodeops()))
1508 		return (EINVAL);
1509 
1510 	dev = stp->sd_vnode->v_rdev;
1511 	name = mod_major_to_name(getmajor(dev));
1512 	if (name == NULL)
1513 		return (EINVAL);
1514 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1515 
1516 	LDI_ALLOCFREE((CE_WARN,
1517 	    "%s: li=0x%p, mod=%s, minor=0x%x, stp=0x%p",
1518 	    "ldi_ident_from_stream", (void *)*lip, name, getminor(dev),
1519 	    (void *)stp));
1520 
1521 	return (0);
1522 }
1523 
1524 int
ldi_ident_from_dev(dev_t dev,ldi_ident_t * lip)1525 ldi_ident_from_dev(dev_t dev, ldi_ident_t *lip)
1526 {
1527 	char			*name;
1528 
1529 	if (lip == NULL)
1530 		return (EINVAL);
1531 
1532 	ASSERT(!servicing_interrupt());
1533 
1534 	name = mod_major_to_name(getmajor(dev));
1535 	if (name == NULL)
1536 		return (EINVAL);
1537 	*lip = (ldi_ident_t)ident_alloc(name, NULL, dev, -1);
1538 
1539 	LDI_ALLOCFREE((CE_WARN,
1540 	    "%s: li=0x%p, mod=%s, minor=0x%x",
1541 	    "ldi_ident_from_dev", (void *)*lip, name, getminor(dev)));
1542 
1543 	return (0);
1544 }
1545 
1546 int
ldi_ident_from_dip(dev_info_t * dip,ldi_ident_t * lip)1547 ldi_ident_from_dip(dev_info_t *dip, ldi_ident_t *lip)
1548 {
1549 	struct dev_info		*devi = (struct dev_info *)dip;
1550 	char			*name;
1551 
1552 	if ((dip == NULL) || (lip == NULL))
1553 		return (EINVAL);
1554 
1555 	ASSERT(!servicing_interrupt());
1556 
1557 	name = mod_major_to_name(devi->devi_major);
1558 	if (name == NULL)
1559 		return (EINVAL);
1560 	*lip = (ldi_ident_t)ident_alloc(name, dip, DDI_DEV_T_NONE, -1);
1561 
1562 	LDI_ALLOCFREE((CE_WARN,
1563 	    "%s: li=0x%p, mod=%s, dip=0x%p",
1564 	    "ldi_ident_from_dip", (void *)*lip, name, (void *)devi));
1565 
1566 	return (0);
1567 }
1568 
1569 int
ldi_ident_from_major(major_t major,ldi_ident_t * lip)1570 ldi_ident_from_major(major_t major, ldi_ident_t *lip)
1571 {
1572 	char			*name;
1573 
1574 	if (lip == NULL)
1575 		return (EINVAL);
1576 
1577 	ASSERT(!servicing_interrupt());
1578 
1579 	name = mod_major_to_name(major);
1580 	if (name == NULL)
1581 		return (EINVAL);
1582 	*lip = (ldi_ident_t)ident_alloc(name, NULL, DDI_DEV_T_NONE, major);
1583 
1584 	LDI_ALLOCFREE((CE_WARN,
1585 	    "%s: li=0x%p, mod=%s",
1586 	    "ldi_ident_from_major", (void *)*lip, name));
1587 
1588 	return (0);
1589 }
1590 
1591 void
ldi_ident_release(ldi_ident_t li)1592 ldi_ident_release(ldi_ident_t li)
1593 {
1594 	struct ldi_ident	*ident = (struct ldi_ident *)li;
1595 	char			*name;
1596 
1597 	if (li == NULL)
1598 		return;
1599 
1600 	ASSERT(!servicing_interrupt());
1601 
1602 	name = ident->li_modname;
1603 
1604 	LDI_ALLOCFREE((CE_WARN,
1605 	    "%s: li=0x%p, mod=%s",
1606 	    "ldi_ident_release", (void *)li, name));
1607 
1608 	ident_release((struct ldi_ident *)li);
1609 }
1610 
1611 /* get a handle to a device by dev_t and otyp */
1612 int
ldi_open_by_dev(dev_t * devp,int otyp,int flag,cred_t * cr,ldi_handle_t * lhp,ldi_ident_t li)1613 ldi_open_by_dev(dev_t *devp, int otyp, int flag, cred_t *cr,
1614     ldi_handle_t *lhp, ldi_ident_t li)
1615 {
1616 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1617 	int			ret;
1618 	vnode_t			*vp;
1619 
1620 	/* sanity check required input parameters */
1621 	if ((devp == NULL) || (!OTYP_VALID(otyp)) || (cr == NULL) ||
1622 	    (lhp == NULL) || (lip == NULL))
1623 		return (EINVAL);
1624 
1625 	ASSERT(!servicing_interrupt());
1626 
1627 	if ((ret = ldi_vp_from_dev(*devp, otyp, &vp)) != 0)
1628 		return (ret);
1629 
1630 	if ((ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip)) == 0) {
1631 		*devp = vp->v_rdev;
1632 	}
1633 	VN_RELE(vp);
1634 
1635 	return (ret);
1636 }
1637 
1638 /* get a handle to a device by pathname */
1639 int
ldi_open_by_name(const char * pathname,int flag,cred_t * cr,ldi_handle_t * lhp,ldi_ident_t li)1640 ldi_open_by_name(const char *pathname, int flag, cred_t *cr,
1641     ldi_handle_t *lhp, ldi_ident_t li)
1642 {
1643 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1644 	int			ret;
1645 	vnode_t			*vp;
1646 
1647 	/* sanity check required input parameters */
1648 	if ((pathname == NULL) || (*pathname != '/') ||
1649 	    (cr == NULL) || (lhp == NULL) || (lip == NULL))
1650 		return (EINVAL);
1651 
1652 	ASSERT(!servicing_interrupt());
1653 
1654 	if ((ret = ldi_vp_from_name(pathname, &vp)) != 0)
1655 		return (ret);
1656 
1657 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1658 	VN_RELE(vp);
1659 
1660 	return (ret);
1661 }
1662 
1663 /* get a handle to a device by devid and minor_name */
1664 int
ldi_open_by_devid(ddi_devid_t devid,const char * minor_name,int flag,cred_t * cr,ldi_handle_t * lhp,ldi_ident_t li)1665 ldi_open_by_devid(ddi_devid_t devid, const char *minor_name,
1666     int flag, cred_t *cr, ldi_handle_t *lhp, ldi_ident_t li)
1667 {
1668 	struct ldi_ident	*lip = (struct ldi_ident *)li;
1669 	int			ret;
1670 	vnode_t			*vp;
1671 
1672 	/* sanity check required input parameters */
1673 	if ((minor_name == NULL) || (cr == NULL) ||
1674 	    (lhp == NULL) || (lip == NULL))
1675 		return (EINVAL);
1676 
1677 	ASSERT(!servicing_interrupt());
1678 
1679 	if ((ret = ldi_vp_from_devid(devid, minor_name, &vp)) != 0)
1680 		return (ret);
1681 
1682 	ret = ldi_open_by_vp(&vp, flag, cr, lhp, lip);
1683 	VN_RELE(vp);
1684 
1685 	return (ret);
1686 }
1687 
1688 int
ldi_close(ldi_handle_t lh,int flag,cred_t * cr)1689 ldi_close(ldi_handle_t lh, int flag, cred_t *cr)
1690 {
1691 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1692 	struct ldi_event	*lep;
1693 	int			err = 0;
1694 	int			notify = 0;
1695 	list_t			*listp;
1696 	ldi_ev_callback_impl_t	*lecp;
1697 
1698 	if (lh == NULL)
1699 		return (EINVAL);
1700 
1701 	ASSERT(!servicing_interrupt());
1702 
1703 #ifdef	LDI_OBSOLETE_EVENT
1704 
1705 	/*
1706 	 * Any event handlers should have been unregistered by the
1707 	 * time ldi_close() is called.  If they haven't then it's a
1708 	 * bug.
1709 	 *
1710 	 * In a debug kernel we'll panic to make the problem obvious.
1711 	 */
1712 	ASSERT(handlep->lh_events == NULL);
1713 
1714 	/*
1715 	 * On a production kernel we'll "do the right thing" (unregister
1716 	 * the event handlers) and then complain about having to do the
1717 	 * work ourselves.
1718 	 */
1719 	while ((lep = handlep->lh_events) != NULL) {
1720 		err = 1;
1721 		(void) ldi_remove_event_handler(lh, (ldi_callback_id_t)lep);
1722 	}
1723 	if (err) {
1724 		struct ldi_ident *lip = handlep->lh_ident;
1725 		ASSERT(lip != NULL);
1726 		cmn_err(CE_NOTE, "ldi err: %s "
1727 		    "failed to unregister layered event handlers before "
1728 		    "closing devices", lip->li_modname);
1729 	}
1730 #endif
1731 
1732 	/* do a layered close on the device */
1733 	err = VOP_CLOSE(handlep->lh_vp, flag | FKLYR, 1, (offset_t)0, cr, NULL);
1734 
1735 	LDI_OPENCLOSE((CE_WARN, "%s: lh=0x%p", "ldi close", (void *)lh));
1736 
1737 	/*
1738 	 * Search the event callback list for callbacks with this
1739 	 * handle. There are 2 cases
1740 	 * 1. Called in the context of a notify. The handle consumer
1741 	 *    is releasing its hold on the device to allow a reconfiguration
1742 	 *    of the device. Simply NULL out the handle and the notify callback.
1743 	 *    The finalize callback is still available so that the consumer
1744 	 *    knows of the final disposition of the device.
1745 	 * 2. Not called in the context of notify. NULL out the handle as well
1746 	 *    as the notify and finalize callbacks. Since the consumer has
1747 	 *    closed the handle, we assume it is not interested in the
1748 	 *    notify and finalize callbacks.
1749 	 */
1750 	ldi_ev_lock();
1751 
1752 	if (handlep->lh_flags & LH_FLAGS_NOTIFY)
1753 		notify = 1;
1754 	listp = &ldi_ev_callback_list.le_head;
1755 	for (lecp = list_head(listp); lecp; lecp = list_next(listp, lecp)) {
1756 		if (lecp->lec_lhp != handlep)
1757 			continue;
1758 		lecp->lec_lhp = NULL;
1759 		lecp->lec_notify = NULL;
1760 		LDI_EVDBG((CE_NOTE, "ldi_close: NULLed lh and notify"));
1761 		if (!notify) {
1762 			LDI_EVDBG((CE_NOTE, "ldi_close: NULLed finalize"));
1763 			lecp->lec_finalize = NULL;
1764 		}
1765 	}
1766 
1767 	if (notify)
1768 		handlep->lh_flags &= ~LH_FLAGS_NOTIFY;
1769 	ldi_ev_unlock();
1770 
1771 	/*
1772 	 * Free the handle even if the device close failed.  why?
1773 	 *
1774 	 * If the device close failed we can't really make assumptions
1775 	 * about the devices state so we shouldn't allow access to the
1776 	 * device via this handle any more.  If the device consumer wants
1777 	 * to access the device again they should open it again.
1778 	 *
1779 	 * This is the same way file/device close failures are handled
1780 	 * in other places like spec_close() and closeandsetf().
1781 	 */
1782 	handle_release(handlep);
1783 	return (err);
1784 }
1785 
1786 int
ldi_read(ldi_handle_t lh,struct uio * uiop,cred_t * credp)1787 ldi_read(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1788 {
1789 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1790 	vnode_t			*vp;
1791 	dev_t			dev;
1792 	int			ret;
1793 
1794 	if (lh == NULL)
1795 		return (EINVAL);
1796 
1797 	vp = handlep->lh_vp;
1798 	dev = vp->v_rdev;
1799 	if (handlep->lh_type & LH_CBDEV) {
1800 		ret = cdev_read(dev, uiop, credp);
1801 	} else if (handlep->lh_type & LH_STREAM) {
1802 		ret = strread(vp, uiop, credp);
1803 	} else {
1804 		return (ENOTSUP);
1805 	}
1806 	return (ret);
1807 }
1808 
1809 int
ldi_write(ldi_handle_t lh,struct uio * uiop,cred_t * credp)1810 ldi_write(ldi_handle_t lh, struct uio *uiop, cred_t *credp)
1811 {
1812 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1813 	vnode_t			*vp;
1814 	dev_t			dev;
1815 	int			ret;
1816 
1817 	if (lh == NULL)
1818 		return (EINVAL);
1819 
1820 	vp = handlep->lh_vp;
1821 	dev = vp->v_rdev;
1822 	if (handlep->lh_type & LH_CBDEV) {
1823 		ret = cdev_write(dev, uiop, credp);
1824 	} else if (handlep->lh_type & LH_STREAM) {
1825 		ret = strwrite(vp, uiop, credp);
1826 	} else {
1827 		return (ENOTSUP);
1828 	}
1829 	return (ret);
1830 }
1831 
1832 int
ldi_get_size(ldi_handle_t lh,uint64_t * sizep)1833 ldi_get_size(ldi_handle_t lh, uint64_t *sizep)
1834 {
1835 	int			otyp;
1836 	uint_t			value;
1837 	int64_t			drv_prop64;
1838 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1839 	uint_t			blksize;
1840 	int			blkshift;
1841 
1842 
1843 	if ((lh == NULL) || (sizep == NULL))
1844 		return (DDI_FAILURE);
1845 
1846 	if (handlep->lh_type & LH_STREAM)
1847 		return (DDI_FAILURE);
1848 
1849 	/*
1850 	 * Determine device type (char or block).
1851 	 * Character devices support Size/size
1852 	 * property value. Block devices may support
1853 	 * Nblocks/nblocks or Size/size property value.
1854 	 */
1855 	if ((ldi_get_otyp(lh, &otyp)) != 0)
1856 		return (DDI_FAILURE);
1857 
1858 	if (otyp == OTYP_BLK) {
1859 		if (ldi_prop_exists(lh,
1860 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Nblocks")) {
1861 
1862 			drv_prop64 = ldi_prop_get_int64(lh,
1863 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1864 			    "Nblocks", 0);
1865 			blksize = ldi_prop_get_int(lh,
1866 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1867 			    "blksize", DEV_BSIZE);
1868 			if (blksize == DEV_BSIZE)
1869 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1870 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1871 				    "device-blksize", DEV_BSIZE);
1872 
1873 			/* blksize must be a power of two */
1874 			ASSERT(BIT_ONLYONESET(blksize));
1875 			blkshift = highbit(blksize) - 1;
1876 
1877 			/*
1878 			 * We don't support Nblocks values that don't have
1879 			 * an accurate uint64_t byte count representation.
1880 			 */
1881 			if ((uint64_t)drv_prop64 >= (UINT64_MAX >> blkshift))
1882 				return (DDI_FAILURE);
1883 
1884 			*sizep = (uint64_t)
1885 			    (((u_offset_t)drv_prop64) << blkshift);
1886 			return (DDI_SUCCESS);
1887 		}
1888 
1889 		if (ldi_prop_exists(lh,
1890 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "nblocks")) {
1891 
1892 			value = ldi_prop_get_int(lh,
1893 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1894 			    "nblocks", 0);
1895 			blksize = ldi_prop_get_int(lh,
1896 			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1897 			    "blksize", DEV_BSIZE);
1898 			if (blksize == DEV_BSIZE)
1899 				blksize = ldi_prop_get_int(lh, LDI_DEV_T_ANY |
1900 				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1901 				    "device-blksize", DEV_BSIZE);
1902 
1903 			/* blksize must be a power of two */
1904 			ASSERT(BIT_ONLYONESET(blksize));
1905 			blkshift = highbit(blksize) - 1;
1906 
1907 			/*
1908 			 * We don't support nblocks values that don't have an
1909 			 * accurate uint64_t byte count representation.
1910 			 */
1911 			if ((uint64_t)value >= (UINT64_MAX >> blkshift))
1912 				return (DDI_FAILURE);
1913 
1914 			*sizep = (uint64_t)
1915 			    (((u_offset_t)value) << blkshift);
1916 			return (DDI_SUCCESS);
1917 		}
1918 	}
1919 
1920 	if (ldi_prop_exists(lh,
1921 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size")) {
1922 
1923 		drv_prop64 = ldi_prop_get_int64(lh,
1924 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "Size", 0);
1925 		*sizep = (uint64_t)drv_prop64;
1926 		return (DDI_SUCCESS);
1927 	}
1928 
1929 	if (ldi_prop_exists(lh,
1930 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size")) {
1931 
1932 		value = ldi_prop_get_int(lh,
1933 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "size", 0);
1934 		*sizep = (uint64_t)value;
1935 		return (DDI_SUCCESS);
1936 	}
1937 
1938 	/* unable to determine device size */
1939 	return (DDI_FAILURE);
1940 }
1941 
1942 int
ldi_ioctl(ldi_handle_t lh,int cmd,intptr_t arg,int mode,cred_t * cr,int * rvalp)1943 ldi_ioctl(ldi_handle_t lh, int cmd, intptr_t arg, int mode,
1944     cred_t *cr, int *rvalp)
1945 {
1946 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1947 	vnode_t			*vp;
1948 	dev_t			dev;
1949 	int			ret, copymode, unused;
1950 
1951 	if (lh == NULL)
1952 		return (EINVAL);
1953 
1954 	/*
1955 	 * if the data pointed to by arg is located in the kernel then
1956 	 * make sure the FNATIVE flag is set.
1957 	 */
1958 	if (mode & FKIOCTL)
1959 		mode = (mode & ~FMODELS) | FNATIVE | FKIOCTL;
1960 
1961 	/*
1962 	 * Some drivers assume that rvalp will always be non-NULL, so in
1963 	 * an attempt to avoid panics if the caller passed in a NULL
1964 	 * value, update rvalp to point to a temporary variable.
1965 	 */
1966 	if (rvalp == NULL)
1967 		rvalp = &unused;
1968 	vp = handlep->lh_vp;
1969 	dev = vp->v_rdev;
1970 	if (handlep->lh_type & LH_CBDEV) {
1971 		ret = cdev_ioctl(dev, cmd, arg, mode, cr, rvalp);
1972 	} else if (handlep->lh_type & LH_STREAM) {
1973 		copymode = (mode & FKIOCTL) ? K_TO_K : U_TO_K;
1974 
1975 		/*
1976 		 * if we get an I_PLINK from within the kernel the
1977 		 * arg is a layered handle pointer instead of
1978 		 * a file descriptor, so we translate this ioctl
1979 		 * into a private one that can handle this.
1980 		 */
1981 		if ((mode & FKIOCTL) && (cmd == I_PLINK))
1982 			cmd = _I_PLINK_LH;
1983 
1984 		ret = strioctl(vp, cmd, arg, mode, copymode, cr, rvalp);
1985 	} else {
1986 		return (ENOTSUP);
1987 	}
1988 
1989 	return (ret);
1990 }
1991 
1992 int
ldi_poll(ldi_handle_t lh,short events,int anyyet,short * reventsp,struct pollhead ** phpp)1993 ldi_poll(ldi_handle_t lh, short events, int anyyet, short *reventsp,
1994     struct pollhead **phpp)
1995 {
1996 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
1997 	vnode_t			*vp;
1998 	dev_t			dev;
1999 	int			ret;
2000 
2001 	if (lh == NULL)
2002 		return (EINVAL);
2003 
2004 	vp = handlep->lh_vp;
2005 	dev = vp->v_rdev;
2006 	if (handlep->lh_type & LH_CBDEV) {
2007 		ret = cdev_poll(dev, events, anyyet, reventsp, phpp);
2008 	} else if (handlep->lh_type & LH_STREAM) {
2009 		ret = strpoll(vp->v_stream, events, anyyet, reventsp, phpp);
2010 	} else {
2011 		return (ENOTSUP);
2012 	}
2013 
2014 	return (ret);
2015 }
2016 
2017 int
ldi_prop_op(ldi_handle_t lh,ddi_prop_op_t prop_op,int flags,char * name,caddr_t valuep,int * length)2018 ldi_prop_op(ldi_handle_t lh, ddi_prop_op_t prop_op,
2019     int flags, char *name, caddr_t valuep, int *length)
2020 {
2021 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2022 	dev_t			dev;
2023 	dev_info_t		*dip;
2024 	int			ret;
2025 	struct snode		*csp;
2026 
2027 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2028 		return (DDI_PROP_INVAL_ARG);
2029 
2030 	if ((prop_op != PROP_LEN) && (valuep == NULL))
2031 		return (DDI_PROP_INVAL_ARG);
2032 
2033 	if (length == NULL)
2034 		return (DDI_PROP_INVAL_ARG);
2035 
2036 	/*
2037 	 * try to find the associated dip,
2038 	 * this places a hold on the driver
2039 	 */
2040 	dev = handlep->lh_vp->v_rdev;
2041 
2042 	csp = VTOCS(handlep->lh_vp);
2043 	mutex_enter(&csp->s_lock);
2044 	if ((dip = csp->s_dip) != NULL)
2045 		e_ddi_hold_devi(dip);
2046 	mutex_exit(&csp->s_lock);
2047 	if (dip == NULL)
2048 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2049 
2050 	if (dip == NULL)
2051 		return (DDI_PROP_NOT_FOUND);
2052 
2053 	ret = i_ldi_prop_op(dev, dip, prop_op, flags, name, valuep, length);
2054 	ddi_release_devi(dip);
2055 
2056 	return (ret);
2057 }
2058 
2059 int
ldi_strategy(ldi_handle_t lh,struct buf * bp)2060 ldi_strategy(ldi_handle_t lh, struct buf *bp)
2061 {
2062 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2063 	dev_t			dev;
2064 
2065 	if ((lh == NULL) || (bp == NULL))
2066 		return (EINVAL);
2067 
2068 	/* this entry point is only supported for cb devices */
2069 	dev = handlep->lh_vp->v_rdev;
2070 	if (!(handlep->lh_type & LH_CBDEV))
2071 		return (ENOTSUP);
2072 
2073 	bp->b_edev = dev;
2074 	bp->b_dev = cmpdev(dev);
2075 	return (bdev_strategy(bp));
2076 }
2077 
2078 int
ldi_dump(ldi_handle_t lh,caddr_t addr,daddr_t blkno,int nblk)2079 ldi_dump(ldi_handle_t lh, caddr_t addr, daddr_t blkno, int nblk)
2080 {
2081 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2082 	dev_t			dev;
2083 
2084 	if (lh == NULL)
2085 		return (EINVAL);
2086 
2087 	/* this entry point is only supported for cb devices */
2088 	dev = handlep->lh_vp->v_rdev;
2089 	if (!(handlep->lh_type & LH_CBDEV))
2090 		return (ENOTSUP);
2091 
2092 	return (bdev_dump(dev, addr, blkno, nblk));
2093 }
2094 
2095 int
ldi_devmap(ldi_handle_t lh,devmap_cookie_t dhp,offset_t off,size_t len,size_t * maplen,uint_t model)2096 ldi_devmap(ldi_handle_t lh, devmap_cookie_t dhp, offset_t off,
2097     size_t len, size_t *maplen, uint_t model)
2098 {
2099 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2100 	dev_t			dev;
2101 
2102 	if (lh == NULL)
2103 		return (EINVAL);
2104 
2105 	/* this entry point is only supported for cb devices */
2106 	dev = handlep->lh_vp->v_rdev;
2107 	if (!(handlep->lh_type & LH_CBDEV))
2108 		return (ENOTSUP);
2109 
2110 	return (cdev_devmap(dev, dhp, off, len, maplen, model));
2111 }
2112 
2113 int
ldi_aread(ldi_handle_t lh,struct aio_req * aio_reqp,cred_t * cr)2114 ldi_aread(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2115 {
2116 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2117 	dev_t			dev;
2118 	struct cb_ops		*cb;
2119 
2120 	if (lh == NULL)
2121 		return (EINVAL);
2122 
2123 	/* this entry point is only supported for cb devices */
2124 	if (!(handlep->lh_type & LH_CBDEV))
2125 		return (ENOTSUP);
2126 
2127 	/*
2128 	 * Kaio is only supported on block devices.
2129 	 */
2130 	dev = handlep->lh_vp->v_rdev;
2131 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2132 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2133 		return (ENOTSUP);
2134 
2135 	if (cb->cb_aread == NULL)
2136 		return (ENOTSUP);
2137 
2138 	return (cb->cb_aread(dev, aio_reqp, cr));
2139 }
2140 
2141 int
ldi_awrite(ldi_handle_t lh,struct aio_req * aio_reqp,cred_t * cr)2142 ldi_awrite(ldi_handle_t lh, struct aio_req *aio_reqp, cred_t *cr)
2143 {
2144 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2145 	struct cb_ops		*cb;
2146 	dev_t			dev;
2147 
2148 	if (lh == NULL)
2149 		return (EINVAL);
2150 
2151 	/* this entry point is only supported for cb devices */
2152 	if (!(handlep->lh_type & LH_CBDEV))
2153 		return (ENOTSUP);
2154 
2155 	/*
2156 	 * Kaio is only supported on block devices.
2157 	 */
2158 	dev = handlep->lh_vp->v_rdev;
2159 	cb = devopsp[getmajor(dev)]->devo_cb_ops;
2160 	if (cb->cb_strategy == nodev || cb->cb_strategy == NULL)
2161 		return (ENOTSUP);
2162 
2163 	if (cb->cb_awrite == NULL)
2164 		return (ENOTSUP);
2165 
2166 	return (cb->cb_awrite(dev, aio_reqp, cr));
2167 }
2168 
2169 int
ldi_putmsg(ldi_handle_t lh,mblk_t * smp)2170 ldi_putmsg(ldi_handle_t lh, mblk_t *smp)
2171 {
2172 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2173 	int			ret;
2174 
2175 	if ((lh == NULL) || (smp == NULL))
2176 		return (EINVAL);
2177 
2178 	if (!(handlep->lh_type & LH_STREAM)) {
2179 		freemsg(smp);
2180 		return (ENOTSUP);
2181 	}
2182 
2183 	/*
2184 	 * If we don't have db_credp, set it. Note that we can not be called
2185 	 * from interrupt context.
2186 	 */
2187 	if (msg_getcred(smp, NULL) == NULL)
2188 		mblk_setcred(smp, CRED(), curproc->p_pid);
2189 
2190 	/* Send message while honoring flow control */
2191 	ret = kstrputmsg(handlep->lh_vp, smp, NULL, 0, 0,
2192 	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR, 0);
2193 
2194 	return (ret);
2195 }
2196 
2197 int
ldi_getmsg(ldi_handle_t lh,mblk_t ** rmp,timestruc_t * timeo)2198 ldi_getmsg(ldi_handle_t lh, mblk_t **rmp, timestruc_t *timeo)
2199 {
2200 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2201 	clock_t			timout; /* milliseconds */
2202 	uchar_t			pri;
2203 	rval_t			rval;
2204 	int			ret, pflag;
2205 
2206 
2207 	if (lh == NULL)
2208 		return (EINVAL);
2209 
2210 	if (!(handlep->lh_type & LH_STREAM))
2211 		return (ENOTSUP);
2212 
2213 	/* Convert from nanoseconds to milliseconds */
2214 	if (timeo != NULL) {
2215 		timout = timeo->tv_sec * 1000 + timeo->tv_nsec / 1000000;
2216 		if (timout > INT_MAX)
2217 			return (EINVAL);
2218 	} else
2219 		timout = -1;
2220 
2221 	/* Wait for timeout millseconds for a message */
2222 	pflag = MSG_ANY;
2223 	pri = 0;
2224 	*rmp = NULL;
2225 	ret = kstrgetmsg(handlep->lh_vp,
2226 	    rmp, NULL, &pri, &pflag, timout, &rval);
2227 	return (ret);
2228 }
2229 
2230 int
ldi_get_dev(ldi_handle_t lh,dev_t * devp)2231 ldi_get_dev(ldi_handle_t lh, dev_t *devp)
2232 {
2233 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2234 
2235 	if ((lh == NULL) || (devp == NULL))
2236 		return (EINVAL);
2237 
2238 	*devp = handlep->lh_vp->v_rdev;
2239 	return (0);
2240 }
2241 
2242 int
ldi_get_otyp(ldi_handle_t lh,int * otyp)2243 ldi_get_otyp(ldi_handle_t lh, int *otyp)
2244 {
2245 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2246 
2247 	if ((lh == NULL) || (otyp == NULL))
2248 		return (EINVAL);
2249 
2250 	*otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2251 	return (0);
2252 }
2253 
2254 int
ldi_get_devid(ldi_handle_t lh,ddi_devid_t * devid)2255 ldi_get_devid(ldi_handle_t lh, ddi_devid_t *devid)
2256 {
2257 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2258 	int			ret;
2259 	dev_t			dev;
2260 
2261 	if ((lh == NULL) || (devid == NULL))
2262 		return (EINVAL);
2263 
2264 	dev = handlep->lh_vp->v_rdev;
2265 
2266 	ret = ddi_lyr_get_devid(dev, devid);
2267 	if (ret != DDI_SUCCESS)
2268 		return (ENOTSUP);
2269 
2270 	return (0);
2271 }
2272 
2273 int
ldi_get_minor_name(ldi_handle_t lh,char ** minor_name)2274 ldi_get_minor_name(ldi_handle_t lh, char **minor_name)
2275 {
2276 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2277 	int			ret, otyp;
2278 	dev_t			dev;
2279 
2280 	if ((lh == NULL) || (minor_name == NULL))
2281 		return (EINVAL);
2282 
2283 	dev = handlep->lh_vp->v_rdev;
2284 	otyp = VTYP_TO_OTYP(handlep->lh_vp->v_type);
2285 
2286 	ret = ddi_lyr_get_minor_name(dev, OTYP_TO_STYP(otyp), minor_name);
2287 	if (ret != DDI_SUCCESS)
2288 		return (ENOTSUP);
2289 
2290 	return (0);
2291 }
2292 
2293 int
ldi_prop_lookup_int_array(ldi_handle_t lh,uint_t flags,char * name,int ** data,uint_t * nelements)2294 ldi_prop_lookup_int_array(ldi_handle_t lh,
2295     uint_t flags, char *name, int **data, uint_t *nelements)
2296 {
2297 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2298 	dev_info_t		*dip;
2299 	dev_t			dev;
2300 	int			res;
2301 	struct snode		*csp;
2302 
2303 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2304 		return (DDI_PROP_INVAL_ARG);
2305 
2306 	dev = handlep->lh_vp->v_rdev;
2307 
2308 	csp = VTOCS(handlep->lh_vp);
2309 	mutex_enter(&csp->s_lock);
2310 	if ((dip = csp->s_dip) != NULL)
2311 		e_ddi_hold_devi(dip);
2312 	mutex_exit(&csp->s_lock);
2313 	if (dip == NULL)
2314 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2315 
2316 	if (dip == NULL) {
2317 		flags |= DDI_UNBND_DLPI2;
2318 	} else if (flags & LDI_DEV_T_ANY) {
2319 		flags &= ~LDI_DEV_T_ANY;
2320 		dev = DDI_DEV_T_ANY;
2321 	}
2322 
2323 	if (dip != NULL) {
2324 		int *prop_val, prop_len;
2325 
2326 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2327 		    (caddr_t *)&prop_val, &prop_len, sizeof (int));
2328 
2329 		/* if we got it then return it */
2330 		if (res == DDI_PROP_SUCCESS) {
2331 			*nelements = prop_len / sizeof (int);
2332 			*data = prop_val;
2333 
2334 			ddi_release_devi(dip);
2335 			return (res);
2336 		}
2337 	}
2338 
2339 	/* call the normal property interfaces */
2340 	res = ddi_prop_lookup_int_array(dev, dip, flags,
2341 	    name, data, nelements);
2342 
2343 	if (dip != NULL)
2344 		ddi_release_devi(dip);
2345 
2346 	return (res);
2347 }
2348 
2349 int
ldi_prop_lookup_int64_array(ldi_handle_t lh,uint_t flags,char * name,int64_t ** data,uint_t * nelements)2350 ldi_prop_lookup_int64_array(ldi_handle_t lh,
2351     uint_t flags, char *name, int64_t **data, uint_t *nelements)
2352 {
2353 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2354 	dev_info_t		*dip;
2355 	dev_t			dev;
2356 	int			res;
2357 	struct snode		*csp;
2358 
2359 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2360 		return (DDI_PROP_INVAL_ARG);
2361 
2362 	dev = handlep->lh_vp->v_rdev;
2363 
2364 	csp = VTOCS(handlep->lh_vp);
2365 	mutex_enter(&csp->s_lock);
2366 	if ((dip = csp->s_dip) != NULL)
2367 		e_ddi_hold_devi(dip);
2368 	mutex_exit(&csp->s_lock);
2369 	if (dip == NULL)
2370 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2371 
2372 	if (dip == NULL) {
2373 		flags |= DDI_UNBND_DLPI2;
2374 	} else if (flags & LDI_DEV_T_ANY) {
2375 		flags &= ~LDI_DEV_T_ANY;
2376 		dev = DDI_DEV_T_ANY;
2377 	}
2378 
2379 	if (dip != NULL) {
2380 		int64_t	*prop_val;
2381 		int	prop_len;
2382 
2383 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2384 		    (caddr_t *)&prop_val, &prop_len, sizeof (int64_t));
2385 
2386 		/* if we got it then return it */
2387 		if (res == DDI_PROP_SUCCESS) {
2388 			*nelements = prop_len / sizeof (int64_t);
2389 			*data = prop_val;
2390 
2391 			ddi_release_devi(dip);
2392 			return (res);
2393 		}
2394 	}
2395 
2396 	/* call the normal property interfaces */
2397 	res = ddi_prop_lookup_int64_array(dev, dip, flags,
2398 	    name, data, nelements);
2399 
2400 	if (dip != NULL)
2401 		ddi_release_devi(dip);
2402 
2403 	return (res);
2404 }
2405 
2406 int
ldi_prop_lookup_string_array(ldi_handle_t lh,uint_t flags,char * name,char *** data,uint_t * nelements)2407 ldi_prop_lookup_string_array(ldi_handle_t lh,
2408     uint_t flags, char *name, char ***data, uint_t *nelements)
2409 {
2410 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2411 	dev_info_t		*dip;
2412 	dev_t			dev;
2413 	int			res;
2414 	struct snode		*csp;
2415 
2416 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2417 		return (DDI_PROP_INVAL_ARG);
2418 
2419 	dev = handlep->lh_vp->v_rdev;
2420 
2421 	csp = VTOCS(handlep->lh_vp);
2422 	mutex_enter(&csp->s_lock);
2423 	if ((dip = csp->s_dip) != NULL)
2424 		e_ddi_hold_devi(dip);
2425 	mutex_exit(&csp->s_lock);
2426 	if (dip == NULL)
2427 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2428 
2429 	if (dip == NULL) {
2430 		flags |= DDI_UNBND_DLPI2;
2431 	} else if (flags & LDI_DEV_T_ANY) {
2432 		flags &= ~LDI_DEV_T_ANY;
2433 		dev = DDI_DEV_T_ANY;
2434 	}
2435 
2436 	if (dip != NULL) {
2437 		char	*prop_val;
2438 		int	prop_len;
2439 
2440 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2441 		    (caddr_t *)&prop_val, &prop_len, 0);
2442 
2443 		/* if we got it then return it */
2444 		if (res == DDI_PROP_SUCCESS) {
2445 			char	**str_array;
2446 			int	nelem;
2447 
2448 			/*
2449 			 * pack the returned string array into the format
2450 			 * our callers expect
2451 			 */
2452 			if (i_pack_string_array(prop_val, prop_len,
2453 			    &str_array, &nelem) == 0) {
2454 
2455 				*data = str_array;
2456 				*nelements = nelem;
2457 
2458 				ddi_prop_free(prop_val);
2459 				ddi_release_devi(dip);
2460 				return (res);
2461 			}
2462 
2463 			/*
2464 			 * the format of the returned property must have
2465 			 * been bad so throw it out
2466 			 */
2467 			ddi_prop_free(prop_val);
2468 		}
2469 	}
2470 
2471 	/* call the normal property interfaces */
2472 	res = ddi_prop_lookup_string_array(dev, dip, flags,
2473 	    name, data, nelements);
2474 
2475 	if (dip != NULL)
2476 		ddi_release_devi(dip);
2477 
2478 	return (res);
2479 }
2480 
2481 int
ldi_prop_lookup_string(ldi_handle_t lh,uint_t flags,char * name,char ** data)2482 ldi_prop_lookup_string(ldi_handle_t lh,
2483     uint_t flags, char *name, char **data)
2484 {
2485 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2486 	dev_info_t		*dip;
2487 	dev_t			dev;
2488 	int			res;
2489 	struct snode		*csp;
2490 
2491 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2492 		return (DDI_PROP_INVAL_ARG);
2493 
2494 	dev = handlep->lh_vp->v_rdev;
2495 
2496 	csp = VTOCS(handlep->lh_vp);
2497 	mutex_enter(&csp->s_lock);
2498 	if ((dip = csp->s_dip) != NULL)
2499 		e_ddi_hold_devi(dip);
2500 	mutex_exit(&csp->s_lock);
2501 	if (dip == NULL)
2502 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2503 
2504 	if (dip == NULL) {
2505 		flags |= DDI_UNBND_DLPI2;
2506 	} else if (flags & LDI_DEV_T_ANY) {
2507 		flags &= ~LDI_DEV_T_ANY;
2508 		dev = DDI_DEV_T_ANY;
2509 	}
2510 
2511 	if (dip != NULL) {
2512 		char	*prop_val;
2513 		int	prop_len;
2514 
2515 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2516 		    (caddr_t *)&prop_val, &prop_len, 0);
2517 
2518 		/* if we got it then return it */
2519 		if (res == DDI_PROP_SUCCESS) {
2520 			/*
2521 			 * sanity check the vaule returned.
2522 			 */
2523 			if (i_check_string(prop_val, prop_len)) {
2524 				ddi_prop_free(prop_val);
2525 			} else {
2526 				*data = prop_val;
2527 				ddi_release_devi(dip);
2528 				return (res);
2529 			}
2530 		}
2531 	}
2532 
2533 	/* call the normal property interfaces */
2534 	res = ddi_prop_lookup_string(dev, dip, flags, name, data);
2535 
2536 	if (dip != NULL)
2537 		ddi_release_devi(dip);
2538 
2539 #ifdef DEBUG
2540 	if (res == DDI_PROP_SUCCESS) {
2541 		/*
2542 		 * keep ourselves honest
2543 		 * make sure the framework returns strings in the
2544 		 * same format as we're demanding from drivers.
2545 		 */
2546 		struct prop_driver_data	*pdd;
2547 		int			pdd_prop_size;
2548 
2549 		pdd = ((struct prop_driver_data *)(*data)) - 1;
2550 		pdd_prop_size = pdd->pdd_size -
2551 		    sizeof (struct prop_driver_data);
2552 		ASSERT(i_check_string(*data, pdd_prop_size) == 0);
2553 	}
2554 #endif /* DEBUG */
2555 
2556 	return (res);
2557 }
2558 
2559 int
ldi_prop_lookup_byte_array(ldi_handle_t lh,uint_t flags,char * name,uchar_t ** data,uint_t * nelements)2560 ldi_prop_lookup_byte_array(ldi_handle_t lh,
2561     uint_t flags, char *name, uchar_t **data, uint_t *nelements)
2562 {
2563 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2564 	dev_info_t		*dip;
2565 	dev_t			dev;
2566 	int			res;
2567 	struct snode		*csp;
2568 
2569 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2570 		return (DDI_PROP_INVAL_ARG);
2571 
2572 	dev = handlep->lh_vp->v_rdev;
2573 
2574 	csp = VTOCS(handlep->lh_vp);
2575 	mutex_enter(&csp->s_lock);
2576 	if ((dip = csp->s_dip) != NULL)
2577 		e_ddi_hold_devi(dip);
2578 	mutex_exit(&csp->s_lock);
2579 	if (dip == NULL)
2580 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2581 
2582 	if (dip == NULL) {
2583 		flags |= DDI_UNBND_DLPI2;
2584 	} else if (flags & LDI_DEV_T_ANY) {
2585 		flags &= ~LDI_DEV_T_ANY;
2586 		dev = DDI_DEV_T_ANY;
2587 	}
2588 
2589 	if (dip != NULL) {
2590 		uchar_t	*prop_val;
2591 		int	prop_len;
2592 
2593 		res = i_ldi_prop_op_typed(dev, dip, flags, name,
2594 		    (caddr_t *)&prop_val, &prop_len, sizeof (uchar_t));
2595 
2596 		/* if we got it then return it */
2597 		if (res == DDI_PROP_SUCCESS) {
2598 			*nelements = prop_len / sizeof (uchar_t);
2599 			*data = prop_val;
2600 
2601 			ddi_release_devi(dip);
2602 			return (res);
2603 		}
2604 	}
2605 
2606 	/* call the normal property interfaces */
2607 	res = ddi_prop_lookup_byte_array(dev, dip, flags,
2608 	    name, data, nelements);
2609 
2610 	if (dip != NULL)
2611 		ddi_release_devi(dip);
2612 
2613 	return (res);
2614 }
2615 
2616 int
ldi_prop_get_int(ldi_handle_t lh,uint_t flags,char * name,int defvalue)2617 ldi_prop_get_int(ldi_handle_t lh,
2618     uint_t flags, char *name, int defvalue)
2619 {
2620 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2621 	dev_info_t		*dip;
2622 	dev_t			dev;
2623 	int			res;
2624 	struct snode		*csp;
2625 
2626 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2627 		return (defvalue);
2628 
2629 	dev = handlep->lh_vp->v_rdev;
2630 
2631 	csp = VTOCS(handlep->lh_vp);
2632 	mutex_enter(&csp->s_lock);
2633 	if ((dip = csp->s_dip) != NULL)
2634 		e_ddi_hold_devi(dip);
2635 	mutex_exit(&csp->s_lock);
2636 	if (dip == NULL)
2637 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2638 
2639 	if (dip == NULL) {
2640 		flags |= DDI_UNBND_DLPI2;
2641 	} else if (flags & LDI_DEV_T_ANY) {
2642 		flags &= ~LDI_DEV_T_ANY;
2643 		dev = DDI_DEV_T_ANY;
2644 	}
2645 
2646 	if (dip != NULL) {
2647 		int	prop_val;
2648 		int	prop_len;
2649 
2650 		/*
2651 		 * first call the drivers prop_op interface to allow it
2652 		 * it to override default property values.
2653 		 */
2654 		prop_len = sizeof (int);
2655 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2656 		    flags | DDI_PROP_DYNAMIC, name,
2657 		    (caddr_t)&prop_val, &prop_len);
2658 
2659 		/* if we got it then return it */
2660 		if ((res == DDI_PROP_SUCCESS) &&
2661 		    (prop_len == sizeof (int))) {
2662 			res = prop_val;
2663 			ddi_release_devi(dip);
2664 			return (res);
2665 		}
2666 	}
2667 
2668 	/* call the normal property interfaces */
2669 	res = ddi_prop_get_int(dev, dip, flags, name, defvalue);
2670 
2671 	if (dip != NULL)
2672 		ddi_release_devi(dip);
2673 
2674 	return (res);
2675 }
2676 
2677 int64_t
ldi_prop_get_int64(ldi_handle_t lh,uint_t flags,char * name,int64_t defvalue)2678 ldi_prop_get_int64(ldi_handle_t lh,
2679     uint_t flags, char *name, int64_t defvalue)
2680 {
2681 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2682 	dev_info_t		*dip;
2683 	dev_t			dev;
2684 	int64_t			res;
2685 	struct snode		*csp;
2686 
2687 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2688 		return (defvalue);
2689 
2690 	dev = handlep->lh_vp->v_rdev;
2691 
2692 	csp = VTOCS(handlep->lh_vp);
2693 	mutex_enter(&csp->s_lock);
2694 	if ((dip = csp->s_dip) != NULL)
2695 		e_ddi_hold_devi(dip);
2696 	mutex_exit(&csp->s_lock);
2697 	if (dip == NULL)
2698 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2699 
2700 	if (dip == NULL) {
2701 		flags |= DDI_UNBND_DLPI2;
2702 	} else if (flags & LDI_DEV_T_ANY) {
2703 		flags &= ~LDI_DEV_T_ANY;
2704 		dev = DDI_DEV_T_ANY;
2705 	}
2706 
2707 	if (dip != NULL) {
2708 		int64_t	prop_val;
2709 		int	prop_len;
2710 
2711 		/*
2712 		 * first call the drivers prop_op interface to allow it
2713 		 * it to override default property values.
2714 		 */
2715 		prop_len = sizeof (int64_t);
2716 		res = i_ldi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2717 		    flags | DDI_PROP_DYNAMIC, name,
2718 		    (caddr_t)&prop_val, &prop_len);
2719 
2720 		/* if we got it then return it */
2721 		if ((res == DDI_PROP_SUCCESS) &&
2722 		    (prop_len == sizeof (int64_t))) {
2723 			res = prop_val;
2724 			ddi_release_devi(dip);
2725 			return (res);
2726 		}
2727 	}
2728 
2729 	/* call the normal property interfaces */
2730 	res = ddi_prop_get_int64(dev, dip, flags, name, defvalue);
2731 
2732 	if (dip != NULL)
2733 		ddi_release_devi(dip);
2734 
2735 	return (res);
2736 }
2737 
2738 int
ldi_prop_exists(ldi_handle_t lh,uint_t flags,char * name)2739 ldi_prop_exists(ldi_handle_t lh, uint_t flags, char *name)
2740 {
2741 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2742 	dev_info_t		*dip;
2743 	dev_t			dev;
2744 	int			res, prop_len;
2745 	struct snode		*csp;
2746 
2747 	if ((lh == NULL) || (name == NULL) || (strlen(name) == 0))
2748 		return (0);
2749 
2750 	dev = handlep->lh_vp->v_rdev;
2751 
2752 	csp = VTOCS(handlep->lh_vp);
2753 	mutex_enter(&csp->s_lock);
2754 	if ((dip = csp->s_dip) != NULL)
2755 		e_ddi_hold_devi(dip);
2756 	mutex_exit(&csp->s_lock);
2757 	if (dip == NULL)
2758 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2759 
2760 	/* if NULL dip, prop does NOT exist */
2761 	if (dip == NULL)
2762 		return (0);
2763 
2764 	if (flags & LDI_DEV_T_ANY) {
2765 		flags &= ~LDI_DEV_T_ANY;
2766 		dev = DDI_DEV_T_ANY;
2767 	}
2768 
2769 	/*
2770 	 * first call the drivers prop_op interface to allow it
2771 	 * it to override default property values.
2772 	 */
2773 	res = i_ldi_prop_op(dev, dip, PROP_LEN,
2774 	    flags | DDI_PROP_DYNAMIC, name, NULL, &prop_len);
2775 
2776 	if (res == DDI_PROP_SUCCESS) {
2777 		ddi_release_devi(dip);
2778 		return (1);
2779 	}
2780 
2781 	/* call the normal property interfaces */
2782 	res = ddi_prop_exists(dev, dip, flags, name);
2783 
2784 	ddi_release_devi(dip);
2785 	return (res);
2786 }
2787 
2788 #ifdef	LDI_OBSOLETE_EVENT
2789 
2790 int
ldi_get_eventcookie(ldi_handle_t lh,char * name,ddi_eventcookie_t * ecp)2791 ldi_get_eventcookie(ldi_handle_t lh, char *name, ddi_eventcookie_t *ecp)
2792 {
2793 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2794 	dev_info_t		*dip;
2795 	dev_t			dev;
2796 	int			res;
2797 	struct snode		*csp;
2798 
2799 	if ((lh == NULL) || (name == NULL) ||
2800 	    (strlen(name) == 0) || (ecp == NULL)) {
2801 		return (DDI_FAILURE);
2802 	}
2803 
2804 	ASSERT(!servicing_interrupt());
2805 
2806 	dev = handlep->lh_vp->v_rdev;
2807 
2808 	csp = VTOCS(handlep->lh_vp);
2809 	mutex_enter(&csp->s_lock);
2810 	if ((dip = csp->s_dip) != NULL)
2811 		e_ddi_hold_devi(dip);
2812 	mutex_exit(&csp->s_lock);
2813 	if (dip == NULL)
2814 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2815 
2816 	if (dip == NULL)
2817 		return (DDI_FAILURE);
2818 
2819 	LDI_EVENTCB((CE_NOTE, "%s: event_name=%s, "
2820 	    "dip=0x%p, event_cookiep=0x%p", "ldi_get_eventcookie",
2821 	    name, (void *)dip, (void *)ecp));
2822 
2823 	res = ddi_get_eventcookie(dip, name, ecp);
2824 
2825 	ddi_release_devi(dip);
2826 	return (res);
2827 }
2828 
2829 int
ldi_add_event_handler(ldi_handle_t lh,ddi_eventcookie_t ec,void (* handler)(ldi_handle_t,ddi_eventcookie_t,void *,void *),void * arg,ldi_callback_id_t * id)2830 ldi_add_event_handler(ldi_handle_t lh, ddi_eventcookie_t ec,
2831     void (*handler)(ldi_handle_t, ddi_eventcookie_t, void *, void *),
2832     void *arg, ldi_callback_id_t *id)
2833 {
2834 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
2835 	struct ldi_event	*lep;
2836 	dev_info_t		*dip;
2837 	dev_t			dev;
2838 	int			res;
2839 	struct snode		*csp;
2840 
2841 	if ((lh == NULL) || (ec == NULL) || (handler == NULL) || (id == NULL))
2842 		return (DDI_FAILURE);
2843 
2844 	ASSERT(!servicing_interrupt());
2845 
2846 	dev = handlep->lh_vp->v_rdev;
2847 
2848 	csp = VTOCS(handlep->lh_vp);
2849 	mutex_enter(&csp->s_lock);
2850 	if ((dip = csp->s_dip) != NULL)
2851 		e_ddi_hold_devi(dip);
2852 	mutex_exit(&csp->s_lock);
2853 	if (dip == NULL)
2854 		dip = e_ddi_hold_devi_by_dev(dev, 0);
2855 
2856 	if (dip == NULL)
2857 		return (DDI_FAILURE);
2858 
2859 	lep = kmem_zalloc(sizeof (struct ldi_event), KM_SLEEP);
2860 	lep->le_lhp = handlep;
2861 	lep->le_arg = arg;
2862 	lep->le_handler = handler;
2863 
2864 	if ((res = ddi_add_event_handler(dip, ec, i_ldi_callback,
2865 	    (void *)lep, &lep->le_id)) != DDI_SUCCESS) {
2866 		LDI_EVENTCB((CE_WARN, "%s: unable to add"
2867 		    "event callback", "ldi_add_event_handler"));
2868 		ddi_release_devi(dip);
2869 		kmem_free(lep, sizeof (struct ldi_event));
2870 		return (res);
2871 	}
2872 
2873 	*id = (ldi_callback_id_t)lep;
2874 
2875 	LDI_EVENTCB((CE_NOTE, "%s: dip=0x%p, event=0x%p, "
2876 	    "ldi_eventp=0x%p, cb_id=0x%p", "ldi_add_event_handler",
2877 	    (void *)dip, (void *)ec, (void *)lep, (void *)id));
2878 
2879 	handle_event_add(lep);
2880 	ddi_release_devi(dip);
2881 	return (res);
2882 }
2883 
2884 int
ldi_remove_event_handler(ldi_handle_t lh,ldi_callback_id_t id)2885 ldi_remove_event_handler(ldi_handle_t lh, ldi_callback_id_t id)
2886 {
2887 	ldi_event_t		*lep = (ldi_event_t *)id;
2888 	int			res;
2889 
2890 	if ((lh == NULL) || (id == NULL))
2891 		return (DDI_FAILURE);
2892 
2893 	ASSERT(!servicing_interrupt());
2894 
2895 	if ((res = ddi_remove_event_handler(lep->le_id))
2896 	    != DDI_SUCCESS) {
2897 		LDI_EVENTCB((CE_WARN, "%s: unable to remove "
2898 		    "event callback", "ldi_remove_event_handler"));
2899 		return (res);
2900 	}
2901 
2902 	handle_event_remove(lep);
2903 	kmem_free(lep, sizeof (struct ldi_event));
2904 	return (res);
2905 }
2906 
2907 #endif
2908 
2909 /*
2910  * Here are some definitions of terms used in the following LDI events
2911  * code:
2912  *
2913  * "LDI events" AKA "native events": These are events defined by the
2914  * "new" LDI event framework. These events are serviced by the LDI event
2915  * framework itself and thus are native to it.
2916  *
2917  * "LDI contract events": These are contract events that correspond to the
2918  *  LDI events. This mapping of LDI events to contract events is defined by
2919  * the ldi_ev_cookies[] array above.
2920  *
2921  * NDI events: These are events which are serviced by the NDI event subsystem.
2922  * LDI subsystem just provides a thin wrapper around the NDI event interfaces
2923  * These events are therefore *not* native events.
2924  */
2925 
2926 static int
ldi_native_event(const char * evname)2927 ldi_native_event(const char *evname)
2928 {
2929 	int i;
2930 
2931 	LDI_EVTRC((CE_NOTE, "ldi_native_event: entered: ev=%s", evname));
2932 
2933 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2934 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2935 			return (1);
2936 	}
2937 
2938 	return (0);
2939 }
2940 
2941 static uint_t
ldi_ev_sync_event(const char * evname)2942 ldi_ev_sync_event(const char *evname)
2943 {
2944 	int i;
2945 
2946 	ASSERT(ldi_native_event(evname));
2947 
2948 	LDI_EVTRC((CE_NOTE, "ldi_ev_sync_event: entered: %s", evname));
2949 
2950 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2951 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2952 			return (ldi_ev_cookies[i].ck_sync);
2953 	}
2954 
2955 	/*
2956 	 * This should never happen until non-contract based
2957 	 * LDI events are introduced. If that happens, we will
2958 	 * use a "special" token to indicate that there are no
2959 	 * contracts corresponding to this LDI event.
2960 	 */
2961 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2962 
2963 	return (0);
2964 }
2965 
2966 static uint_t
ldi_contract_event(const char * evname)2967 ldi_contract_event(const char *evname)
2968 {
2969 	int i;
2970 
2971 	ASSERT(ldi_native_event(evname));
2972 
2973 	LDI_EVTRC((CE_NOTE, "ldi_contract_event: entered: %s", evname));
2974 
2975 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2976 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0)
2977 			return (ldi_ev_cookies[i].ck_ctype);
2978 	}
2979 
2980 	/*
2981 	 * This should never happen until non-contract based
2982 	 * LDI events are introduced. If that happens, we will
2983 	 * use a "special" token to indicate that there are no
2984 	 * contracts corresponding to this LDI event.
2985 	 */
2986 	cmn_err(CE_PANIC, "Unknown LDI event: %s", evname);
2987 
2988 	return (0);
2989 }
2990 
2991 char *
ldi_ev_get_type(ldi_ev_cookie_t cookie)2992 ldi_ev_get_type(ldi_ev_cookie_t cookie)
2993 {
2994 	int i;
2995 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
2996 
2997 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
2998 		if (&ldi_ev_cookies[i] == cookie_impl) {
2999 			LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: LDI: %s",
3000 			    ldi_ev_cookies[i].ck_evname));
3001 			return (ldi_ev_cookies[i].ck_evname);
3002 		}
3003 	}
3004 
3005 	/*
3006 	 * Not an LDI native event. Must be NDI event service.
3007 	 * Just return a generic string
3008 	 */
3009 	LDI_EVTRC((CE_NOTE, "ldi_ev_get_type: is NDI"));
3010 	return (NDI_EVENT_SERVICE);
3011 }
3012 
3013 static int
ldi_native_cookie(ldi_ev_cookie_t cookie)3014 ldi_native_cookie(ldi_ev_cookie_t cookie)
3015 {
3016 	int i;
3017 	struct ldi_ev_cookie *cookie_impl = (struct ldi_ev_cookie *)cookie;
3018 
3019 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3020 		if (&ldi_ev_cookies[i] == cookie_impl) {
3021 			LDI_EVTRC((CE_NOTE, "ldi_native_cookie: native LDI"));
3022 			return (1);
3023 		}
3024 	}
3025 
3026 	LDI_EVTRC((CE_NOTE, "ldi_native_cookie: is NDI"));
3027 	return (0);
3028 }
3029 
3030 static ldi_ev_cookie_t
ldi_get_native_cookie(const char * evname)3031 ldi_get_native_cookie(const char *evname)
3032 {
3033 	int i;
3034 
3035 	for (i = 0; ldi_ev_cookies[i].ck_evname != NULL; i++) {
3036 		if (strcmp(ldi_ev_cookies[i].ck_evname, evname) == 0) {
3037 			LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: found"));
3038 			return ((ldi_ev_cookie_t)&ldi_ev_cookies[i]);
3039 		}
3040 	}
3041 
3042 	LDI_EVTRC((CE_NOTE, "ldi_get_native_cookie: NOT found"));
3043 	return (NULL);
3044 }
3045 
3046 /*
3047  * ldi_ev_lock() needs to be recursive, since layered drivers may call
3048  * other LDI interfaces (such as ldi_close() from within the context of
3049  * a notify callback. Since the notify callback is called with the
3050  * ldi_ev_lock() held and ldi_close() also grabs ldi_ev_lock, the lock needs
3051  * to be recursive.
3052  */
3053 static void
ldi_ev_lock(void)3054 ldi_ev_lock(void)
3055 {
3056 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: entered"));
3057 
3058 	mutex_enter(&ldi_ev_callback_list.le_lock);
3059 	if (ldi_ev_callback_list.le_thread == curthread) {
3060 		ASSERT(ldi_ev_callback_list.le_busy >= 1);
3061 		ldi_ev_callback_list.le_busy++;
3062 	} else {
3063 		while (ldi_ev_callback_list.le_busy)
3064 			cv_wait(&ldi_ev_callback_list.le_cv,
3065 			    &ldi_ev_callback_list.le_lock);
3066 		ASSERT(ldi_ev_callback_list.le_thread == NULL);
3067 		ldi_ev_callback_list.le_busy = 1;
3068 		ldi_ev_callback_list.le_thread = curthread;
3069 	}
3070 	mutex_exit(&ldi_ev_callback_list.le_lock);
3071 
3072 	LDI_EVTRC((CE_NOTE, "ldi_ev_lock: exit"));
3073 }
3074 
3075 static void
ldi_ev_unlock(void)3076 ldi_ev_unlock(void)
3077 {
3078 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: entered"));
3079 	mutex_enter(&ldi_ev_callback_list.le_lock);
3080 	ASSERT(ldi_ev_callback_list.le_thread == curthread);
3081 	ASSERT(ldi_ev_callback_list.le_busy >= 1);
3082 
3083 	ldi_ev_callback_list.le_busy--;
3084 	if (ldi_ev_callback_list.le_busy == 0) {
3085 		ldi_ev_callback_list.le_thread = NULL;
3086 		cv_signal(&ldi_ev_callback_list.le_cv);
3087 	}
3088 	mutex_exit(&ldi_ev_callback_list.le_lock);
3089 	LDI_EVTRC((CE_NOTE, "ldi_ev_unlock: exit"));
3090 }
3091 
3092 int
ldi_ev_get_cookie(ldi_handle_t lh,char * evname,ldi_ev_cookie_t * cookiep)3093 ldi_ev_get_cookie(ldi_handle_t lh, char *evname, ldi_ev_cookie_t *cookiep)
3094 {
3095 	struct ldi_handle	*handlep = (struct ldi_handle *)lh;
3096 	dev_info_t		*dip;
3097 	dev_t			dev;
3098 	int			res;
3099 	struct snode		*csp;
3100 	ddi_eventcookie_t	ddi_cookie;
3101 	ldi_ev_cookie_t		tcookie;
3102 
3103 	LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: entered: evname=%s",
3104 	    evname ? evname : "<NULL>"));
3105 
3106 	if (lh == NULL || evname == NULL ||
3107 	    strlen(evname) == 0 || cookiep == NULL) {
3108 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: invalid args"));
3109 		return (LDI_EV_FAILURE);
3110 	}
3111 
3112 	*cookiep = NULL;
3113 
3114 	/*
3115 	 * First check if it is a LDI native event
3116 	 */
3117 	tcookie = ldi_get_native_cookie(evname);
3118 	if (tcookie) {
3119 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: got native cookie"));
3120 		*cookiep = tcookie;
3121 		return (LDI_EV_SUCCESS);
3122 	}
3123 
3124 	/*
3125 	 * Not a LDI native event. Try NDI event services
3126 	 */
3127 
3128 	dev = handlep->lh_vp->v_rdev;
3129 
3130 	csp = VTOCS(handlep->lh_vp);
3131 	mutex_enter(&csp->s_lock);
3132 	if ((dip = csp->s_dip) != NULL)
3133 		e_ddi_hold_devi(dip);
3134 	mutex_exit(&csp->s_lock);
3135 	if (dip == NULL)
3136 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3137 
3138 	if (dip == NULL) {
3139 		cmn_err(CE_WARN, "ldi_ev_get_cookie: No devinfo node for LDI "
3140 		    "handle: %p", (void *)handlep);
3141 		return (LDI_EV_FAILURE);
3142 	}
3143 
3144 	LDI_EVDBG((CE_NOTE, "Calling ddi_get_eventcookie: dip=%p, ev=%s",
3145 	    (void *)dip, evname));
3146 
3147 	res = ddi_get_eventcookie(dip, evname, &ddi_cookie);
3148 
3149 	ddi_release_devi(dip);
3150 
3151 	if (res == DDI_SUCCESS) {
3152 		LDI_EVDBG((CE_NOTE, "ldi_ev_get_cookie: NDI cookie found"));
3153 		*cookiep = (ldi_ev_cookie_t)ddi_cookie;
3154 		return (LDI_EV_SUCCESS);
3155 	} else {
3156 		LDI_EVDBG((CE_WARN, "ldi_ev_get_cookie: NDI cookie: failed"));
3157 		return (LDI_EV_FAILURE);
3158 	}
3159 }
3160 
3161 /*ARGSUSED*/
3162 static void
i_ldi_ev_callback(dev_info_t * dip,ddi_eventcookie_t event_cookie,void * arg,void * ev_data)3163 i_ldi_ev_callback(dev_info_t *dip, ddi_eventcookie_t event_cookie,
3164     void *arg, void *ev_data)
3165 {
3166 	ldi_ev_callback_impl_t *lecp = (ldi_ev_callback_impl_t *)arg;
3167 
3168 	ASSERT(lecp != NULL);
3169 	ASSERT(!ldi_native_cookie(lecp->lec_cookie));
3170 	ASSERT(lecp->lec_lhp);
3171 	ASSERT(lecp->lec_notify == NULL);
3172 	ASSERT(lecp->lec_finalize);
3173 
3174 	LDI_EVDBG((CE_NOTE, "i_ldi_ev_callback: ldh=%p, cookie=%p, arg=%p, "
3175 	    "ev_data=%p", (void *)lecp->lec_lhp, (void *)event_cookie,
3176 	    (void *)lecp->lec_arg, (void *)ev_data));
3177 
3178 	lecp->lec_finalize(lecp->lec_lhp, (ldi_ev_cookie_t)event_cookie,
3179 	    lecp->lec_arg, ev_data);
3180 }
3181 
3182 int
ldi_ev_register_callbacks(ldi_handle_t lh,ldi_ev_cookie_t cookie,ldi_ev_callback_t * callb,void * arg,ldi_callback_id_t * id)3183 ldi_ev_register_callbacks(ldi_handle_t lh, ldi_ev_cookie_t cookie,
3184     ldi_ev_callback_t *callb, void *arg, ldi_callback_id_t *id)
3185 {
3186 	struct ldi_handle	*lhp = (struct ldi_handle *)lh;
3187 	ldi_ev_callback_impl_t	*lecp;
3188 	dev_t			dev;
3189 	struct snode		*csp;
3190 	dev_info_t		*dip;
3191 	int			ddi_event;
3192 
3193 	ASSERT(!servicing_interrupt());
3194 
3195 	if (lh == NULL || cookie == NULL || callb == NULL || id == NULL) {
3196 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid args"));
3197 		return (LDI_EV_FAILURE);
3198 	}
3199 
3200 	if (callb->cb_vers != LDI_EV_CB_VERS) {
3201 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: Invalid vers"));
3202 		return (LDI_EV_FAILURE);
3203 	}
3204 
3205 	if (callb->cb_notify == NULL && callb->cb_finalize == NULL) {
3206 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: NULL callb"));
3207 		return (LDI_EV_FAILURE);
3208 	}
3209 
3210 	*id = 0;
3211 
3212 	dev = lhp->lh_vp->v_rdev;
3213 	csp = VTOCS(lhp->lh_vp);
3214 	mutex_enter(&csp->s_lock);
3215 	if ((dip = csp->s_dip) != NULL)
3216 		e_ddi_hold_devi(dip);
3217 	mutex_exit(&csp->s_lock);
3218 	if (dip == NULL)
3219 		dip = e_ddi_hold_devi_by_dev(dev, 0);
3220 
3221 	if (dip == NULL) {
3222 		cmn_err(CE_WARN, "ldi_ev_register: No devinfo node for "
3223 		    "LDI handle: %p", (void *)lhp);
3224 		return (LDI_EV_FAILURE);
3225 	}
3226 
3227 	lecp = kmem_zalloc(sizeof (ldi_ev_callback_impl_t), KM_SLEEP);
3228 
3229 	ddi_event = 0;
3230 	if (!ldi_native_cookie(cookie)) {
3231 		if (callb->cb_notify || callb->cb_finalize == NULL) {
3232 			/*
3233 			 * NDI event services only accept finalize
3234 			 */
3235 			cmn_err(CE_WARN, "%s: module: %s: NDI event cookie. "
3236 			    "Only finalize"
3237 			    " callback supported with this cookie",
3238 			    "ldi_ev_register_callbacks",
3239 			    lhp->lh_ident->li_modname);
3240 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3241 			ddi_release_devi(dip);
3242 			return (LDI_EV_FAILURE);
3243 		}
3244 
3245 		if (ddi_add_event_handler(dip, (ddi_eventcookie_t)cookie,
3246 		    i_ldi_ev_callback, (void *)lecp,
3247 		    (ddi_callback_id_t *)&lecp->lec_id)
3248 		    != DDI_SUCCESS) {
3249 			kmem_free(lecp, sizeof (ldi_ev_callback_impl_t));
3250 			ddi_release_devi(dip);
3251 			LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3252 			    "ddi_add_event_handler failed"));
3253 			return (LDI_EV_FAILURE);
3254 		}
3255 		ddi_event = 1;
3256 		LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks(): "
3257 		    "ddi_add_event_handler success"));
3258 	}
3259 
3260 
3261 
3262 	ldi_ev_lock();
3263 
3264 	/*
3265 	 * Add the notify/finalize callback to the LDI's list of callbacks.
3266 	 */
3267 	lecp->lec_lhp = lhp;
3268 	lecp->lec_dev = lhp->lh_vp->v_rdev;
3269 	lecp->lec_spec = VTYP_TO_STYP(lhp->lh_vp->v_type);
3270 	lecp->lec_notify = callb->cb_notify;
3271 	lecp->lec_finalize = callb->cb_finalize;
3272 	lecp->lec_arg = arg;
3273 	lecp->lec_cookie = cookie;
3274 	if (!ddi_event)
3275 		lecp->lec_id = (void *)(uintptr_t)(++ldi_ev_id_pool);
3276 	else
3277 		ASSERT(lecp->lec_id);
3278 	lecp->lec_dip = dip;
3279 	list_insert_tail(&ldi_ev_callback_list.le_head, lecp);
3280 
3281 	*id = (ldi_callback_id_t)lecp->lec_id;
3282 
3283 	ldi_ev_unlock();
3284 
3285 	ddi_release_devi(dip);
3286 
3287 	LDI_EVDBG((CE_NOTE, "ldi_ev_register_callbacks: registered "
3288 	    "notify/finalize"));
3289 
3290 	return (LDI_EV_SUCCESS);
3291 }
3292 
3293 static int
ldi_ev_device_match(ldi_ev_callback_impl_t * lecp,dev_info_t * dip,dev_t dev,int spec_type)3294 ldi_ev_device_match(ldi_ev_callback_impl_t *lecp, dev_info_t *dip,
3295     dev_t dev, int spec_type)
3296 {
3297 	ASSERT(lecp);
3298 	ASSERT(dip);
3299 	ASSERT(dev != DDI_DEV_T_NONE);
3300 	ASSERT(dev != NODEV);
3301 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3302 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3303 	ASSERT(lecp->lec_dip);
3304 	ASSERT(lecp->lec_spec == S_IFCHR || lecp->lec_spec == S_IFBLK);
3305 	ASSERT(lecp->lec_dev != DDI_DEV_T_ANY);
3306 	ASSERT(lecp->lec_dev != DDI_DEV_T_NONE);
3307 	ASSERT(lecp->lec_dev != NODEV);
3308 
3309 	if (dip != lecp->lec_dip)
3310 		return (0);
3311 
3312 	if (dev != DDI_DEV_T_ANY) {
3313 		if (dev != lecp->lec_dev || spec_type != lecp->lec_spec)
3314 			return (0);
3315 	}
3316 
3317 	LDI_EVTRC((CE_NOTE, "ldi_ev_device_match: MATCH dip=%p", (void *)dip));
3318 
3319 	return (1);
3320 }
3321 
3322 /*
3323  * LDI framework function to post a "notify" event to all layered drivers
3324  * that have registered for that event
3325  *
3326  * Returns:
3327  *		LDI_EV_SUCCESS - registered callbacks allow event
3328  *		LDI_EV_FAILURE - registered callbacks block event
3329  *		LDI_EV_NONE    - No matching LDI callbacks
3330  *
3331  * This function is *not* to be called by layered drivers. It is for I/O
3332  * framework code in Solaris, such as the I/O retire code and DR code
3333  * to call while servicing a device event such as offline or degraded.
3334  */
3335 int
ldi_invoke_notify(dev_info_t * dip,dev_t dev,int spec_type,char * event,void * ev_data)3336 ldi_invoke_notify(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3337     void *ev_data)
3338 {
3339 	ldi_ev_callback_impl_t *lecp;
3340 	list_t	*listp;
3341 	int	ret;
3342 	char	*lec_event;
3343 
3344 	ASSERT(dip);
3345 	ASSERT(dev != DDI_DEV_T_NONE);
3346 	ASSERT(dev != NODEV);
3347 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3348 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3349 	ASSERT(event);
3350 	ASSERT(ldi_native_event(event));
3351 	ASSERT(ldi_ev_sync_event(event));
3352 
3353 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): entered: dip=%p, ev=%s",
3354 	    (void *)dip, event));
3355 
3356 	ret = LDI_EV_NONE;
3357 	ldi_ev_lock();
3358 
3359 	VERIFY(ldi_ev_callback_list.le_walker_next == NULL);
3360 	listp = &ldi_ev_callback_list.le_head;
3361 	for (lecp = list_head(listp); lecp; lecp =
3362 	    ldi_ev_callback_list.le_walker_next) {
3363 		ldi_ev_callback_list.le_walker_next = list_next(listp, lecp);
3364 
3365 		/* Check if matching device */
3366 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3367 			continue;
3368 
3369 		if (lecp->lec_lhp == NULL) {
3370 			/*
3371 			 * Consumer has unregistered the handle and so
3372 			 * is no longer interested in notify events.
3373 			 */
3374 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No LDI "
3375 			    "handle, skipping"));
3376 			continue;
3377 		}
3378 
3379 		if (lecp->lec_notify == NULL) {
3380 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): No notify "
3381 			    "callback. skipping"));
3382 			continue;	/* not interested in notify */
3383 		}
3384 
3385 		/*
3386 		 * Check if matching event
3387 		 */
3388 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3389 		if (strcmp(event, lec_event) != 0) {
3390 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): Not matching"
3391 			    " event {%s,%s}. skipping", event, lec_event));
3392 			continue;
3393 		}
3394 
3395 		lecp->lec_lhp->lh_flags |= LH_FLAGS_NOTIFY;
3396 		if (lecp->lec_notify(lecp->lec_lhp, lecp->lec_cookie,
3397 		    lecp->lec_arg, ev_data) != LDI_EV_SUCCESS) {
3398 			ret = LDI_EV_FAILURE;
3399 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): notify"
3400 			    " FAILURE"));
3401 			break;
3402 		}
3403 
3404 		/* We have a matching callback that allows the event to occur */
3405 		ret = LDI_EV_SUCCESS;
3406 
3407 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): 1 consumer success"));
3408 	}
3409 
3410 	if (ret != LDI_EV_FAILURE)
3411 		goto out;
3412 
3413 	LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): undoing notify"));
3414 
3415 	/*
3416 	 * Undo notifies already sent
3417 	 */
3418 	lecp = list_prev(listp, lecp);
3419 	VERIFY(ldi_ev_callback_list.le_walker_prev == NULL);
3420 	for (; lecp; lecp = ldi_ev_callback_list.le_walker_prev) {
3421 		ldi_ev_callback_list.le_walker_prev = list_prev(listp, lecp);
3422 
3423 		/*
3424 		 * Check if matching device
3425 		 */
3426 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3427 			continue;
3428 
3429 
3430 		if (lecp->lec_finalize == NULL) {
3431 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no finalize, "
3432 			    "skipping"));
3433 			continue;	/* not interested in finalize */
3434 		}
3435 
3436 		/*
3437 		 * it is possible that in response to a notify event a
3438 		 * layered driver closed its LDI handle so it is ok
3439 		 * to have a NULL LDI handle for finalize. The layered
3440 		 * driver is expected to maintain state in its "arg"
3441 		 * parameter to keep track of the closed device.
3442 		 */
3443 
3444 		/* Check if matching event */
3445 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3446 		if (strcmp(event, lec_event) != 0) {
3447 			LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): not matching "
3448 			    "event: %s,%s, skipping", event, lec_event));
3449 			continue;
3450 		}
3451 
3452 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): calling finalize"));
3453 
3454 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3455 		    LDI_EV_FAILURE, lecp->lec_arg, ev_data);
3456 
3457 		/*
3458 		 * If LDI native event and LDI handle closed in context
3459 		 * of notify, NULL out the finalize callback as we have
3460 		 * already called the 1 finalize above allowed in this situation
3461 		 */
3462 		if (lecp->lec_lhp == NULL &&
3463 		    ldi_native_cookie(lecp->lec_cookie)) {
3464 			LDI_EVDBG((CE_NOTE,
3465 			    "ldi_invoke_notify(): NULL-ing finalize after "
3466 			    "calling 1 finalize following ldi_close"));
3467 			lecp->lec_finalize = NULL;
3468 		}
3469 	}
3470 
3471 out:
3472 	ldi_ev_callback_list.le_walker_next = NULL;
3473 	ldi_ev_callback_list.le_walker_prev = NULL;
3474 	ldi_ev_unlock();
3475 
3476 	if (ret == LDI_EV_NONE) {
3477 		LDI_EVDBG((CE_NOTE, "ldi_invoke_notify(): no matching "
3478 		    "LDI callbacks"));
3479 	}
3480 
3481 	return (ret);
3482 }
3483 
3484 /*
3485  * Framework function to be called from a layered driver to propagate
3486  * LDI "notify" events to exported minors.
3487  *
3488  * This function is a public interface exported by the LDI framework
3489  * for use by layered drivers to propagate device events up the software
3490  * stack.
3491  */
3492 int
ldi_ev_notify(dev_info_t * dip,minor_t minor,int spec_type,ldi_ev_cookie_t cookie,void * ev_data)3493 ldi_ev_notify(dev_info_t *dip, minor_t minor, int spec_type,
3494     ldi_ev_cookie_t cookie, void *ev_data)
3495 {
3496 	char		*evname = ldi_ev_get_type(cookie);
3497 	uint_t		ct_evtype;
3498 	dev_t		dev;
3499 	major_t		major;
3500 	int		retc;
3501 	int		retl;
3502 
3503 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3504 	ASSERT(dip);
3505 	ASSERT(ldi_native_cookie(cookie));
3506 
3507 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): entered: event=%s, dip=%p",
3508 	    evname, (void *)dip));
3509 
3510 	if (!ldi_ev_sync_event(evname)) {
3511 		cmn_err(CE_PANIC, "ldi_ev_notify(): %s not a "
3512 		    "negotiatable event", evname);
3513 	}
3514 
3515 	major = ddi_driver_major(dip);
3516 	if (major == DDI_MAJOR_T_NONE) {
3517 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3518 		(void) ddi_pathname(dip, path);
3519 		cmn_err(CE_WARN, "ldi_ev_notify: cannot derive major number "
3520 		    "for device %s", path);
3521 		kmem_free(path, MAXPATHLEN);
3522 		return (LDI_EV_FAILURE);
3523 	}
3524 	dev = makedevice(major, minor);
3525 
3526 	/*
3527 	 * Generate negotiation contract events on contracts (if any) associated
3528 	 * with this minor.
3529 	 */
3530 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): calling contract nego."));
3531 	ct_evtype = ldi_contract_event(evname);
3532 	retc = contract_device_negotiate(dip, dev, spec_type, ct_evtype);
3533 	if (retc == CT_NACK) {
3534 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): contract neg. NACK"));
3535 		return (LDI_EV_FAILURE);
3536 	}
3537 
3538 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): LDI invoke notify"));
3539 	retl = ldi_invoke_notify(dip, dev, spec_type, evname, ev_data);
3540 	if (retl == LDI_EV_FAILURE) {
3541 		LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): ldi_invoke_notify "
3542 		    "returned FAILURE. Calling contract negend"));
3543 		contract_device_negend(dip, dev, spec_type, CT_EV_FAILURE);
3544 		return (LDI_EV_FAILURE);
3545 	}
3546 
3547 	/*
3548 	 * The very fact that we are here indicates that there is a
3549 	 * LDI callback (and hence a constraint) for the retire of the
3550 	 * HW device. So we just return success even if there are no
3551 	 * contracts or LDI callbacks against the minors layered on top
3552 	 * of the HW minors
3553 	 */
3554 	LDI_EVDBG((CE_NOTE, "ldi_ev_notify(): returning SUCCESS"));
3555 	return (LDI_EV_SUCCESS);
3556 }
3557 
3558 /*
3559  * LDI framework function to invoke "finalize" callbacks for all layered
3560  * drivers that have registered callbacks for that event.
3561  *
3562  * This function is *not* to be called by layered drivers. It is for I/O
3563  * framework code in Solaris, such as the I/O retire code and DR code
3564  * to call while servicing a device event such as offline or degraded.
3565  */
3566 void
ldi_invoke_finalize(dev_info_t * dip,dev_t dev,int spec_type,char * event,int ldi_result,void * ev_data)3567 ldi_invoke_finalize(dev_info_t *dip, dev_t dev, int spec_type, char *event,
3568     int ldi_result, void *ev_data)
3569 {
3570 	ldi_ev_callback_impl_t *lecp;
3571 	list_t	*listp;
3572 	char	*lec_event;
3573 	int	found = 0;
3574 
3575 	ASSERT(dip);
3576 	ASSERT(dev != DDI_DEV_T_NONE);
3577 	ASSERT(dev != NODEV);
3578 	ASSERT((dev == DDI_DEV_T_ANY && spec_type == 0) ||
3579 	    (spec_type == S_IFCHR || spec_type == S_IFBLK));
3580 	ASSERT(event);
3581 	ASSERT(ldi_native_event(event));
3582 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3583 
3584 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): entered: dip=%p, result=%d"
3585 	    " event=%s", (void *)dip, ldi_result, event));
3586 
3587 	ldi_ev_lock();
3588 	VERIFY(ldi_ev_callback_list.le_walker_next == NULL);
3589 	listp = &ldi_ev_callback_list.le_head;
3590 	for (lecp = list_head(listp); lecp; lecp =
3591 	    ldi_ev_callback_list.le_walker_next) {
3592 		ldi_ev_callback_list.le_walker_next = list_next(listp, lecp);
3593 
3594 		if (lecp->lec_finalize == NULL) {
3595 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): No "
3596 			    "finalize. Skipping"));
3597 			continue;	/* Not interested in finalize */
3598 		}
3599 
3600 		/*
3601 		 * Check if matching device
3602 		 */
3603 		if (!ldi_ev_device_match(lecp, dip, dev, spec_type))
3604 			continue;
3605 
3606 		/*
3607 		 * It is valid for the LDI handle to be NULL during finalize.
3608 		 * The layered driver may have done an LDI close in the notify
3609 		 * callback.
3610 		 */
3611 
3612 		/*
3613 		 * Check if matching event
3614 		 */
3615 		lec_event = ldi_ev_get_type(lecp->lec_cookie);
3616 		if (strcmp(event, lec_event) != 0) {
3617 			LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): Not "
3618 			    "matching event {%s,%s}. Skipping",
3619 			    event, lec_event));
3620 			continue;
3621 		}
3622 
3623 		LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): calling finalize"));
3624 
3625 		found = 1;
3626 
3627 		lecp->lec_finalize(lecp->lec_lhp, lecp->lec_cookie,
3628 		    ldi_result, lecp->lec_arg, ev_data);
3629 
3630 		/*
3631 		 * If LDI native event and LDI handle closed in context
3632 		 * of notify, NULL out the finalize callback as we have
3633 		 * already called the 1 finalize above allowed in this situation
3634 		 */
3635 		if (lecp->lec_lhp == NULL &&
3636 		    ldi_native_cookie(lecp->lec_cookie)) {
3637 			LDI_EVDBG((CE_NOTE,
3638 			    "ldi_invoke_finalize(): NULLing finalize after "
3639 			    "calling 1 finalize following ldi_close"));
3640 			lecp->lec_finalize = NULL;
3641 		}
3642 	}
3643 	ldi_ev_callback_list.le_walker_next = NULL;
3644 	ldi_ev_unlock();
3645 
3646 	if (found)
3647 		return;
3648 
3649 	LDI_EVDBG((CE_NOTE, "ldi_invoke_finalize(): no matching callbacks"));
3650 }
3651 
3652 /*
3653  * Framework function to be called from a layered driver to propagate
3654  * LDI "finalize" events to exported minors.
3655  *
3656  * This function is a public interface exported by the LDI framework
3657  * for use by layered drivers to propagate device events up the software
3658  * stack.
3659  */
3660 void
ldi_ev_finalize(dev_info_t * dip,minor_t minor,int spec_type,int ldi_result,ldi_ev_cookie_t cookie,void * ev_data)3661 ldi_ev_finalize(dev_info_t *dip, minor_t minor, int spec_type, int ldi_result,
3662     ldi_ev_cookie_t cookie, void *ev_data)
3663 {
3664 	dev_t dev;
3665 	major_t major;
3666 	char *evname;
3667 	int ct_result = (ldi_result == LDI_EV_SUCCESS) ?
3668 	    CT_EV_SUCCESS : CT_EV_FAILURE;
3669 	uint_t ct_evtype;
3670 
3671 	ASSERT(dip);
3672 	ASSERT(spec_type == S_IFBLK || spec_type == S_IFCHR);
3673 	ASSERT(ldi_result == LDI_EV_SUCCESS || ldi_result == LDI_EV_FAILURE);
3674 	ASSERT(ldi_native_cookie(cookie));
3675 
3676 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: entered: dip=%p", (void *)dip));
3677 
3678 	major = ddi_driver_major(dip);
3679 	if (major == DDI_MAJOR_T_NONE) {
3680 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3681 		(void) ddi_pathname(dip, path);
3682 		cmn_err(CE_WARN, "ldi_ev_finalize: cannot derive major number "
3683 		    "for device %s", path);
3684 		kmem_free(path, MAXPATHLEN);
3685 		return;
3686 	}
3687 	dev = makedevice(major, minor);
3688 
3689 	evname = ldi_ev_get_type(cookie);
3690 
3691 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling contracts"));
3692 	ct_evtype = ldi_contract_event(evname);
3693 	contract_device_finalize(dip, dev, spec_type, ct_evtype, ct_result);
3694 
3695 	LDI_EVDBG((CE_NOTE, "ldi_ev_finalize: calling ldi_invoke_finalize"));
3696 	ldi_invoke_finalize(dip, dev, spec_type, evname, ldi_result, ev_data);
3697 }
3698 
3699 int
ldi_ev_remove_callbacks(ldi_callback_id_t id)3700 ldi_ev_remove_callbacks(ldi_callback_id_t id)
3701 {
3702 	ldi_ev_callback_impl_t	*lecp;
3703 	ldi_ev_callback_impl_t	*next;
3704 	ldi_ev_callback_impl_t	*found;
3705 	list_t			*listp;
3706 
3707 	ASSERT(!servicing_interrupt());
3708 
3709 	if (id == 0) {
3710 		cmn_err(CE_WARN, "ldi_ev_remove_callbacks: Invalid ID 0");
3711 		return (LDI_EV_FAILURE);
3712 	}
3713 
3714 	LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: entered: id=%p",
3715 	    (void *)id));
3716 
3717 	ldi_ev_lock();
3718 
3719 	listp = &ldi_ev_callback_list.le_head;
3720 	next = found = NULL;
3721 	for (lecp = list_head(listp); lecp; lecp = next) {
3722 		next = list_next(listp, lecp);
3723 		if (lecp->lec_id == id) {
3724 			VERIFY(found == NULL);
3725 
3726 			/*
3727 			 * If there is a walk in progress, shift that walk
3728 			 * along to the next element so that we can remove
3729 			 * this one.  This allows us to unregister an arbitrary
3730 			 * number of callbacks from within a callback.
3731 			 *
3732 			 * See the struct definition (in sunldi_impl.h) for
3733 			 * more information.
3734 			 */
3735 			if (ldi_ev_callback_list.le_walker_next == lecp)
3736 				ldi_ev_callback_list.le_walker_next = next;
3737 			if (ldi_ev_callback_list.le_walker_prev == lecp)
3738 				ldi_ev_callback_list.le_walker_prev = list_prev(
3739 				    listp, ldi_ev_callback_list.le_walker_prev);
3740 
3741 			list_remove(listp, lecp);
3742 			found = lecp;
3743 		}
3744 	}
3745 	ldi_ev_unlock();
3746 
3747 	if (found == NULL) {
3748 		cmn_err(CE_WARN, "No LDI event handler for id (%p)",
3749 		    (void *)id);
3750 		return (LDI_EV_SUCCESS);
3751 	}
3752 
3753 	if (!ldi_native_cookie(found->lec_cookie)) {
3754 		ASSERT(found->lec_notify == NULL);
3755 		if (ddi_remove_event_handler((ddi_callback_id_t)id)
3756 		    != DDI_SUCCESS) {
3757 			cmn_err(CE_WARN, "failed to remove NDI event handler "
3758 			    "for id (%p)", (void *)id);
3759 			ldi_ev_lock();
3760 			list_insert_tail(listp, found);
3761 			ldi_ev_unlock();
3762 			return (LDI_EV_FAILURE);
3763 		}
3764 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: NDI event "
3765 		    "service removal succeeded"));
3766 	} else {
3767 		LDI_EVDBG((CE_NOTE, "ldi_ev_remove_callbacks: removed "
3768 		    "LDI native callbacks"));
3769 	}
3770 	kmem_free(found, sizeof (ldi_ev_callback_impl_t));
3771 
3772 	return (LDI_EV_SUCCESS);
3773 }
3774