xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 78801af7286cd73dbc996d470f789e75993cf15d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_periodic.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 #include <sys/zone.h>
86 #include <sys/clock_impl.h>
87 #include <sys/ddi.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
92 
/* Minimum class system priority; used when spawning kernel worker threads. */
extern	pri_t	minclsyspri;

/* Resource-control handles used to account locked-down (umem) memory. */
extern	rctl_hndl_t rc_project_locked_mem;
extern	rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
static int sunddi_debug = 0;	/* patchable debug-output knob */
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

/* Starts the asynchronous unlock worker thread (defined later in file). */
static	void	i_ddi_umem_unlock_thread_start(void);

static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
static	kthread_t	*ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 * Both pointers are protected by ddi_umem_unlock_mutex above.
 */
static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;

/*
 * DDI(Sun) Function and flag definitions:
 */

#if defined(__x86)
/*
 * Used to indicate which entries were chosen from a range.
 * Name of the property created by ddi_map_regs() on x86.
 */
char	*chosen_reg = "chosen-reg";
#endif

/*
 * Function used to ring system console bell; installed by the console
 * driver, may be NULL until then.
 */
void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
156 int
157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 {
159 	return (i_ddi_apply_range(dp, rdip, rp));
160 }
161 
162 int
163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
164     off_t len)
165 {
166 	ddi_map_req_t mr;
167 #if defined(__x86)
168 	struct {
169 		int	bus;
170 		int	addr;
171 		int	size;
172 	} reg, *reglist;
173 	uint_t	length;
174 	int	rc;
175 
176 	/*
177 	 * get the 'registers' or the 'reg' property.
178 	 * We look up the reg property as an array of
179 	 * int's.
180 	 */
181 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
182 	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
183 	if (rc != DDI_PROP_SUCCESS)
184 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
185 		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
186 	if (rc == DDI_PROP_SUCCESS) {
187 		/*
188 		 * point to the required entry.
189 		 */
190 		reg = reglist[rnumber];
191 		reg.addr += offset;
192 		if (len != 0)
193 			reg.size = len;
194 		/*
195 		 * make a new property containing ONLY the required tuple.
196 		 */
197 		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
198 		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
199 		    != DDI_PROP_SUCCESS) {
200 			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
201 			    "property", DEVI(dip)->devi_name,
202 			    DEVI(dip)->devi_instance, chosen_reg);
203 		}
204 		/*
205 		 * free the memory allocated by
206 		 * ddi_prop_lookup_int_array ().
207 		 */
208 		ddi_prop_free((void *)reglist);
209 	}
210 #endif
211 	mr.map_op = DDI_MO_MAP_LOCKED;
212 	mr.map_type = DDI_MT_RNUMBER;
213 	mr.map_obj.rnumber = rnumber;
214 	mr.map_prot = PROT_READ | PROT_WRITE;
215 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
216 	mr.map_handlep = NULL;
217 	mr.map_vers = DDI_MAP_VERSION;
218 
219 	/*
220 	 * Call my parent to map in my regs.
221 	 */
222 
223 	return (ddi_map(dip, &mr, offset, len, kaddrp));
224 }
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
251 int
252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 	off_t offset, off_t len, caddr_t *vaddrp)
254 {
255 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 }
257 
258 /*
259  * nullbusmap:	The/DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
281 struct regspec *
282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 {
284 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 }
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
408 int
409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 {
411 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
412 	    val_p));
413 }
414 
415 int
416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 {
418 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
419 	    val_p));
420 }
421 
422 int
423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 {
425 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
426 	    val_p));
427 }
428 
429 int
430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 {
432 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
433 	    val_p));
434 }
435 
436 
437 /*
438  * We need to separate the old interfaces from the new ones and leave them
439  * in here for a while. Previous versions of the OS defined the new interfaces
440  * to the old interfaces. This way we can fix things up so that we can
441  * eventually remove these interfaces.
442  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443  * or earlier will actually have a reference to ddi_peekc in the binary.
444  */
445 #ifdef _ILP32
446 int
447 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
448 {
449 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
450 	    val_p));
451 }
452 
453 int
454 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
455 {
456 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
457 	    val_p));
458 }
459 
460 int
461 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
462 {
463 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
464 	    val_p));
465 }
466 
467 int
468 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
469 {
470 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
471 	    val_p));
472 }
473 #endif /* _ILP32 */
474 
475 int
476 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
477 {
478 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
479 }
480 
481 int
482 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
483 {
484 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
485 }
486 
487 int
488 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
489 {
490 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
491 }
492 
493 int
494 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
495 {
496 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
497 }
498 
499 /*
500  * We need to separate the old interfaces from the new ones and leave them
501  * in here for a while. Previous versions of the OS defined the new interfaces
502  * to the old interfaces. This way we can fix things up so that we can
503  * eventually remove these interfaces.
504  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505  * or earlier will actually have a reference to ddi_pokec in the binary.
506  */
507 #ifdef _ILP32
508 int
509 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
510 {
511 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
512 }
513 
514 int
515 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
516 {
517 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
518 }
519 
520 int
521 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
522 {
523 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
524 }
525 
526 int
527 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
528 {
529 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
530 }
531 #endif /* _ILP32 */
532 
533 /*
534  * ddi_peekpokeio() is used primarily by the mem drivers for moving
535  * data to and from uio structures via peek and poke.  Note that we
536  * use "internal" routines ddi_peek and ddi_poke to make this go
537  * slightly faster, avoiding the call overhead ..
538  */
/*
 * Move 'len' bytes between device address 'addr' and 'uio', one naturally
 * aligned chunk at a time, using peek (reads) and poke (writes) so that
 * faults on missing hardware are reported rather than panicking.
 * Returns DDI_SUCCESS, or DDI_FAILURE on any peek/poke/uio error.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* staging buffer for multi-byte transfers */
	int8_t w8;
	size_t sz;		/* size of the chunk chosen this iteration */
	int o;

	/* Never transfer in units larger than the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		if ((len | (uintptr_t)addr) & 1) {
			/* Odd address or odd residual length: go bytewise. */
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest chunk, bounded by xfersize, that
			 * both the remaining length and the current address
			 * are aligned for; the cases fall through toward
			 * smaller sizes.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Read: device -> staging buffer -> uio. */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* Write: uio -> staging buffer -> device. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
614 
615 /*
616  * These routines are used by drivers that do layered ioctls
617  * On sparc, they're implemented in assembler to avoid spilling
618  * register windows in the common (copyin) case ..
619  */
620 #if !defined(__sparc)
621 int
622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
623 {
624 	if (flags & FKIOCTL)
625 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
626 	return (copyin(buf, kernbuf, size));
627 }
628 
629 int
630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
631 {
632 	if (flags & FKIOCTL)
633 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
634 	return (copyout(buf, kernbuf, size));
635 }
636 #endif	/* !__sparc */
637 
638 /*
639  * Conversions in nexus pagesize units.  We don't duplicate the
640  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641  * routines anyway.
642  */
643 unsigned long
644 ddi_btop(dev_info_t *dip, unsigned long bytes)
645 {
646 	unsigned long pages;
647 
648 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
649 	return (pages);
650 }
651 
652 unsigned long
653 ddi_btopr(dev_info_t *dip, unsigned long bytes)
654 {
655 	unsigned long pages;
656 
657 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
658 	return (pages);
659 }
660 
661 unsigned long
662 ddi_ptob(dev_info_t *dip, unsigned long pages)
663 {
664 	unsigned long bytes;
665 
666 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
667 	return (bytes);
668 }
669 
670 unsigned int
671 ddi_enter_critical(void)
672 {
673 	return ((uint_t)spl7());
674 }
675 
676 void
677 ddi_exit_critical(unsigned int spl)
678 {
679 	splx((int)spl);
680 }
681 
682 /*
683  * Nexus ctlops punter
684  */
685 
686 #if !defined(__sparc)
687 /*
688  * Request bus_ctl parent to handle a bus_ctl request
689  *
690  * (The sparc version is in sparc_ddi.s)
691  */
692 int
693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 {
695 	int (*fp)();
696 
697 	if (!d || !r)
698 		return (DDI_FAILURE);
699 
700 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 		return (DDI_FAILURE);
702 
703 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 	return ((*fp)(d, r, op, a, v));
705 }
706 
707 #endif
708 
709 /*
710  * DMA/DVMA setup
711  */
712 
713 #if !defined(__sparc)
714 /*
715  * Request bus_dma_ctl parent to fiddle with a dma request.
716  *
717  * (The sparc version is in sparc_subr.s)
718  */
719 int
720 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
721     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
722     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
723 {
724 	int (*fp)();
725 
726 	if (dip != ddi_root_node())
727 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
728 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
729 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
730 }
731 #endif
732 
733 /*
734  * For all DMA control functions, call the DMA control
735  * routine and return status.
736  *
737  * Just plain assume that the parent is to be called.
738  * If a nexus driver or a thread outside the framework
739  * of a nexus driver or a leaf driver calls these functions,
740  * it is up to them to deal with the fact that the parent's
741  * bus_dma_ctl function will be the first one called.
742  */
743 
744 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
745 
746 /*
747  * This routine is left in place to satisfy link dependencies
748  * for any 3rd party nexus drivers that rely on it.  It is never
749  * called, though.
750  */
751 /*ARGSUSED*/
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	/* Obsolete interface: unconditionally fails (see comment above). */
	return (DDI_FAILURE);
}
758 
759 #if !defined(__sparc)
760 
761 /*
762  * The SPARC versions of these routines are done in assembler to
763  * save register windows, so they're in sparc_subr.s.
764  */
765 
766 int
767 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
768     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
769 {
770 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
771 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
772 
773 	if (dip != ddi_root_node())
774 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
775 
776 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
777 	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
778 }
779 
/*
 * Free a DMA handle by calling up to the parent nexus.
 *
 * NOTE(review): the parent is resolved through devi_bus_dma_allochdl (not a
 * freehdl-specific field) before invoking bus_dma_freehdl -- presumably
 * because alloc and free share the same parent mapping.  Confirm against
 * ddi_impldefs.h / the devinfo setup code before "fixing" this asymmetry.
 */
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(dip, rdip, handlep));
}
791 
792 int
793 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
794     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
795     ddi_dma_cookie_t *cp, uint_t *ccountp)
796 {
797 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
798 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
799 
800 	if (dip != ddi_root_node())
801 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
802 
803 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
804 	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
805 }
806 
807 int
808 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
809     ddi_dma_handle_t handle)
810 {
811 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
812 
813 	if (dip != ddi_root_node())
814 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
815 
816 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
817 	return ((*funcp)(dip, rdip, handle));
818 }
819 
820 
821 int
822 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
823     ddi_dma_handle_t handle, off_t off, size_t len,
824     uint_t cache_flags)
825 {
826 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
827 	    off_t, size_t, uint_t);
828 
829 	if (dip != ddi_root_node())
830 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
831 
832 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
833 	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
834 }
835 
836 int
837 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
838     ddi_dma_handle_t handle, uint_t win, off_t *offp,
839     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
840 {
841 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
842 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
843 
844 	if (dip != ddi_root_node())
845 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
846 
847 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
848 	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
849 	    cookiep, ccountp));
850 }
851 
/*
 * Synchronize caches/write buffers for (part of) a bound DMA object.
 * Returns DDI_SUCCESS immediately when the nexus marked the binding
 * DMP_NOSYNC; otherwise the parent's bus_dma_flush entry point does the
 * work (sync is implemented via the flush vector).
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Route through the cached bus_dma_flush parent, as for flush. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
875 
/*
 * Unbind a DMA handle using the requester's cached unbind function.
 *
 * NOTE(review): unlike the other wrappers here, funcp is taken from
 * DEVI(rdip)->devi_bus_dma_unbindfunc (a cached function pointer) rather
 * than from the parent's devo_bus_ops table; presumably the cache is primed
 * when the node is set up -- confirm in the devinfo initialization code.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
889 
890 #endif	/* !__sparc */
891 
892 /*
893  * DMA burst sizes, and transfer minimums
894  */
895 
896 int
897 ddi_dma_burstsizes(ddi_dma_handle_t handle)
898 {
899 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
900 
901 	if (!dimp)
902 		return (0);
903 	else
904 		return (dimp->dmai_burstsizes);
905 }
906 
907 /*
908  * Given two DMA attribute structures, apply the attributes
909  * of one to the other, following the rules of attributes
910  * and the wishes of the caller.
911  *
912  * The rules of DMA attribute structures are that you cannot
913  * make things *less* restrictive as you apply one set
914  * of attributes to another.
915  *
916  */
917 void
918 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
919 {
920 	attr->dma_attr_addr_lo =
921 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
922 	attr->dma_attr_addr_hi =
923 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
924 	attr->dma_attr_count_max =
925 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
926 	attr->dma_attr_align =
927 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
928 	attr->dma_attr_burstsizes =
929 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
930 	attr->dma_attr_minxfer =
931 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
932 	attr->dma_attr_maxxfer =
933 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
934 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
935 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
936 	    (uint_t)mod->dma_attr_sgllen);
937 	attr->dma_attr_granular =
938 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
939 }
940 
941 /*
942  * mmap/segmap interface:
943  */
944 
945 /*
946  * ddi_segmap:		setup the default segment driver. Calls the drivers
947  *			XXmmap routine to validate the range to be mapped.
948  *			Return ENXIO of the range is not valid.  Create
949  *			a seg_dev segment that contains all of the
950  *			necessary information and will reference the
951  *			default segment driver routines. It returns zero
952  *			on success or non-zero on failure.
953  */
954 int
955 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
956     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
957 {
958 	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
959 	    off_t, uint_t, uint_t, uint_t, struct cred *);
960 
961 	return (spec_segmap(dev, offset, asp, addrp, len,
962 	    prot, maxprot, flags, credp));
963 }
964 
965 /*
966  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
967  *			drivers. Allows each successive parent to resolve
968  *			address translations and add its mappings to the
969  *			mapping list supplied in the page structure. It
970  *			returns zero on success	or non-zero on failure.
971  */
972 
973 int
974 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
975     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
976 {
977 	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
978 }
979 
980 /*
981  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
982  *	Invokes platform specific DDI to determine whether attributes specified
983  *	in attr(9s) are	valid for the region of memory that will be made
984  *	available for direct access to user process via the mmap(2) system call.
985  */
/*
 * Validate the access attributes in *accattrp for register set 'rnumber'
 * of the device identified by 'dev', via a DDI_MO_MAP_HANDLE request to
 * the parent nexus.  On success returns 0 with *hat_flags set from the
 * framework; returns -1 on any failure.
 * NOTE(review): accattrp is dereferenced unconditionally -- callers are
 * presumed to pass a valid pointer.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only validates; no mapping is left behind.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * (Copied out even when ddi_map failed; callers must check the
	 * return value before trusting *hat_flags.)
	 */
	*hat_flags = hp->ah_hat_flags;

	/* The handle was only needed for the query; free it now. */
	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1048 
1049 
1050 /*
1051  * Property functions:	 See also, ddipropdefs.h.
1052  *
1053  * These functions are the framework for the property functions,
1054  * i.e. they support software defined properties.  All implementation
1055  * specific property handling (i.e.: self-identifying devices and
1056  * PROM defined properties are handled in the implementation specific
1057  * functions (defined in ddi_implfuncs.h).
1058  */
1059 
1060 /*
1061  * nopropop:	Shouldn't be called, right?
1062  */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Stub prop_op(9E): all arguments ignored, nothing ever found. */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1070 
1071 #ifdef	DDI_PROP_DEBUG
1072 int ddi_prop_debug_flag = 0;
1073 
1074 int
1075 ddi_prop_debug(int enable)
1076 {
1077 	int prev = ddi_prop_debug_flag;
1078 
1079 	if ((enable != 0) || (prev != 0))
1080 		printf("ddi_prop_debug: debugging %s\n",
1081 		    enable ? "enabled" : "disabled");
1082 	ddi_prop_debug_flag = enable;
1083 	return (prev);
1084 }
1085 
1086 #endif	/* DDI_PROP_DEBUG */
1087 
1088 /*
1089  * Search a property list for a match, if found return pointer
1090  * to matching prop struct, else return NULL.
1091  */
1092 
1093 ddi_prop_t *
1094 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1095 {
1096 	ddi_prop_t	*propp;
1097 
1098 	/*
1099 	 * find the property in child's devinfo:
1100 	 * Search order defined by this search function is first matching
1101 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1102 	 * dev == propp->prop_dev, name == propp->name, and the correct
1103 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1104 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1105 	 */
1106 	if (dev == DDI_DEV_T_NONE)
1107 		dev = DDI_DEV_T_ANY;
1108 
1109 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1110 
1111 		if (!DDI_STRSAME(propp->prop_name, name))
1112 			continue;
1113 
1114 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1115 			continue;
1116 
1117 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1118 			continue;
1119 
1120 		return (propp);
1121 	}
1122 
1123 	return ((ddi_prop_t *)0);
1124 }
1125 
1126 /*
1127  * Search for property within devnames structures
1128  */
1129 ddi_prop_t *
1130 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1131 {
1132 	major_t		major;
1133 	struct devnames	*dnp;
1134 	ddi_prop_t	*propp;
1135 
1136 	/*
1137 	 * Valid dev_t value is needed to index into the
1138 	 * correct devnames entry, therefore a dev_t
1139 	 * value of DDI_DEV_T_ANY is not appropriate.
1140 	 */
1141 	ASSERT(dev != DDI_DEV_T_ANY);
1142 	if (dev == DDI_DEV_T_ANY) {
1143 		return ((ddi_prop_t *)0);
1144 	}
1145 
1146 	major = getmajor(dev);
1147 	dnp = &(devnamesp[major]);
1148 
1149 	if (dnp->dn_global_prop_ptr == NULL)
1150 		return ((ddi_prop_t *)0);
1151 
1152 	LOCK_DEV_OPS(&dnp->dn_lock);
1153 
1154 	for (propp = dnp->dn_global_prop_ptr->prop_list;
1155 	    propp != NULL;
1156 	    propp = (ddi_prop_t *)propp->prop_next) {
1157 
1158 		if (!DDI_STRSAME(propp->prop_name, name))
1159 			continue;
1160 
1161 		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
1162 		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1163 			continue;
1164 
1165 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1166 			continue;
1167 
1168 		/* Property found, return it */
1169 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1170 		return (propp);
1171 	}
1172 
1173 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1174 	return ((ddi_prop_t *)0);
1175 }
1176 
1177 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1178 
1179 /*
1180  * ddi_prop_search_global:
1181  *	Search the global property list within devnames
1182  *	for the named property.  Return the encoded value.
1183  */
1184 static int
1185 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1186     void *valuep, uint_t *lengthp)
1187 {
1188 	ddi_prop_t	*propp;
1189 	caddr_t		buffer;
1190 
1191 	propp =  i_ddi_search_global_prop(dev, name, flags);
1192 
1193 	/* Property NOT found, bail */
1194 	if (propp == (ddi_prop_t *)0)
1195 		return (DDI_PROP_NOT_FOUND);
1196 
1197 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1198 		return (DDI_PROP_UNDEFINED);
1199 
1200 	if ((buffer = kmem_alloc(propp->prop_len,
1201 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1202 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1203 		return (DDI_PROP_NO_MEMORY);
1204 	}
1205 
1206 	/*
1207 	 * Return the encoded data
1208 	 */
1209 	*(caddr_t *)valuep = buffer;
1210 	*lengthp = propp->prop_len;
1211 	bcopy(propp->prop_val, buffer, propp->prop_len);
1212 
1213 	return (DDI_PROP_SUCCESS);
1214 }
1215 
1216 /*
1217  * ddi_prop_search_common:	Lookup and return the encoded value
1218  */
1219 int
1220 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1221     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1222 {
1223 	ddi_prop_t	*propp;
1224 	int		i;
1225 	caddr_t		buffer = NULL;
1226 	caddr_t		prealloc = NULL;
1227 	int		plength = 0;
1228 	dev_info_t	*pdip;
1229 	int		(*bop)();
1230 
1231 	/*CONSTANTCONDITION*/
1232 	while (1)  {
1233 
1234 		mutex_enter(&(DEVI(dip)->devi_lock));
1235 
1236 
1237 		/*
1238 		 * find the property in child's devinfo:
1239 		 * Search order is:
1240 		 *	1. driver defined properties
1241 		 *	2. system defined properties
1242 		 *	3. driver global properties
1243 		 *	4. boot defined properties
1244 		 */
1245 
1246 		propp = i_ddi_prop_search(dev, name, flags,
1247 		    &(DEVI(dip)->devi_drv_prop_ptr));
1248 		if (propp == NULL)  {
1249 			propp = i_ddi_prop_search(dev, name, flags,
1250 			    &(DEVI(dip)->devi_sys_prop_ptr));
1251 		}
1252 		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1253 			propp = i_ddi_prop_search(dev, name, flags,
1254 			    &DEVI(dip)->devi_global_prop_list->prop_list);
1255 		}
1256 
1257 		if (propp == NULL)  {
1258 			propp = i_ddi_prop_search(dev, name, flags,
1259 			    &(DEVI(dip)->devi_hw_prop_ptr));
1260 		}
1261 
1262 		/*
1263 		 * Software property found?
1264 		 */
1265 		if (propp != (ddi_prop_t *)0)	{
1266 
1267 			/*
1268 			 * If explicit undefine, return now.
1269 			 */
1270 			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1271 				mutex_exit(&(DEVI(dip)->devi_lock));
1272 				if (prealloc)
1273 					kmem_free(prealloc, plength);
1274 				return (DDI_PROP_UNDEFINED);
1275 			}
1276 
1277 			/*
1278 			 * If we only want to know if it exists, return now
1279 			 */
1280 			if (prop_op == PROP_EXISTS) {
1281 				mutex_exit(&(DEVI(dip)->devi_lock));
1282 				ASSERT(prealloc == NULL);
1283 				return (DDI_PROP_SUCCESS);
1284 			}
1285 
1286 			/*
1287 			 * If length only request or prop length == 0,
1288 			 * service request and return now.
1289 			 */
1290 			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1291 				*lengthp = propp->prop_len;
1292 
1293 				/*
1294 				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1295 				 * that means prop_len is 0, so set valuep
1296 				 * also to NULL
1297 				 */
1298 				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1299 					*(caddr_t *)valuep = NULL;
1300 
1301 				mutex_exit(&(DEVI(dip)->devi_lock));
1302 				if (prealloc)
1303 					kmem_free(prealloc, plength);
1304 				return (DDI_PROP_SUCCESS);
1305 			}
1306 
1307 			/*
1308 			 * If LEN_AND_VAL_ALLOC and the request can sleep,
1309 			 * drop the mutex, allocate the buffer, and go
1310 			 * through the loop again.  If we already allocated
1311 			 * the buffer, and the size of the property changed,
1312 			 * keep trying...
1313 			 */
1314 			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1315 			    (flags & DDI_PROP_CANSLEEP))  {
1316 				if (prealloc && (propp->prop_len != plength)) {
1317 					kmem_free(prealloc, plength);
1318 					prealloc = NULL;
1319 				}
1320 				if (prealloc == NULL)  {
1321 					plength = propp->prop_len;
1322 					mutex_exit(&(DEVI(dip)->devi_lock));
1323 					prealloc = kmem_alloc(plength,
1324 					    KM_SLEEP);
1325 					continue;
1326 				}
1327 			}
1328 
1329 			/*
1330 			 * Allocate buffer, if required.  Either way,
1331 			 * set `buffer' variable.
1332 			 */
1333 			i = *lengthp;			/* Get callers length */
1334 			*lengthp = propp->prop_len;	/* Set callers length */
1335 
1336 			switch (prop_op) {
1337 
1338 			case PROP_LEN_AND_VAL_ALLOC:
1339 
1340 				if (prealloc == NULL) {
1341 					buffer = kmem_alloc(propp->prop_len,
1342 					    KM_NOSLEEP);
1343 				} else {
1344 					buffer = prealloc;
1345 				}
1346 
1347 				if (buffer == NULL)  {
1348 					mutex_exit(&(DEVI(dip)->devi_lock));
1349 					cmn_err(CE_CONT, prop_no_mem_msg, name);
1350 					return (DDI_PROP_NO_MEMORY);
1351 				}
1352 				/* Set callers buf ptr */
1353 				*(caddr_t *)valuep = buffer;
1354 				break;
1355 
1356 			case PROP_LEN_AND_VAL_BUF:
1357 
1358 				if (propp->prop_len > (i)) {
1359 					mutex_exit(&(DEVI(dip)->devi_lock));
1360 					return (DDI_PROP_BUF_TOO_SMALL);
1361 				}
1362 
1363 				buffer = valuep;  /* Get callers buf ptr */
1364 				break;
1365 
1366 			default:
1367 				break;
1368 			}
1369 
1370 			/*
1371 			 * Do the copy.
1372 			 */
1373 			if (buffer != NULL)
1374 				bcopy(propp->prop_val, buffer, propp->prop_len);
1375 			mutex_exit(&(DEVI(dip)->devi_lock));
1376 			return (DDI_PROP_SUCCESS);
1377 		}
1378 
1379 		mutex_exit(&(DEVI(dip)->devi_lock));
1380 		if (prealloc)
1381 			kmem_free(prealloc, plength);
1382 		prealloc = NULL;
1383 
1384 		/*
1385 		 * Prop not found, call parent bus_ops to deal with possible
1386 		 * h/w layer (possible PROM defined props, etc.) and to
1387 		 * possibly ascend the hierarchy, if allowed by flags.
1388 		 */
1389 		pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1390 
1391 		/*
1392 		 * One last call for the root driver PROM props?
1393 		 */
1394 		if (dip == ddi_root_node())  {
1395 			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1396 			    flags, name, valuep, (int *)lengthp));
1397 		}
1398 
1399 		/*
1400 		 * We may have been called to check for properties
1401 		 * within a single devinfo node that has no parent -
1402 		 * see make_prop()
1403 		 */
1404 		if (pdip == NULL) {
1405 			ASSERT((flags &
1406 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1407 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1408 			return (DDI_PROP_NOT_FOUND);
1409 		}
1410 
1411 		/*
1412 		 * Instead of recursing, we do iterative calls up the tree.
1413 		 * As a bit of optimization, skip the bus_op level if the
1414 		 * node is a s/w node and if the parent's bus_prop_op function
1415 		 * is `ddi_bus_prop_op', because we know that in this case,
1416 		 * this function does nothing.
1417 		 *
1418 		 * 4225415: If the parent isn't attached, or the child
1419 		 * hasn't been named by the parent yet, use the default
1420 		 * ddi_bus_prop_op as a proxy for the parent.  This
1421 		 * allows property lookups in any child/parent state to
1422 		 * include 'prom' and inherited properties, even when
1423 		 * there are no drivers attached to the child or parent.
1424 		 */
1425 
1426 		bop = ddi_bus_prop_op;
1427 		if (i_ddi_devi_attached(pdip) &&
1428 		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
1429 			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1430 
1431 		i = DDI_PROP_NOT_FOUND;
1432 
1433 		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1434 			i = (*bop)(dev, pdip, dip, prop_op,
1435 			    flags | DDI_PROP_DONTPASS,
1436 			    name, valuep, lengthp);
1437 		}
1438 
1439 		if ((flags & DDI_PROP_DONTPASS) ||
1440 		    (i != DDI_PROP_NOT_FOUND))
1441 			return (i);
1442 
1443 		dip = pdip;
1444 	}
1445 	/*NOTREACHED*/
1446 }
1447 
1448 
1449 /*
1450  * ddi_prop_op: The basic property operator for drivers.
1451  *
1452  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1453  *
1454  *	prop_op			valuep
1455  *	------			------
1456  *
1457  *	PROP_LEN		<unused>
1458  *
1459  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1460  *
1461  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1462  *				address of allocated buffer, if successful)
1463  */
1464 int
1465 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1466     char *name, caddr_t valuep, int *lengthp)
1467 {
1468 	int	i;
1469 
1470 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1471 
1472 	/*
1473 	 * If this was originally an LDI prop lookup then we bail here.
1474 	 * The reason is that the LDI property lookup interfaces first call
1475 	 * a drivers prop_op() entry point to allow it to override
1476 	 * properties.  But if we've made it here, then the driver hasn't
1477 	 * overriden any properties.  We don't want to continue with the
1478 	 * property search here because we don't have any type inforamtion.
1479 	 * When we return failure, the LDI interfaces will then proceed to
1480 	 * call the typed property interfaces to look up the property.
1481 	 */
1482 	if (mod_flags & DDI_PROP_DYNAMIC)
1483 		return (DDI_PROP_NOT_FOUND);
1484 
1485 	/*
1486 	 * check for pre-typed property consumer asking for typed property:
1487 	 * see e_ddi_getprop_int64.
1488 	 */
1489 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1490 		mod_flags |= DDI_PROP_TYPE_INT64;
1491 	mod_flags |= DDI_PROP_TYPE_ANY;
1492 
1493 	i = ddi_prop_search_common(dev, dip, prop_op,
1494 	    mod_flags, name, valuep, (uint_t *)lengthp);
1495 	if (i == DDI_PROP_FOUND_1275)
1496 		return (DDI_PROP_SUCCESS);
1497 	return (i);
1498 }
1499 
1500 /*
1501  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1502  * maintain size in number of blksize blocks.  Provides a dynamic property
1503  * implementation for size oriented properties based on nblocks64 and blksize
1504  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1505  * is too large.  This interface should not be used with a nblocks64 that
1506  * represents the driver's idea of how to represent unknown, if nblocks is
1507  * unknown use ddi_prop_op.
1508  */
1509 int
1510 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1511     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1512     uint64_t nblocks64, uint_t blksize)
1513 {
1514 	uint64_t size64;
1515 	int	blkshift;
1516 
1517 	/* convert block size to shift value */
1518 	ASSERT(BIT_ONLYONESET(blksize));
1519 	blkshift = highbit(blksize) - 1;
1520 
1521 	/*
1522 	 * There is no point in supporting nblocks64 values that don't have
1523 	 * an accurate uint64_t byte count representation.
1524 	 */
1525 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1526 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1527 		    name, valuep, lengthp));
1528 
1529 	size64 = nblocks64 << blkshift;
1530 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1531 	    name, valuep, lengthp, size64, blksize));
1532 }
1533 
1534 /*
1535  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1536  */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1544 
1545 /*
1546  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1547  * maintain size in bytes. Provides a of dynamic property implementation for
1548  * size oriented properties based on size64 value and blksize passed in by the
1549  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1550  * should not be used with a size64 that represents the driver's idea of how
1551  * to represent unknown, if size is unknown use ddi_prop_op.
1552  *
1553  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1554  * integers. While the most likely interface to request them ([bc]devi_size)
1555  * is declared int (signed) there is no enforcement of this, which means we
1556  * can't enforce limitations here without risking regression.
1557  */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		/* static: the registered table must outlive this call */
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value; blksize must be a power of 2 */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute the nblocks value in units of blksize blocks */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * The legacy 32-bit names ("nblocks"/"size"/"blksize") are only
	 * serviced here when the value fits in a uint32_t; anything else
	 * (including unknown names) falls back to the static property
	 * path below.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1648 
1649 /*
1650  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1651  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* Convenience wrapper: fixed DEV_BSIZE block size. */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1659 
1660 /*
1661  * Variable length props...
1662  */
1663 
1664 /*
1665  * ddi_getlongprop:	Get variable length property len+val into a buffer
1666  *		allocated by property provider via kmem_alloc. Requester
1667  *		is responsible for freeing returned property via kmem_free.
1668  *
1669  *	Arguments:
1670  *
1671  *	dev_t:	Input:	dev_t of property.
1672  *	dip:	Input:	dev_info_t pointer of child.
1673  *	flags:	Input:	Possible flag modifiers are:
1674  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1675  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1676  *	name:	Input:	name of property.
1677  *	valuep:	Output:	Addr of callers buffer pointer.
1678  *	lengthp:Output:	*lengthp will contain prop length on exit.
1679  *
1680  *	Possible Returns:
1681  *
1682  *		DDI_PROP_SUCCESS:	Prop found and returned.
1683  *		DDI_PROP_NOT_FOUND:	Prop not found
1684  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1685  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1686  */
1687 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * PROP_LEN_AND_VAL_ALLOC: the framework allocates the result
	 * buffer; per the block comment above, the caller must free it
	 * with kmem_free(*valuep, *lengthp).
	 */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1695 
1696 /*
1697  *
1698  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1699  *				buffer. (no memory allocation by provider).
1700  *
1701  *	dev_t:	Input:	dev_t of property.
1702  *	dip:	Input:	dev_info_t pointer of child.
1703  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1704  *	name:	Input:	name of property
1705  *	valuep:	Input:	ptr to callers buffer.
1706  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1707  *			actual length of property on exit.
1708  *
1709  *	Possible returns:
1710  *
1711  *		DDI_PROP_SUCCESS	Prop found and returned
1712  *		DDI_PROP_NOT_FOUND	Prop not found
1713  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1714  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1715  *					no value returned, but actual prop
1716  *					length returned in *lengthp
1717  *
1718  */
1719 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/*
	 * PROP_LEN_AND_VAL_BUF: the value is copied into the caller's
	 * buffer; *lengthp carries the buffer size in and the property
	 * length out (see block comment above).
	 */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
1727 
1728 /*
1729  * Integer/boolean sized props.
1730  *
1731  * Call is value only... returns found boolean or int sized prop value or
1732  * defvalue if prop not found or is wrong length or is explicitly undefined.
1733  * Only flag is DDI_PROP_DONTPASS...
1734  *
1735  * By convention, this interface returns boolean (0) sized properties
1736  * as value (int)1.
1737  *
1738  * This never returns an error, if property not found or specifically
1739  * undefined, the input `defvalue' is returned.
1740  */
1741 
1742 int
1743 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1744 {
1745 	int	propvalue = defvalue;
1746 	int	proplength = sizeof (int);
1747 	int	error;
1748 
1749 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1750 	    flags, name, (caddr_t)&propvalue, &proplength);
1751 
1752 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1753 		propvalue = 1;
1754 
1755 	return (propvalue);
1756 }
1757 
1758 /*
1759  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1760  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1761  */
1762 
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	/* PROP_LEN: only the property length is returned, no value. */
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
1768 
1769 /*
1770  * Allocate a struct prop_driver_data, along with 'size' bytes
1771  * for decoded property data.  This structure is freed by
1772  * calling ddi_prop_free(9F).
1773  */
1774 static void *
1775 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1776 {
1777 	struct prop_driver_data *pdd;
1778 
1779 	/*
1780 	 * Allocate a structure with enough memory to store the decoded data.
1781 	 */
1782 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1783 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1784 	pdd->pdd_prop_free = prop_free;
1785 
1786 	/*
1787 	 * Return a pointer to the location to put the decoded data.
1788 	 */
1789 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1790 }
1791 
1792 /*
1793  * Allocated the memory needed to store the encoded data in the property
1794  * handle.
1795  */
1796 static int
1797 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1798 {
1799 	/*
1800 	 * If size is zero, then set data to NULL and size to 0.  This
1801 	 * is a boolean property.
1802 	 */
1803 	if (size == 0) {
1804 		ph->ph_size = 0;
1805 		ph->ph_data = NULL;
1806 		ph->ph_cur_pos = NULL;
1807 		ph->ph_save_pos = NULL;
1808 	} else {
1809 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1810 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1811 			if (ph->ph_data == NULL)
1812 				return (DDI_PROP_NO_MEMORY);
1813 		} else
1814 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1815 		ph->ph_size = size;
1816 		ph->ph_cur_pos = ph->ph_data;
1817 		ph->ph_save_pos = ph->ph_data;
1818 	}
1819 	return (DDI_PROP_SUCCESS);
1820 }
1821 
1822 /*
1823  * Free the space allocated by the lookup routines.  Each lookup routine
1824  * returns a pointer to the decoded data to the driver.  The driver then
1825  * passes this pointer back to us.  This data actually lives in a struct
1826  * prop_driver_data.  We use negative indexing to find the beginning of
1827  * the structure and then free the entire structure using the size and
1828  * the free routine stored in the structure.
1829  */
1830 void
1831 ddi_prop_free(void *datap)
1832 {
1833 	struct prop_driver_data *pdd;
1834 
1835 	/*
1836 	 * Get the structure
1837 	 */
1838 	pdd = (struct prop_driver_data *)
1839 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
1840 	/*
1841 	 * Call the free routine to free it
1842 	 */
1843 	(*pdd->pdd_prop_free)(pdd);
1844 }
1845 
1846 /*
1847  * Free the data associated with an array of ints,
1848  * allocated with ddi_prop_decode_alloc().
1849  */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* pdd_size covers both the header and the trailing decoded data. */
	kmem_free(pdd, pdd->pdd_size);
}
1855 
1856 /*
1857  * Free a single string property or a single string contained within
1858  * the argv style return value of an array of strings.
1859  */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* pdd_size covers both the header and the trailing decoded data. */
	kmem_free(pdd, pdd->pdd_size);

}
1866 
1867 /*
1868  * Free an array of strings.
1869  */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* pdd_size covers both the header and the trailing decoded data. */
	kmem_free(pdd, pdd->pdd_size);
}
1875 
1876 /*
1877  * Free the data associated with an array of bytes.
1878  */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* pdd_size covers both the header and the trailing decoded data. */
	kmem_free(pdd, pdd->pdd_size);
}
1884 
1885 /*
1886  * Reset the current location pointer in the property handle to the
1887  * beginning of the data.
1888  */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both current and saved positions to the start of the data. */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
1895 
1896 /*
1897  * Restore the current location pointer in the property handle to the
1898  * saved position.
1899  */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	/* Remember the current position so it can be restored later. */
	ph->ph_save_pos = ph->ph_cur_pos;
}
1905 
1906 /*
1907  * Save the location that the current location pointer is pointing to..
1908  */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* Rewind to the position recorded by the last save. */
	ph->ph_cur_pos = ph->ph_save_pos;
}
1914 
1915 /*
1916  * Property encode/decode functions
1917  */
1918 
1919 /*
1920  * Decode a single integer property
1921  */
1922 static int
1923 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1924 {
1925 	int	i;
1926 	int	tmp;
1927 
1928 	/*
1929 	 * If there is nothing to decode return an error
1930 	 */
1931 	if (ph->ph_size == 0)
1932 		return (DDI_PROP_END_OF_DATA);
1933 
1934 	/*
1935 	 * Decode the property as a single integer and return it
1936 	 * in data if we were able to decode it.
1937 	 */
1938 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1939 	if (i < DDI_PROP_RESULT_OK) {
1940 		switch (i) {
1941 		case DDI_PROP_RESULT_EOF:
1942 			return (DDI_PROP_END_OF_DATA);
1943 
1944 		case DDI_PROP_RESULT_ERROR:
1945 			return (DDI_PROP_CANNOT_DECODE);
1946 		}
1947 	}
1948 
1949 	*(int *)data = tmp;
1950 	*nelements = 1;
1951 	return (DDI_PROP_SUCCESS);
1952 }
1953 
1954 /*
1955  * Decode a single 64 bit integer property
1956  */
1957 static int
1958 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1959 {
1960 	int	i;
1961 	int64_t	tmp;
1962 
1963 	/*
1964 	 * If there is nothing to decode return an error
1965 	 */
1966 	if (ph->ph_size == 0)
1967 		return (DDI_PROP_END_OF_DATA);
1968 
1969 	/*
1970 	 * Decode the property as a single integer and return it
1971 	 * in data if we were able to decode it.
1972 	 */
1973 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1974 	if (i < DDI_PROP_RESULT_OK) {
1975 		switch (i) {
1976 		case DDI_PROP_RESULT_EOF:
1977 			return (DDI_PROP_END_OF_DATA);
1978 
1979 		case DDI_PROP_RESULT_ERROR:
1980 			return (DDI_PROP_CANNOT_DECODE);
1981 		}
1982 	}
1983 
1984 	*(int64_t *)data = tmp;
1985 	*nelements = 1;
1986 	return (DDI_PROP_SUCCESS);
1987 }
1988 
1989 /*
1990  * Decode an array of integers property
1991  */
1992 static int
1993 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1994 {
1995 	int	i;
1996 	int	cnt = 0;
1997 	int	*tmp;
1998 	int	*intp;
1999 	int	n;
2000 
2001 	/*
2002 	 * Figure out how many array elements there are by going through the
2003 	 * data without decoding it first and counting.
2004 	 */
2005 	for (;;) {
2006 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2007 		if (i < 0)
2008 			break;
2009 		cnt++;
2010 	}
2011 
2012 	/*
2013 	 * If there are no elements return an error
2014 	 */
2015 	if (cnt == 0)
2016 		return (DDI_PROP_END_OF_DATA);
2017 
2018 	/*
2019 	 * If we cannot skip through the data, we cannot decode it
2020 	 */
2021 	if (i == DDI_PROP_RESULT_ERROR)
2022 		return (DDI_PROP_CANNOT_DECODE);
2023 
2024 	/*
2025 	 * Reset the data pointer to the beginning of the encoded data
2026 	 */
2027 	ddi_prop_reset_pos(ph);
2028 
2029 	/*
2030 	 * Allocated memory to store the decoded value in.
2031 	 */
2032 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2033 	    ddi_prop_free_ints);
2034 
2035 	/*
2036 	 * Decode each element and place it in the space we just allocated
2037 	 */
2038 	tmp = intp;
2039 	for (n = 0; n < cnt; n++, tmp++) {
2040 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2041 		if (i < DDI_PROP_RESULT_OK) {
2042 			/*
2043 			 * Free the space we just allocated
2044 			 * and return an error.
2045 			 */
2046 			ddi_prop_free(intp);
2047 			switch (i) {
2048 			case DDI_PROP_RESULT_EOF:
2049 				return (DDI_PROP_END_OF_DATA);
2050 
2051 			case DDI_PROP_RESULT_ERROR:
2052 				return (DDI_PROP_CANNOT_DECODE);
2053 			}
2054 		}
2055 	}
2056 
2057 	*nelements = cnt;
2058 	*(int **)data = intp;
2059 
2060 	return (DDI_PROP_SUCCESS);
2061 }
2062 
2063 /*
2064  * Decode a 64 bit integer array property
2065  */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;		/* result code of the last int64 operation */
	int	n;		/* index over elements being decoded */
	int	cnt = 0;	/* number of int64 elements in the stream */
	int64_t	*tmp;
	int64_t	*intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	/*
	 * NOTE(review): the result is not checked for NULL here or in the
	 * sibling decoders — presumably the allocation sleeps and cannot
	 * fail; confirm ddi_prop_decode_alloc() semantics.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			/*
			 * Assumes failures are only EOF or ERROR; any other
			 * negative code would fall through this switch and
			 * continue the loop after intp was freed.
			 */
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	/* Hand the allocated array and its length back to the caller. */
	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2136 
2137 /*
2138  * Encode an array of integers property (Can be one element)
2139  */
2140 int
2141 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2142 {
2143 	int	i;
2144 	int	*tmp;
2145 	int	cnt;
2146 	int	size;
2147 
2148 	/*
2149 	 * If there is no data, we cannot do anything
2150 	 */
2151 	if (nelements == 0)
2152 		return (DDI_PROP_CANNOT_ENCODE);
2153 
2154 	/*
2155 	 * Get the size of an encoded int.
2156 	 */
2157 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2158 
2159 	if (size < DDI_PROP_RESULT_OK) {
2160 		switch (size) {
2161 		case DDI_PROP_RESULT_EOF:
2162 			return (DDI_PROP_END_OF_DATA);
2163 
2164 		case DDI_PROP_RESULT_ERROR:
2165 			return (DDI_PROP_CANNOT_ENCODE);
2166 		}
2167 	}
2168 
2169 	/*
2170 	 * Allocate space in the handle to store the encoded int.
2171 	 */
2172 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2173 	    DDI_PROP_SUCCESS)
2174 		return (DDI_PROP_NO_MEMORY);
2175 
2176 	/*
2177 	 * Encode the array of ints.
2178 	 */
2179 	tmp = (int *)data;
2180 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2181 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2182 		if (i < DDI_PROP_RESULT_OK) {
2183 			switch (i) {
2184 			case DDI_PROP_RESULT_EOF:
2185 				return (DDI_PROP_END_OF_DATA);
2186 
2187 			case DDI_PROP_RESULT_ERROR:
2188 				return (DDI_PROP_CANNOT_ENCODE);
2189 			}
2190 		}
2191 	}
2192 
2193 	return (DDI_PROP_SUCCESS);
2194 }
2195 
2196 
2197 /*
2198  * Encode a 64 bit integer array property
2199  */
2200 int
2201 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2202 {
2203 	int i;
2204 	int cnt;
2205 	int size;
2206 	int64_t *tmp;
2207 
2208 	/*
2209 	 * If there is no data, we cannot do anything
2210 	 */
2211 	if (nelements == 0)
2212 		return (DDI_PROP_CANNOT_ENCODE);
2213 
2214 	/*
2215 	 * Get the size of an encoded 64 bit int.
2216 	 */
2217 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2218 
2219 	if (size < DDI_PROP_RESULT_OK) {
2220 		switch (size) {
2221 		case DDI_PROP_RESULT_EOF:
2222 			return (DDI_PROP_END_OF_DATA);
2223 
2224 		case DDI_PROP_RESULT_ERROR:
2225 			return (DDI_PROP_CANNOT_ENCODE);
2226 		}
2227 	}
2228 
2229 	/*
2230 	 * Allocate space in the handle to store the encoded int.
2231 	 */
2232 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2233 	    DDI_PROP_SUCCESS)
2234 		return (DDI_PROP_NO_MEMORY);
2235 
2236 	/*
2237 	 * Encode the array of ints.
2238 	 */
2239 	tmp = (int64_t *)data;
2240 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2241 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2242 		if (i < DDI_PROP_RESULT_OK) {
2243 			switch (i) {
2244 			case DDI_PROP_RESULT_EOF:
2245 				return (DDI_PROP_END_OF_DATA);
2246 
2247 			case DDI_PROP_RESULT_ERROR:
2248 				return (DDI_PROP_CANNOT_ENCODE);
2249 			}
2250 		}
2251 	}
2252 
2253 	return (DDI_PROP_SUCCESS);
2254 }
2255 
2256 /*
2257  * Decode a single string property
2258  */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char		*tmp;
	char		*str;	/* newly allocated decoded string */
	int		i;	/* result of the decode operation */
	int		size;	/* decoded size reported by the operator */

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocated memory to store the decoded value in.
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	/*
	 * The GET_DSIZE operation advanced the handle's cursor (see
	 * ddi_prop_1275_string), so rewind before decoding.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/* Hand the allocated string back; a single string decoded. */
	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}
2319 
2320 /*
2321  * Decode an array of strings.
2322  */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int		cnt = 0;	/* number of strings in the stream */
	char		**strs;		/* single allocation: ptrs + chars */
	char		**tmp;
	char		*ptr;
	int		i;
	int		n;
	int		size;
	size_t		nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total
	 */
	/* One allocation holds the NULL-terminated pointer vector ... */
	nbytes = (cnt + 1) * sizeof (char *);

	/* ... followed by the characters of every decoded string. */
	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	/* Caller receives the pointer vector; free with ddi_prop_free(). */
	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2449 
2450 /*
2451  * Encode a string.
2452  */
2453 int
2454 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2455 {
2456 	char		**tmp;
2457 	int		size;
2458 	int		i;
2459 
2460 	/*
2461 	 * If there is no data, we cannot do anything
2462 	 */
2463 	if (nelements == 0)
2464 		return (DDI_PROP_CANNOT_ENCODE);
2465 
2466 	/*
2467 	 * Get the size of the encoded string.
2468 	 */
2469 	tmp = (char **)data;
2470 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2471 	if (size < DDI_PROP_RESULT_OK) {
2472 		switch (size) {
2473 		case DDI_PROP_RESULT_EOF:
2474 			return (DDI_PROP_END_OF_DATA);
2475 
2476 		case DDI_PROP_RESULT_ERROR:
2477 			return (DDI_PROP_CANNOT_ENCODE);
2478 		}
2479 	}
2480 
2481 	/*
2482 	 * Allocate space in the handle to store the encoded string.
2483 	 */
2484 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2485 		return (DDI_PROP_NO_MEMORY);
2486 
2487 	ddi_prop_reset_pos(ph);
2488 
2489 	/*
2490 	 * Encode the string.
2491 	 */
2492 	tmp = (char **)data;
2493 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2494 	if (i < DDI_PROP_RESULT_OK) {
2495 		switch (i) {
2496 		case DDI_PROP_RESULT_EOF:
2497 			return (DDI_PROP_END_OF_DATA);
2498 
2499 		case DDI_PROP_RESULT_ERROR:
2500 			return (DDI_PROP_CANNOT_ENCODE);
2501 		}
2502 	}
2503 
2504 	return (DDI_PROP_SUCCESS);
2505 }
2506 
2507 
2508 /*
2509  * Encode an array of strings.
2510  */
2511 int
2512 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2513 {
2514 	int		cnt = 0;
2515 	char		**tmp;
2516 	int		size;
2517 	uint_t		total_size;
2518 	int		i;
2519 
2520 	/*
2521 	 * If there is no data, we cannot do anything
2522 	 */
2523 	if (nelements == 0)
2524 		return (DDI_PROP_CANNOT_ENCODE);
2525 
2526 	/*
2527 	 * Get the total size required to encode all the strings.
2528 	 */
2529 	total_size = 0;
2530 	tmp = (char **)data;
2531 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2532 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2533 		if (size < DDI_PROP_RESULT_OK) {
2534 			switch (size) {
2535 			case DDI_PROP_RESULT_EOF:
2536 				return (DDI_PROP_END_OF_DATA);
2537 
2538 			case DDI_PROP_RESULT_ERROR:
2539 				return (DDI_PROP_CANNOT_ENCODE);
2540 			}
2541 		}
2542 		total_size += (uint_t)size;
2543 	}
2544 
2545 	/*
2546 	 * Allocate space in the handle to store the encoded strings.
2547 	 */
2548 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2549 		return (DDI_PROP_NO_MEMORY);
2550 
2551 	ddi_prop_reset_pos(ph);
2552 
2553 	/*
2554 	 * Encode the array of strings.
2555 	 */
2556 	tmp = (char **)data;
2557 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2558 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2559 		if (i < DDI_PROP_RESULT_OK) {
2560 			switch (i) {
2561 			case DDI_PROP_RESULT_EOF:
2562 				return (DDI_PROP_END_OF_DATA);
2563 
2564 			case DDI_PROP_RESULT_ERROR:
2565 				return (DDI_PROP_CANNOT_ENCODE);
2566 			}
2567 		}
2568 	}
2569 
2570 	return (DDI_PROP_SUCCESS);
2571 }
2572 
2573 
2574 /*
2575  * Decode an array of bytes.
2576  */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t		*tmp;	/* newly allocated byte buffer */
	int		nbytes;	/* decoded size reported by the operator */
	int		i;	/* result of the decode operation */

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	/*
	 * Note: `data' is read here as an input to GET_DSIZE before it is
	 * reused below as the output pointer location.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
	    data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocated memory to store the decoded value in.
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/* Hand back the buffer and its length; free with ddi_prop_free(). */
	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}
2634 
2635 /*
2636  * Encode an array of bytes.
2637  */
2638 int
2639 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2640 {
2641 	int		size;
2642 	int		i;
2643 
2644 	/*
2645 	 * If there are no elements, then this is a boolean property,
2646 	 * so just create a property handle with no data and return.
2647 	 */
2648 	if (nelements == 0) {
2649 		(void) ddi_prop_encode_alloc(ph, 0);
2650 		return (DDI_PROP_SUCCESS);
2651 	}
2652 
2653 	/*
2654 	 * Get the size of the encoded array of bytes.
2655 	 */
2656 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2657 	    nelements);
2658 	if (size < DDI_PROP_RESULT_OK) {
2659 		switch (size) {
2660 		case DDI_PROP_RESULT_EOF:
2661 			return (DDI_PROP_END_OF_DATA);
2662 
2663 		case DDI_PROP_RESULT_ERROR:
2664 			return (DDI_PROP_CANNOT_DECODE);
2665 		}
2666 	}
2667 
2668 	/*
2669 	 * Allocate space in the handle to store the encoded bytes.
2670 	 */
2671 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2672 		return (DDI_PROP_NO_MEMORY);
2673 
2674 	/*
2675 	 * Encode the array of bytes.
2676 	 */
2677 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2678 	    nelements);
2679 	if (i < DDI_PROP_RESULT_OK) {
2680 		switch (i) {
2681 		case DDI_PROP_RESULT_EOF:
2682 			return (DDI_PROP_END_OF_DATA);
2683 
2684 		case DDI_PROP_RESULT_ERROR:
2685 			return (DDI_PROP_CANNOT_ENCODE);
2686 		}
2687 	}
2688 
2689 	return (DDI_PROP_SUCCESS);
2690 }
2691 
2692 /*
2693  * OBP 1275 integer, string and byte operators.
2694  *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
2712  *
2713  * DDI_PROP_CMD_GET_ESIZE:
2714  *
2715  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2716  *	DDI_PROP_RESULT_EOF:		end of data
2717  *	> 0:				the encoded size
2718  *
2719  * DDI_PROP_CMD_GET_DSIZE:
2720  *
2721  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2722  *	DDI_PROP_RESULT_EOF:		end of data
2723  *	> 0:				the decoded size
2724  */
2725 
2726 /*
2727  * OBP 1275 integer operator
2728  *
2729  * OBP properties are a byte stream of data, so integers may not be
2730  * properly aligned.  Therefore we need to copy them one byte at a time.
2731  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* PROM data may hold a short (truncated) integer. */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			/*
			 * NOTE(review): `(int *)ph->ph_data + ph->ph_size'
			 * advances by ph_size * sizeof (int) bytes, not
			 * ph_size bytes, so this bound looks looser than
			 * intended — confirm.
			 */
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			/* Same pointer-scaling concern as above. */
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the buffer means nothing to skip. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
2845 
2846 /*
2847  * 64 bit integer operator.
2848  *
2849  * This is an extension, defined by Sun, to the 1275 integer
2850  * operator.  This routine handles the encoding/decoding of
2851  * 64 bit integer properties.
2852  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		/* 64-bit integers are never sourced from the PROM. */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): `(int64_t *)ph->ph_data + ph->ph_size'
			 * advances by ph_size * sizeof (int64_t) bytes, not
			 * ph_size bytes — bound looks looser than intended;
			 * confirm (same pattern as ddi_prop_1275_int).
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the buffer means nothing to skip. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
2960 
2961 /*
2962  * OBP 1275 string operator.
2963  *
2964  * OBP strings are NULL terminated.
2965  */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;	/* character count / required length */
	char	*p;	/* scan pointer into the encoded data */
	char	*end;	/* one past the last byte of encoded data */

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy up to and including the NUL terminator. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* n includes the trailing NUL. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		/*
		 * Note: unlike most GET_DSIZE ops this advances ph_cur_pos;
		 * callers rewind with ddi_prop_reset_pos() before decoding.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3109 
3110 /*
3111  * OBP 1275 byte operator
3112  *
3113  * Caller must specify the number of bytes to get.  OBP encodes bytes
3114  * as a byte so there is a 1-to-1 translation.
3115  */
3116 int
3117 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3118 	uint_t nelements)
3119 {
3120 	switch (cmd) {
3121 	case DDI_PROP_CMD_DECODE:
3122 		/*
3123 		 * Check that there is encoded data
3124 		 */
3125 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3126 		    ph->ph_size < nelements ||
3127 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3128 		    ph->ph_size - nelements)))
3129 			return (DDI_PROP_RESULT_ERROR);
3130 
3131 		/*
3132 		 * Copy out the bytes
3133 		 */
3134 		bcopy(ph->ph_cur_pos, data, nelements);
3135 
3136 		/*
3137 		 * Move the current location
3138 		 */
3139 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3140 		return (DDI_PROP_RESULT_OK);
3141 
3142 	case DDI_PROP_CMD_ENCODE:
3143 		/*
3144 		 * Check that there is room to encode the data
3145 		 */
3146 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3147 		    ph->ph_size < nelements ||
3148 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3149 		    ph->ph_size - nelements)))
3150 			return (DDI_PROP_RESULT_ERROR);
3151 
3152 		/*
3153 		 * Copy in the bytes
3154 		 */
3155 		bcopy(data, ph->ph_cur_pos, nelements);
3156 
3157 		/*
3158 		 * Move the current location to the start of the next bit of
3159 		 * space where we can store encoded data.
3160 		 */
3161 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3162 		return (DDI_PROP_RESULT_OK);
3163 
3164 	case DDI_PROP_CMD_SKIP:
3165 		/*
3166 		 * Check that there is encoded data
3167 		 */
3168 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3169 		    ph->ph_size < nelements)
3170 			return (DDI_PROP_RESULT_ERROR);
3171 
3172 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3173 		    ph->ph_size - nelements))
3174 			return (DDI_PROP_RESULT_EOF);
3175 
3176 		/*
3177 		 * Move the current location
3178 		 */
3179 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3180 		return (DDI_PROP_RESULT_OK);
3181 
3182 	case DDI_PROP_CMD_GET_ESIZE:
3183 		/*
3184 		 * The size in bytes of the encoded size is the
3185 		 * same as the decoded size provided by the caller.
3186 		 */
3187 		return (nelements);
3188 
3189 	case DDI_PROP_CMD_GET_DSIZE:
3190 		/*
3191 		 * Just return the number of bytes specified by the caller.
3192 		 */
3193 		return (nelements);
3194 
3195 	default:
3196 #ifdef DEBUG
3197 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3198 		/*NOTREACHED*/
3199 #else
3200 		return (DDI_PROP_RESULT_ERROR);
3201 #endif	/* DEBUG */
3202 	}
3203 }
3204 
3205 /*
3206  * Used for properties that come from the OBP, hardware configuration files,
3207  * or that are created by calls to ddi_prop_update(9F).
3208  */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode operator */
	ddi_prop_1275_string,	/* string encode/decode operator */
	ddi_prop_1275_bytes,	/* byte-array encode/decode operator */
	ddi_prop_int64_op	/* 64-bit integer operator (Sun extension) */
};
3215 
3216 
3217 /*
3218  * Interface to create/modify a managed property on child's behalf...
3219  * Flags interpreted are:
3220  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3221  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3222  *
3223  * Use same dev_t when modifying or undefining a property.
3224  * Search for properties with DDI_DEV_T_ANY to match first named
3225  * property on the list.
3226  *
3227  * Properties are stored LIFO and subsequently will match the first
3228  * `matching' instance.
3229  */
3230 
3231 /*
3232  * ddi_prop_add:	Add a software defined property
3233  */
3234 
3235 /*
3236  * define to get a new ddi_prop_t.
3237  * km_flags are KM_SLEEP or KM_NOSLEEP.
3238  */
3239 
3240 #define	DDI_NEW_PROP_T(km_flags)	\
3241 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3242 
/*
 * Add a software-defined property to the device's driver, system, or
 * hardware property list (selected by flags).  The new property is
 * linked at the head of the list (LIFO).  Returns DDI_PROP_SUCCESS,
 * DDI_PROP_INVAL_ARG, or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select which of the three per-devinfo property lists to use. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		/* Undo the ddi_prop_t allocation before bailing out. */
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				/* Undo both earlier allocations. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	/* devi_lock serializes updates to the per-devinfo property lists. */
	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3341 
3342 
3343 /*
3344  * ddi_prop_change:	Modify a software managed property value
3345  *
3346  *			Set new length and value if found.
3347  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3348  *			input name is the NULL string.
3349  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3350  *
3351  *			Note: an undef can be modified to be a define,
3352  *			(you can't go the other way.)
3353  */
3354 
3355 static int
3356 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3357     char *name, caddr_t value, int length)
3358 {
3359 	ddi_prop_t	*propp;
3360 	ddi_prop_t	**ppropp;
3361 	caddr_t		p = NULL;
3362 
3363 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3364 		return (DDI_PROP_INVAL_ARG);
3365 
3366 	/*
3367 	 * Preallocate buffer, even if we don't need it...
3368 	 */
3369 	if (length != 0)  {
3370 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3371 		    KM_SLEEP : KM_NOSLEEP);
3372 		if (p == NULL)	{
3373 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3374 			return (DDI_PROP_NO_MEMORY);
3375 		}
3376 	}
3377 
3378 	/*
3379 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3380 	 * number, a real dev_t value should be created based upon the dip's
3381 	 * binding driver.  See ddi_prop_add...
3382 	 */
3383 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3384 		dev = makedevice(
3385 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3386 		    getminor(dev));
3387 
3388 	/*
3389 	 * Check to see if the property exists.  If so we modify it.
3390 	 * Else we create it by calling ddi_prop_add().
3391 	 */
3392 	mutex_enter(&(DEVI(dip)->devi_lock));
3393 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3394 	if (flags & DDI_PROP_SYSTEM_DEF)
3395 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3396 	else if (flags & DDI_PROP_HW_DEF)
3397 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3398 
3399 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3400 		/*
3401 		 * Need to reallocate buffer?  If so, do it
3402 		 * carefully (reuse same space if new prop
3403 		 * is same size and non-NULL sized).
3404 		 */
3405 		if (length != 0)
3406 			bcopy(value, p, length);
3407 
3408 		if (propp->prop_len != 0)
3409 			kmem_free(propp->prop_val, propp->prop_len);
3410 
3411 		propp->prop_len = length;
3412 		propp->prop_val = p;
3413 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3414 		mutex_exit(&(DEVI(dip)->devi_lock));
3415 		return (DDI_PROP_SUCCESS);
3416 	}
3417 
3418 	mutex_exit(&(DEVI(dip)->devi_lock));
3419 	if (length != 0)
3420 		kmem_free(p, length);
3421 
3422 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3423 }
3424 
3425 /*
3426  * Common update routine used to update and encode a property.	Creates
3427  * a property handle, calls the property encode routine, figures out if
3428  * the property already exists and updates if it does.	Otherwise it
3429  * creates if it does not exist.
3430  */
3431 int
3432 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3433     char *name, void *data, uint_t nelements,
3434     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3435 {
3436 	prop_handle_t	ph;
3437 	int		rval;
3438 	uint_t		ourflags;
3439 
3440 	/*
3441 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3442 	 * return error.
3443 	 */
3444 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3445 		return (DDI_PROP_INVAL_ARG);
3446 
3447 	/*
3448 	 * Create the handle
3449 	 */
3450 	ph.ph_data = NULL;
3451 	ph.ph_cur_pos = NULL;
3452 	ph.ph_save_pos = NULL;
3453 	ph.ph_size = 0;
3454 	ph.ph_ops = &prop_1275_ops;
3455 
3456 	/*
3457 	 * ourflags:
3458 	 * For compatibility with the old interfaces.  The old interfaces
3459 	 * didn't sleep by default and slept when the flag was set.  These
3460 	 * interfaces to the opposite.	So the old interfaces now set the
3461 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3462 	 *
3463 	 * ph.ph_flags:
3464 	 * Blocked data or unblocked data allocation
3465 	 * for ph.ph_data in ddi_prop_encode_alloc()
3466 	 */
3467 	if (flags & DDI_PROP_DONTSLEEP) {
3468 		ourflags = flags;
3469 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3470 	} else {
3471 		ourflags = flags | DDI_PROP_CANSLEEP;
3472 		ph.ph_flags = DDI_PROP_CANSLEEP;
3473 	}
3474 
3475 	/*
3476 	 * Encode the data and store it in the property handle by
3477 	 * calling the prop_encode routine.
3478 	 */
3479 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3480 	    DDI_PROP_SUCCESS) {
3481 		if (rval == DDI_PROP_NO_MEMORY)
3482 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3483 		if (ph.ph_size != 0)
3484 			kmem_free(ph.ph_data, ph.ph_size);
3485 		return (rval);
3486 	}
3487 
3488 	/*
3489 	 * The old interfaces use a stacking approach to creating
3490 	 * properties.	If we are being called from the old interfaces,
3491 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3492 	 * create without checking.
3493 	 */
3494 	if (flags & DDI_PROP_STACK_CREATE) {
3495 		rval = ddi_prop_add(match_dev, dip,
3496 		    ourflags, name, ph.ph_data, ph.ph_size);
3497 	} else {
3498 		rval = ddi_prop_change(match_dev, dip,
3499 		    ourflags, name, ph.ph_data, ph.ph_size);
3500 	}
3501 
3502 	/*
3503 	 * Free the encoded data allocated in the prop_encode routine.
3504 	 */
3505 	if (ph.ph_size != 0)
3506 		kmem_free(ph.ph_data, ph.ph_size);
3507 
3508 	return (rval);
3509 }
3510 
3511 
3512 /*
3513  * ddi_prop_create:	Define a managed property:
3514  *			See above for details.
3515  */
3516 
3517 int
3518 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3519     char *name, caddr_t value, int length)
3520 {
3521 	if (!(flag & DDI_PROP_CANSLEEP)) {
3522 		flag |= DDI_PROP_DONTSLEEP;
3523 #ifdef DDI_PROP_DEBUG
3524 		if (length != 0)
3525 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3526 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3527 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3528 #endif /* DDI_PROP_DEBUG */
3529 	}
3530 	flag &= ~DDI_PROP_SYSTEM_DEF;
3531 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3532 	return (ddi_prop_update_common(dev, dip, flag, name,
3533 	    value, length, ddi_prop_fm_encode_bytes));
3534 }
3535 
3536 int
3537 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3538     char *name, caddr_t value, int length)
3539 {
3540 	if (!(flag & DDI_PROP_CANSLEEP))
3541 		flag |= DDI_PROP_DONTSLEEP;
3542 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3543 	return (ddi_prop_update_common(dev, dip, flag,
3544 	    name, value, length, ddi_prop_fm_encode_bytes));
3545 }
3546 
3547 int
3548 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3549     char *name, caddr_t value, int length)
3550 {
3551 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3552 
3553 	/*
3554 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3555 	 * return error.
3556 	 */
3557 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3558 		return (DDI_PROP_INVAL_ARG);
3559 
3560 	if (!(flag & DDI_PROP_CANSLEEP))
3561 		flag |= DDI_PROP_DONTSLEEP;
3562 	flag &= ~DDI_PROP_SYSTEM_DEF;
3563 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3564 		return (DDI_PROP_NOT_FOUND);
3565 
3566 	return (ddi_prop_update_common(dev, dip,
3567 	    (flag | DDI_PROP_TYPE_BYTE), name,
3568 	    value, length, ddi_prop_fm_encode_bytes));
3569 }
3570 
3571 int
3572 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3573     char *name, caddr_t value, int length)
3574 {
3575 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3576 
3577 	/*
3578 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3579 	 * return error.
3580 	 */
3581 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3582 		return (DDI_PROP_INVAL_ARG);
3583 
3584 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3585 		return (DDI_PROP_NOT_FOUND);
3586 
3587 	if (!(flag & DDI_PROP_CANSLEEP))
3588 		flag |= DDI_PROP_DONTSLEEP;
3589 	return (ddi_prop_update_common(dev, dip,
3590 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3591 	    name, value, length, ddi_prop_fm_encode_bytes));
3592 }
3593 
3594 
3595 /*
3596  * Common lookup routine used to lookup and decode a property.
3597  * Creates a property handle, searches for the raw encoded data,
3598  * fills in the handle, and calls the property decode functions
3599  * passed in.
3600  *
3601  * This routine is not static because ddi_bus_prop_op() which lives in
3602  * ddi_impl.c calls it.  No driver should be calling this routine.
3603  */
3604 int
3605 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3606     uint_t flags, char *name, void *data, uint_t *nelements,
3607     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3608 {
3609 	int		rval;
3610 	uint_t		ourflags;
3611 	prop_handle_t	ph;
3612 
3613 	if ((match_dev == DDI_DEV_T_NONE) ||
3614 	    (name == NULL) || (strlen(name) == 0))
3615 		return (DDI_PROP_INVAL_ARG);
3616 
3617 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3618 	    flags | DDI_PROP_CANSLEEP;
3619 
3620 	/*
3621 	 * Get the encoded data
3622 	 */
3623 	bzero(&ph, sizeof (prop_handle_t));
3624 
3625 	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
3626 		/*
3627 		 * For rootnex and unbound dlpi style-2 devices, index into
3628 		 * the devnames' array and search the global
3629 		 * property list.
3630 		 */
3631 		ourflags &= ~DDI_UNBND_DLPI2;
3632 		rval = i_ddi_prop_search_global(match_dev,
3633 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3634 	} else {
3635 		rval = ddi_prop_search_common(match_dev, dip,
3636 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3637 		    &ph.ph_data, &ph.ph_size);
3638 
3639 	}
3640 
3641 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3642 		ASSERT(ph.ph_data == NULL);
3643 		ASSERT(ph.ph_size == 0);
3644 		return (rval);
3645 	}
3646 
3647 	/*
3648 	 * If the encoded data came from a OBP or software
3649 	 * use the 1275 OBP decode/encode routines.
3650 	 */
3651 	ph.ph_cur_pos = ph.ph_data;
3652 	ph.ph_save_pos = ph.ph_data;
3653 	ph.ph_ops = &prop_1275_ops;
3654 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3655 
3656 	rval = (*prop_decoder)(&ph, data, nelements);
3657 
3658 	/*
3659 	 * Free the encoded data
3660 	 */
3661 	if (ph.ph_size != 0)
3662 		kmem_free(ph.ph_data, ph.ph_size);
3663 
3664 	return (rval);
3665 }
3666 
3667 /*
3668  * Lookup and return an array of composite properties.  The driver must
3669  * provide the decode routine.
3670  */
3671 int
3672 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3673     uint_t flags, char *name, void *data, uint_t *nelements,
3674     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3675 {
3676 	return (ddi_prop_lookup_common(match_dev, dip,
3677 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3678 	    data, nelements, prop_decoder));
3679 }
3680 
3681 /*
3682  * Return 1 if a property exists (no type checking done).
3683  * Return 0 if it does not exist.
3684  */
3685 int
3686 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3687 {
3688 	int	i;
3689 	uint_t	x = 0;
3690 
3691 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3692 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3693 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3694 }
3695 
3696 
3697 /*
3698  * Update an array of composite properties.  The driver must
3699  * provide the encode routine.
3700  */
3701 int
3702 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3703     char *name, void *data, uint_t nelements,
3704     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3705 {
3706 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3707 	    name, data, nelements, prop_create));
3708 }
3709 
3710 /*
3711  * Get a single integer or boolean property and return it.
3712  * If the property does not exists, or cannot be decoded,
3713  * then return the defvalue passed in.
3714  *
3715  * This routine always succeeds.
3716  */
3717 int
3718 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3719     char *name, int defvalue)
3720 {
3721 	int	data;
3722 	uint_t	nelements;
3723 	int	rval;
3724 
3725 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3726 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3727 #ifdef DEBUG
3728 		if (dip != NULL) {
3729 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3730 			    " 0x%x (prop = %s, node = %s%d)", flags,
3731 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3732 		}
3733 #endif /* DEBUG */
3734 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3735 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3736 	}
3737 
3738 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3739 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3740 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3741 		if (rval == DDI_PROP_END_OF_DATA)
3742 			data = 1;
3743 		else
3744 			data = defvalue;
3745 	}
3746 	return (data);
3747 }
3748 
3749 /*
3750  * Get a single 64 bit integer or boolean property and return it.
3751  * If the property does not exists, or cannot be decoded,
3752  * then return the defvalue passed in.
3753  *
3754  * This routine always succeeds.
3755  */
3756 int64_t
3757 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
3758     char *name, int64_t defvalue)
3759 {
3760 	int64_t	data;
3761 	uint_t	nelements;
3762 	int	rval;
3763 
3764 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3765 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3766 #ifdef DEBUG
3767 		if (dip != NULL) {
3768 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
3769 			    " 0x%x (prop = %s, node = %s%d)", flags,
3770 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3771 		}
3772 #endif /* DEBUG */
3773 		return (DDI_PROP_INVAL_ARG);
3774 	}
3775 
3776 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3777 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3778 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
3779 	    != DDI_PROP_SUCCESS) {
3780 		if (rval == DDI_PROP_END_OF_DATA)
3781 			data = 1;
3782 		else
3783 			data = defvalue;
3784 	}
3785 	return (data);
3786 }
3787 
3788 /*
3789  * Get an array of integer property
3790  */
3791 int
3792 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3793     char *name, int **data, uint_t *nelements)
3794 {
3795 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3796 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3797 #ifdef DEBUG
3798 		if (dip != NULL) {
3799 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3800 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3801 			    flags, name, ddi_driver_name(dip),
3802 			    ddi_get_instance(dip));
3803 		}
3804 #endif /* DEBUG */
3805 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3806 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3807 	}
3808 
3809 	return (ddi_prop_lookup_common(match_dev, dip,
3810 	    (flags | DDI_PROP_TYPE_INT), name, data,
3811 	    nelements, ddi_prop_fm_decode_ints));
3812 }
3813 
3814 /*
3815  * Get an array of 64 bit integer properties
3816  */
3817 int
3818 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3819     char *name, int64_t **data, uint_t *nelements)
3820 {
3821 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3822 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3823 #ifdef DEBUG
3824 		if (dip != NULL) {
3825 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3826 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3827 			    flags, name, ddi_driver_name(dip),
3828 			    ddi_get_instance(dip));
3829 		}
3830 #endif /* DEBUG */
3831 		return (DDI_PROP_INVAL_ARG);
3832 	}
3833 
3834 	return (ddi_prop_lookup_common(match_dev, dip,
3835 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3836 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
3837 }
3838 
3839 /*
3840  * Update a single integer property.  If the property exists on the drivers
3841  * property list it updates, else it creates it.
3842  */
3843 int
3844 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3845     char *name, int data)
3846 {
3847 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3848 	    name, &data, 1, ddi_prop_fm_encode_ints));
3849 }
3850 
3851 /*
3852  * Update a single 64 bit integer property.
3853  * Update the driver property list if it exists, else create it.
3854  */
3855 int
3856 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3857     char *name, int64_t data)
3858 {
3859 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3860 	    name, &data, 1, ddi_prop_fm_encode_int64));
3861 }
3862 
3863 int
3864 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3865     char *name, int data)
3866 {
3867 	return (ddi_prop_update_common(match_dev, dip,
3868 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3869 	    name, &data, 1, ddi_prop_fm_encode_ints));
3870 }
3871 
3872 int
3873 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3874     char *name, int64_t data)
3875 {
3876 	return (ddi_prop_update_common(match_dev, dip,
3877 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3878 	    name, &data, 1, ddi_prop_fm_encode_int64));
3879 }
3880 
3881 /*
3882  * Update an array of integer property.  If the property exists on the drivers
3883  * property list it updates, else it creates it.
3884  */
3885 int
3886 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3887     char *name, int *data, uint_t nelements)
3888 {
3889 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3890 	    name, data, nelements, ddi_prop_fm_encode_ints));
3891 }
3892 
3893 /*
3894  * Update an array of 64 bit integer properties.
3895  * Update the driver property list if it exists, else create it.
3896  */
3897 int
3898 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3899     char *name, int64_t *data, uint_t nelements)
3900 {
3901 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3902 	    name, data, nelements, ddi_prop_fm_encode_int64));
3903 }
3904 
3905 int
3906 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3907     char *name, int64_t *data, uint_t nelements)
3908 {
3909 	return (ddi_prop_update_common(match_dev, dip,
3910 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3911 	    name, data, nelements, ddi_prop_fm_encode_int64));
3912 }
3913 
3914 int
3915 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3916     char *name, int *data, uint_t nelements)
3917 {
3918 	return (ddi_prop_update_common(match_dev, dip,
3919 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3920 	    name, data, nelements, ddi_prop_fm_encode_ints));
3921 }
3922 
3923 /*
3924  * Get a single string property.
3925  */
3926 int
3927 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3928     char *name, char **data)
3929 {
3930 	uint_t x;
3931 
3932 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3933 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3934 #ifdef DEBUG
3935 		if (dip != NULL) {
3936 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3937 			    "(prop = %s, node = %s%d); invalid bits ignored",
3938 			    "ddi_prop_lookup_string", flags, name,
3939 			    ddi_driver_name(dip), ddi_get_instance(dip));
3940 		}
3941 #endif /* DEBUG */
3942 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3943 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3944 	}
3945 
3946 	return (ddi_prop_lookup_common(match_dev, dip,
3947 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3948 	    &x, ddi_prop_fm_decode_string));
3949 }
3950 
3951 /*
3952  * Get an array of strings property.
3953  */
3954 int
3955 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3956     char *name, char ***data, uint_t *nelements)
3957 {
3958 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3959 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3960 #ifdef DEBUG
3961 		if (dip != NULL) {
3962 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3963 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3964 			    flags, name, ddi_driver_name(dip),
3965 			    ddi_get_instance(dip));
3966 		}
3967 #endif /* DEBUG */
3968 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3969 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3970 	}
3971 
3972 	return (ddi_prop_lookup_common(match_dev, dip,
3973 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3974 	    nelements, ddi_prop_fm_decode_strings));
3975 }
3976 
3977 /*
3978  * Update a single string property.
3979  */
3980 int
3981 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3982     char *name, char *data)
3983 {
3984 	return (ddi_prop_update_common(match_dev, dip,
3985 	    DDI_PROP_TYPE_STRING, name, &data, 1,
3986 	    ddi_prop_fm_encode_string));
3987 }
3988 
3989 int
3990 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3991     char *name, char *data)
3992 {
3993 	return (ddi_prop_update_common(match_dev, dip,
3994 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3995 	    name, &data, 1, ddi_prop_fm_encode_string));
3996 }
3997 
3998 
3999 /*
4000  * Update an array of strings property.
4001  */
4002 int
4003 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4004     char *name, char **data, uint_t nelements)
4005 {
4006 	return (ddi_prop_update_common(match_dev, dip,
4007 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4008 	    ddi_prop_fm_encode_strings));
4009 }
4010 
4011 int
4012 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4013     char *name, char **data, uint_t nelements)
4014 {
4015 	return (ddi_prop_update_common(match_dev, dip,
4016 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4017 	    name, data, nelements,
4018 	    ddi_prop_fm_encode_strings));
4019 }
4020 
4021 
4022 /*
4023  * Get an array of bytes property.
4024  */
4025 int
4026 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4027     char *name, uchar_t **data, uint_t *nelements)
4028 {
4029 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4030 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4031 #ifdef DEBUG
4032 		if (dip != NULL) {
4033 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4034 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4035 			    flags, name, ddi_driver_name(dip),
4036 			    ddi_get_instance(dip));
4037 		}
4038 #endif /* DEBUG */
4039 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4040 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4041 	}
4042 
4043 	return (ddi_prop_lookup_common(match_dev, dip,
4044 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4045 	    nelements, ddi_prop_fm_decode_bytes));
4046 }
4047 
4048 /*
4049  * Update an array of bytes property.
4050  */
4051 int
4052 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4053     char *name, uchar_t *data, uint_t nelements)
4054 {
4055 	if (nelements == 0)
4056 		return (DDI_PROP_INVAL_ARG);
4057 
4058 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4059 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4060 }
4061 
4062 
4063 int
4064 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4065     char *name, uchar_t *data, uint_t nelements)
4066 {
4067 	if (nelements == 0)
4068 		return (DDI_PROP_INVAL_ARG);
4069 
4070 	return (ddi_prop_update_common(match_dev, dip,
4071 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4072 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4073 }
4074 
4075 
4076 /*
4077  * ddi_prop_remove_common:	Undefine a managed property:
4078  *			Input dev_t must match dev_t when defined.
4079  *			Returns DDI_PROP_NOT_FOUND, possibly.
4080  *			DDI_PROP_INVAL_ARG is also possible if dev is
4081  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4082  */
4083 int
4084 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4085 {
4086 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4087 	ddi_prop_t	*propp;
4088 	ddi_prop_t	*lastpropp = NULL;
4089 
4090 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4091 	    (strlen(name) == 0)) {
4092 		return (DDI_PROP_INVAL_ARG);
4093 	}
4094 
4095 	if (flag & DDI_PROP_SYSTEM_DEF)
4096 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4097 	else if (flag & DDI_PROP_HW_DEF)
4098 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4099 
4100 	mutex_enter(&(DEVI(dip)->devi_lock));
4101 
4102 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4103 		if (DDI_STRSAME(propp->prop_name, name) &&
4104 		    (dev == propp->prop_dev)) {
4105 			/*
4106 			 * Unlink this propp allowing for it to
4107 			 * be first in the list:
4108 			 */
4109 
4110 			if (lastpropp == NULL)
4111 				*list_head = propp->prop_next;
4112 			else
4113 				lastpropp->prop_next = propp->prop_next;
4114 
4115 			mutex_exit(&(DEVI(dip)->devi_lock));
4116 
4117 			/*
4118 			 * Free memory and return...
4119 			 */
4120 			kmem_free(propp->prop_name,
4121 			    strlen(propp->prop_name) + 1);
4122 			if (propp->prop_len != 0)
4123 				kmem_free(propp->prop_val, propp->prop_len);
4124 			kmem_free(propp, sizeof (ddi_prop_t));
4125 			return (DDI_PROP_SUCCESS);
4126 		}
4127 		lastpropp = propp;
4128 	}
4129 	mutex_exit(&(DEVI(dip)->devi_lock));
4130 	return (DDI_PROP_NOT_FOUND);
4131 }
4132 
/*
 * ddi_prop_remove: remove a property from the driver property list.
 * The dev_t must match the dev_t used when the property was defined.
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4138 
/*
 * e_ddi_prop_remove: remove a property from the system property list.
 * The dev_t must match the dev_t used when the property was defined.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4144 
4145 /*
4146  * e_ddi_prop_list_delete: remove a list of properties
4147  *	Note that the caller needs to provide the required protection
4148  *	(eg. devi_lock if these properties are still attached to a devi)
4149  */
4150 void
4151 e_ddi_prop_list_delete(ddi_prop_t *props)
4152 {
4153 	i_ddi_prop_list_delete(props);
4154 }
4155 
4156 /*
4157  * ddi_prop_remove_all_common:
4158  *	Used before unloading a driver to remove
4159  *	all properties. (undefines all dev_t's props.)
4160  *	Also removes `explicitly undefined' props.
4161  *	No errors possible.
4162  */
4163 void
4164 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4165 {
4166 	ddi_prop_t	**list_head;
4167 
4168 	mutex_enter(&(DEVI(dip)->devi_lock));
4169 	if (flag & DDI_PROP_SYSTEM_DEF) {
4170 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4171 	} else if (flag & DDI_PROP_HW_DEF) {
4172 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4173 	} else {
4174 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4175 	}
4176 	i_ddi_prop_list_delete(*list_head);
4177 	*list_head = NULL;
4178 	mutex_exit(&(DEVI(dip)->devi_lock));
4179 }
4180 
4181 
4182 /*
4183  * ddi_prop_remove_all:		Remove all driver prop definitions.
4184  */
4185 
4186 void
4187 ddi_prop_remove_all(dev_info_t *dip)
4188 {
4189 	i_ddi_prop_dyn_driver_set(dip, NULL);
4190 	ddi_prop_remove_all_common(dip, 0);
4191 }
4192 
4193 /*
4194  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4195  */
4196 
4197 void
4198 e_ddi_prop_remove_all(dev_info_t *dip)
4199 {
4200 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4201 }
4202 
4203 
4204 /*
4205  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4206  *			searches which match this property return
4207  *			the error code DDI_PROP_UNDEFINED.
4208  *
4209  *			Use ddi_prop_remove to negate effect of
4210  *			ddi_prop_undefine
4211  *
4212  *			See above for error returns.
4213  */
4214 
4215 int
4216 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4217 {
4218 	if (!(flag & DDI_PROP_CANSLEEP))
4219 		flag |= DDI_PROP_DONTSLEEP;
4220 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4221 	return (ddi_prop_update_common(dev, dip, flag,
4222 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4223 }
4224 
4225 int
4226 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4227 {
4228 	if (!(flag & DDI_PROP_CANSLEEP))
4229 		flag |= DDI_PROP_DONTSLEEP;
4230 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4231 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4232 	return (ddi_prop_update_common(dev, dip, flag,
4233 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4234 }
4235 
4236 /*
4237  * Support for gathering dynamic properties in devinfo snapshot.
4238  */
4239 void
4240 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4241 {
4242 	DEVI(dip)->devi_prop_dyn_driver = dp;
4243 }
4244 
/* Return the driver's registered dynamic-property table (may be NULL). */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}
4250 
/* Register (or clear, with NULL) the parent's dynamic-property table. */
void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}
4256 
/* Return the parent's registered dynamic-property table (may be NULL). */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}
4262 
4263 void
4264 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4265 {
4266 	/* for now we invalidate the entire cached snapshot */
4267 	if (dip && dp)
4268 		i_ddi_di_cache_invalidate();
4269 }
4270 
/*
 * Invalidate the cached devinfo snapshot on behalf of a property
 * change; the identifying arguments are currently unused.
 */
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4278 
4279 
4280 /*
4281  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4282  *
4283  * if input dip != child_dip, then call is on behalf of child
4284  * to search PROM, do it via ddi_prop_search_common() and ascend only
4285  * if allowed.
4286  *
4287  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4288  * to search for PROM defined props only.
4289  *
4290  * Note that the PROM search is done only if the requested dev
4291  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4292  * have no associated dev, thus are automatically associated with
4293  * DDI_DEV_T_NONE.
4294  *
4295  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4296  *
4297  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4298  * that the property resides in the prom.
4299  */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer = NULL;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		/* -1 means the PROM has no property by this name */
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			/*
			 * We allocate; the caller receives the buffer in
			 * *valuep and is responsible for freeing len bytes.
			 */
			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			/* caller's buffer: report required size if too small */
			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4386 
4387 /*
4388  * The ddi_bus_prop_op default bus nexus prop op function.
4389  *
4390  * Code to search hardware layer (PROM), if it exists,
4391  * on behalf of child, then, if appropriate, ascend and check
4392  * my own software defined properties...
4393  */
4394 int
4395 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4396     ddi_prop_op_t prop_op, int mod_flags,
4397     char *name, caddr_t valuep, int *lengthp)
4398 {
4399 	int	error;
4400 
4401 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4402 	    name, valuep, lengthp);
4403 
4404 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4405 	    error == DDI_PROP_BUF_TOO_SMALL)
4406 		return (error);
4407 
4408 	if (error == DDI_PROP_NO_MEMORY) {
4409 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4410 		return (DDI_PROP_NO_MEMORY);
4411 	}
4412 
4413 	/*
4414 	 * Check the 'options' node as a last resort
4415 	 */
4416 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4417 		return (DDI_PROP_NOT_FOUND);
4418 
4419 	if (ch_dip == ddi_root_node())	{
4420 		/*
4421 		 * As a last resort, when we've reached
4422 		 * the top and still haven't found the
4423 		 * property, see if the desired property
4424 		 * is attached to the options node.
4425 		 *
4426 		 * The options dip is attached right after boot.
4427 		 */
4428 		ASSERT(options_dip != NULL);
4429 		/*
4430 		 * Force the "don't pass" flag to *just* see
4431 		 * what the options node has to offer.
4432 		 */
4433 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4434 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4435 		    (uint_t *)lengthp));
4436 	}
4437 
4438 	/*
4439 	 * Otherwise, continue search with parent's s/w defined properties...
4440 	 * NOTE: Using `dip' in following call increments the level.
4441 	 */
4442 
4443 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4444 	    name, valuep, (uint_t *)lengthp));
4445 }
4446 
4447 /*
4448  * External property functions used by other parts of the kernel...
4449  */
4450 
4451 /*
4452  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4453  */
4454 
4455 int
4456 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4457     caddr_t valuep, int *lengthp)
4458 {
4459 	_NOTE(ARGUNUSED(type))
4460 	dev_info_t *devi;
4461 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4462 	int error;
4463 
4464 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4465 		return (DDI_PROP_NOT_FOUND);
4466 
4467 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4468 	ddi_release_devi(devi);
4469 	return (error);
4470 }
4471 
4472 /*
4473  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4474  */
4475 
4476 int
4477 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4478     caddr_t valuep, int *lengthp)
4479 {
4480 	_NOTE(ARGUNUSED(type))
4481 	dev_info_t *devi;
4482 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4483 	int error;
4484 
4485 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4486 		return (DDI_PROP_NOT_FOUND);
4487 
4488 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4489 	ddi_release_devi(devi);
4490 	return (error);
4491 }
4492 
4493 /*
4494  * e_ddi_getprop:	See comments for ddi_getprop.
4495  */
4496 int
4497 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4498 {
4499 	_NOTE(ARGUNUSED(type))
4500 	dev_info_t *devi;
4501 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4502 	int	propvalue = defvalue;
4503 	int	proplength = sizeof (int);
4504 	int	error;
4505 
4506 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4507 		return (defvalue);
4508 
4509 	error = cdev_prop_op(dev, devi, prop_op,
4510 	    flags, name, (caddr_t)&propvalue, &proplength);
4511 	ddi_release_devi(devi);
4512 
4513 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4514 		propvalue = 1;
4515 
4516 	return (propvalue);
4517 }
4518 
4519 /*
4520  * e_ddi_getprop_int64:
4521  *
4522  * This is a typed interfaces, but predates typed properties. With the
4523  * introduction of typed properties the framework tries to ensure
4524  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4525  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4526  * typed interface invokes legacy (non-typed) interfaces:
4527  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4528  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4529  * this type of lookup as a single operation we invoke the legacy
4530  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4531  * framework ddi_prop_op(9F) implementation is expected to check for
4532  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4533  * (currently TYPE_INT64).
4534  */
4535 int64_t
4536 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4537     int flags, int64_t defvalue)
4538 {
4539 	_NOTE(ARGUNUSED(type))
4540 	dev_info_t	*devi;
4541 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4542 	int64_t		propvalue = defvalue;
4543 	int		proplength = sizeof (propvalue);
4544 	int		error;
4545 
4546 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4547 		return (defvalue);
4548 
4549 	error = cdev_prop_op(dev, devi, prop_op, flags |
4550 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4551 	ddi_release_devi(devi);
4552 
4553 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4554 		propvalue = 1;
4555 
4556 	return (propvalue);
4557 }
4558 
4559 /*
4560  * e_ddi_getproplen:	See comments for ddi_getproplen.
4561  */
4562 int
4563 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4564 {
4565 	_NOTE(ARGUNUSED(type))
4566 	dev_info_t *devi;
4567 	ddi_prop_op_t prop_op = PROP_LEN;
4568 	int error;
4569 
4570 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4571 		return (DDI_PROP_NOT_FOUND);
4572 
4573 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4574 	ddi_release_devi(devi);
4575 	return (error);
4576 }
4577 
4578 /*
4579  * Routines to get at elements of the dev_info structure
4580  */
4581 
4582 /*
4583  * ddi_binding_name: Return the driver binding name of the devinfo node
4584  *		This is the name the OS used to bind the node to a driver.
4585  */
4586 char *
4587 ddi_binding_name(dev_info_t *dip)
4588 {
4589 	return (DEVI(dip)->devi_binding_name);
4590 }
4591 
4592 /*
4593  * ddi_driver_major: Return the major number of the driver that
4594  *	the supplied devinfo is bound to.  If not yet bound,
4595  *	DDI_MAJOR_T_NONE.
4596  *
4597  * When used by the driver bound to 'devi', this
4598  * function will reliably return the driver major number.
4599  * Other ways of determining the driver major number, such as
4600  *	major = ddi_name_to_major(ddi_get_name(devi));
4601  *	major = ddi_name_to_major(ddi_binding_name(devi));
4602  * can return a different result as the driver/alias binding
4603  * can change dynamically, and thus should be avoided.
4604  */
4605 major_t
4606 ddi_driver_major(dev_info_t *devi)
4607 {
4608 	return (DEVI(devi)->devi_major);
4609 }
4610 
4611 /*
4612  * ddi_driver_name: Return the normalized driver name. this is the
4613  *		actual driver name
4614  */
4615 const char *
4616 ddi_driver_name(dev_info_t *devi)
4617 {
4618 	major_t major;
4619 
4620 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4621 		return (ddi_major_to_name(major));
4622 
4623 	return (ddi_node_name(devi));
4624 }
4625 
4626 /*
4627  * i_ddi_set_binding_name:	Set binding name.
4628  *
4629  *	Set the binding name to the given name.
4630  *	This routine is for use by the ddi implementation, not by drivers.
4631  */
4632 void
4633 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4634 {
4635 	DEVI(dip)->devi_binding_name = name;
4636 
4637 }
4638 
4639 /*
4640  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4641  * the implementation has used to bind the node to a driver.
4642  */
4643 char *
4644 ddi_get_name(dev_info_t *dip)
4645 {
4646 	return (DEVI(dip)->devi_binding_name);
4647 }
4648 
4649 /*
4650  * ddi_node_name: Return the name property of the devinfo node
4651  *		This may differ from ddi_binding_name if the node name
4652  *		does not define a binding to a driver (i.e. generic names).
4653  */
4654 char *
4655 ddi_node_name(dev_info_t *dip)
4656 {
4657 	return (DEVI(dip)->devi_node_name);
4658 }
4659 
4660 
4661 /*
4662  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4663  */
4664 int
4665 ddi_get_nodeid(dev_info_t *dip)
4666 {
4667 	return (DEVI(dip)->devi_nodeid);
4668 }
4669 
4670 int
4671 ddi_get_instance(dev_info_t *dip)
4672 {
4673 	return (DEVI(dip)->devi_instance);
4674 }
4675 
4676 struct dev_ops *
4677 ddi_get_driver(dev_info_t *dip)
4678 {
4679 	return (DEVI(dip)->devi_ops);
4680 }
4681 
4682 void
4683 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4684 {
4685 	DEVI(dip)->devi_ops = devo;
4686 }
4687 
4688 /*
4689  * ddi_set_driver_private/ddi_get_driver_private:
4690  * Get/set device driver private data in devinfo.
4691  */
4692 void
4693 ddi_set_driver_private(dev_info_t *dip, void *data)
4694 {
4695 	DEVI(dip)->devi_driver_data = data;
4696 }
4697 
4698 void *
4699 ddi_get_driver_private(dev_info_t *dip)
4700 {
4701 	return (DEVI(dip)->devi_driver_data);
4702 }
4703 
4704 /*
4705  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4706  */
4707 
4708 dev_info_t *
4709 ddi_get_parent(dev_info_t *dip)
4710 {
4711 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4712 }
4713 
4714 dev_info_t *
4715 ddi_get_child(dev_info_t *dip)
4716 {
4717 	return ((dev_info_t *)DEVI(dip)->devi_child);
4718 }
4719 
4720 dev_info_t *
4721 ddi_get_next_sibling(dev_info_t *dip)
4722 {
4723 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4724 }
4725 
4726 dev_info_t *
4727 ddi_get_next(dev_info_t *dip)
4728 {
4729 	return ((dev_info_t *)DEVI(dip)->devi_next);
4730 }
4731 
4732 void
4733 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4734 {
4735 	DEVI(dip)->devi_next = DEVI(nextdip);
4736 }
4737 
4738 /*
4739  * ddi_root_node:		Return root node of devinfo tree
4740  */
4741 
dev_info_t *
ddi_root_node(void)
{
	/* defined by the devinfo framework; root of the devinfo tree */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4749 
4750 /*
4751  * Miscellaneous functions:
4752  */
4753 
4754 /*
4755  * Implementation specific hooks
4756  */
4757 
4758 void
4759 ddi_report_dev(dev_info_t *d)
4760 {
4761 	char *b;
4762 
4763 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4764 
4765 	/*
4766 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4767 	 * userland, so we print its full name together with the instance
4768 	 * number 'abbreviation' that the driver may use internally.
4769 	 */
4770 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4771 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4772 		cmn_err(CE_CONT, "?%s%d is %s\n",
4773 		    ddi_driver_name(d), ddi_get_instance(d),
4774 		    ddi_pathname(d, b));
4775 		kmem_free(b, MAXPATHLEN);
4776 	}
4777 }
4778 
4779 /*
4780  * ddi_ctlops() is described in the assembler not to buy a new register
4781  * window when it's called and can reduce cost in climbing the device tree
4782  * without using the tail call optimization.
4783  */
4784 int
4785 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4786 {
4787 	int ret;
4788 
4789 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4790 	    (void *)&rnumber, (void *)result);
4791 
4792 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4793 }
4794 
4795 int
4796 ddi_dev_nregs(dev_info_t *dev, int *result)
4797 {
4798 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
4799 }
4800 
4801 int
4802 ddi_dev_is_sid(dev_info_t *d)
4803 {
4804 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
4805 }
4806 
4807 int
4808 ddi_slaveonly(dev_info_t *d)
4809 {
4810 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
4811 }
4812 
4813 int
4814 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
4815 {
4816 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
4817 }
4818 
4819 int
4820 ddi_streams_driver(dev_info_t *dip)
4821 {
4822 	if (i_ddi_devi_attached(dip) &&
4823 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4824 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4825 		return (DDI_SUCCESS);
4826 	return (DDI_FAILURE);
4827 }
4828 
4829 /*
4830  * callback free list
4831  */
4832 
/* size of the static (L2) callback pool, chosen in impl_ddi_callback_init */
static int ncallbacks;
/* pool-size candidates selected by physical memory size */
static int nc_low = 170;
static int nc_med = 512;
static int nc_high = 2048;
/* the static pool itself, and the head of its free list */
static struct ddi_callback *callbackq;
static struct ddi_callback *callbackqfree;
4839 
4840 /*
4841  * set/run callback lists
4842  */
/*
 * Named-kstat counters for the callback machinery; exported as the
 * "unix:0:cbstats" kstat by impl_ddi_callback_init().
 */
struct	cbstats	{
	kstat_named_t	cb_asked;
	kstat_named_t	cb_new;
	kstat_named_t	cb_run;
	kstat_named_t	cb_delete;
	kstat_named_t	cb_maxreq;
	kstat_named_t	cb_maxlist;
	kstat_named_t	cb_alloc;
	kstat_named_t	cb_runouts;
	kstat_named_t	cb_L2;
	kstat_named_t	cb_grow;
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* shorthand accessors for the counter values above */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* protects cbstats, the callback lists, and the free list */
static kmutex_t ddi_callback_mutex;
4879 
4880 /*
4881  * callbacks are handled using a L1/L2 cache. The L1 cache
4882  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4883  * we can't get callbacks from the L1 cache [because pageout is doing
4884  * I/O at the time freemem is 0], we allocate callbacks out of the
4885  * L2 cache. The L2 cache is static and depends on the memory size.
4886  * [We might also count the number of devices at probe time and
4887  * allocate one structure per device and adjust for deferred attach]
4888  */
4889 void
4890 impl_ddi_callback_init(void)
4891 {
4892 	int	i;
4893 	uint_t	physmegs;
4894 	kstat_t	*ksp;
4895 
4896 	physmegs = physmem >> (20 - PAGESHIFT);
4897 	if (physmegs < 48) {
4898 		ncallbacks = nc_low;
4899 	} else if (physmegs < 128) {
4900 		ncallbacks = nc_med;
4901 	} else {
4902 		ncallbacks = nc_high;
4903 	}
4904 
4905 	/*
4906 	 * init free list
4907 	 */
4908 	callbackq = kmem_zalloc(
4909 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4910 	for (i = 0; i < ncallbacks-1; i++)
4911 		callbackq[i].c_nfree = &callbackq[i+1];
4912 	callbackqfree = callbackq;
4913 
4914 	/* init kstats */
4915 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4916 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4917 		ksp->ks_data = (void *) &cbstats;
4918 		kstat_install(ksp);
4919 	}
4920 
4921 }
4922 
/*
 * Insert (or merge) a callback request on the list headed by *listid.
 * If an entry with the same function/argument already exists its count
 * is bumped; otherwise a new entry is allocated, falling back to the
 * static L2 free list, and finally to kmem_alloc_tryhard (KM_PANIC).
 * Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* walk the list looking for an existing (funcp, arg) entry */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 (kmem) failed; try the static L2 free list */
		new = callbackqfree;
		if (new == NULL) {
			/* last resort: tryhard allocation, panics on failure */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* append at the tail (marker), or make it the list head */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4966 
4967 void
4968 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4969 {
4970 	mutex_enter(&ddi_callback_mutex);
4971 	cbstats.nc_asked++;
4972 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4973 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4974 	(void) callback_insert(funcp, arg, listid, 1);
4975 	mutex_exit(&ddi_callback_mutex);
4976 }
4977 
/*
 * Softcall handler: drain the callback list headed by *Queue.  Each
 * entry is unlinked (returned to the L2 free list or kmem_free'd),
 * then its function is invoked up to c_count times; a zero return
 * from the callback re-queues the remaining count via callback_insert.
 * `pending' tracks the total outstanding invocations so the outer
 * loop keeps going until the list is truly drained.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* on the first pass, total up the outstanding invocations */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		/* unlink the head entry and recycle its storage */
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* invoke the callback; drop the lock while it runs */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* not ready: put the remainder back */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5038 
/*
 * Arrange for the callbacks queued on *listid to be run via softcall()
 * (real_callback_run), rather than in the caller's context.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5044 
5045 /*
5046  * ddi_periodic_t
5047  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5048  *     int level)
5049  *
5050  * INTERFACE LEVEL
5051  *      Solaris DDI specific (Solaris DDI)
5052  *
5053  * PARAMETERS
5054  *      func: the callback function
5055  *
5056  *            The callback function will be invoked. The function is invoked
5057  *            in kernel context if the argument level passed is the zero.
5058  *            Otherwise it's invoked in interrupt context at the specified
5059  *            level.
5060  *
5061  *       arg: the argument passed to the callback function
5062  *
5063  *  interval: interval time
5064  *
5065  *    level : callback interrupt level
5066  *
5067  *            If the value is the zero, the callback function is invoked
5068  *            in kernel context. If the value is more than the zero, but
5069  *            less than or equal to ten, the callback function is invoked in
5070  *            interrupt context at the specified interrupt level, which may
5071  *            be used for real time applications.
5072  *
5073  *            This value must be in range of 0-10, which can be a numeric
5074  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5075  *
5076  * DESCRIPTION
5077  *      ddi_periodic_add(9F) schedules the specified function to be
5078  *      periodically invoked in the interval time.
5079  *
5080  *      As well as timeout(9F), the exact time interval over which the function
5081  *      takes effect cannot be guaranteed, but the value given is a close
5082  *      approximation.
5083  *
5084  *      Drivers waiting on behalf of processes with real-time constraints must
5085  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5086  *
5087  * RETURN VALUES
5088  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5089  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5090  *
5091  * CONTEXT
5092  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5093  *      it cannot be called in interrupt context, which is different from
5094  *      timeout(9F).
5095  */
5096 ddi_periodic_t
5097 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5098 {
5099 	/*
5100 	 * Sanity check of the argument level.
5101 	 */
5102 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5103 		cmn_err(CE_PANIC,
5104 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5105 
5106 	/*
5107 	 * Sanity check of the context. ddi_periodic_add() cannot be
5108 	 * called in either interrupt context or high interrupt context.
5109 	 */
5110 	if (servicing_interrupt())
5111 		cmn_err(CE_PANIC,
5112 		    "ddi_periodic_add: called in (high) interrupt context.");
5113 
5114 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5115 }
5116 
5117 /*
5118  * void
5119  * ddi_periodic_delete(ddi_periodic_t req)
5120  *
5121  * INTERFACE LEVEL
5122  *     Solaris DDI specific (Solaris DDI)
5123  *
5124  * PARAMETERS
5125  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5126  *     previously.
5127  *
5128  * DESCRIPTION
5129  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5130  *     previously requested.
5131  *
5132  *     ddi_periodic_delete(9F) will not return until the pending request
5133  *     is canceled or executed.
5134  *
5135  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5136  *     timeout which is either running on another CPU, or has already
5137  *     completed causes no problems. However, unlike untimeout(9F), there is
5138  *     no restrictions on the lock which might be held across the call to
5139  *     ddi_periodic_delete(9F).
5140  *
5141  *     Drivers should be structured with the understanding that the arrival of
5142  *     both an interrupt and a timeout for that interrupt can occasionally
5143  *     occur, in either order.
5144  *
5145  * CONTEXT
5146  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5147  *     it cannot be called in interrupt context, which is different from
5148  *     untimeout(9F).
5149  */
5150 void
5151 ddi_periodic_delete(ddi_periodic_t req)
5152 {
5153 	/*
5154 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5155 	 * called in either interrupt context or high interrupt context.
5156 	 */
5157 	if (servicing_interrupt())
5158 		cmn_err(CE_PANIC,
5159 		    "ddi_periodic_delete: called in (high) interrupt context.");
5160 
5161 	i_untimeout((timeout_t)req);
5162 }
5163 
5164 dev_info_t *
5165 nodevinfo(dev_t dev, int otyp)
5166 {
5167 	_NOTE(ARGUNUSED(dev, otyp))
5168 	return ((dev_info_t *)0);
5169 }
5170 
5171 /*
5172  * A driver should support its own getinfo(9E) entry point. This function
5173  * is provided as a convenience for ON drivers that don't expect their
5174  * getinfo(9E) entry point to be called. A driver that uses this must not
5175  * call ddi_create_minor_node.
5176  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* deliberate stub: see the block comment above */
	return (DDI_FAILURE);
}
5183 
5184 /*
5185  * A driver should support its own getinfo(9E) entry point. This function
5186  * is provided as a convenience for ON drivers that where the minor number
5187  * is the instance. Drivers that do not have 1:1 mapping must implement
5188  * their own getinfo(9E) function.
5189  */
5190 int
5191 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5192     void *arg, void **result)
5193 {
5194 	_NOTE(ARGUNUSED(dip))
5195 	int	instance;
5196 
5197 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5198 		return (DDI_FAILURE);
5199 
5200 	instance = getminor((dev_t)(uintptr_t)arg);
5201 	*result = (void *)(uintptr_t)instance;
5202 	return (DDI_SUCCESS);
5203 }
5204 
/* Stub attach(9E)-style entry point that always fails. */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5211 
/* Default "no DMA" bus op: refuse all DMA mapping requests. */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5219 
/* Default "no DMA" bus op: refuse all DMA handle allocations. */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5227 
/* Default "no DMA" bus op: nothing to free; always fails. */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5235 
/* Default "no DMA" bus op: refuse all DMA bind requests. */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5244 
/* Default "no DMA" bus op: nothing bound; always fails. */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5252 
/* Default "no DMA" bus op: no mappings to flush; always fails. */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5261 
/* Default "no DMA" bus op: no DMA windows; always fails. */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5270 
/* Default "no DMA" bus op: refuse all DMA control operations. */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5279 
/* Do-nothing placeholder for optional entry points. */
void
ddivoid(void)
{}
5283 
/* Stub chpoll(9E) entry point for drivers that do not support poll(2). */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5291 
/*
 * ddi_get_cred: Return the credentials of the current thread.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5297 
/*
 * ddi_get_lbolt: Return the current lbolt value, narrowed to clock_t.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
5303 
/*
 * ddi_get_lbolt64: Return the current lbolt value as a full 64-bit count.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5309 
5310 time_t
5311 ddi_get_time(void)
5312 {
5313 	time_t	now;
5314 
5315 	if ((now = gethrestime_sec()) == 0) {
5316 		timestruc_t ts;
5317 		mutex_enter(&tod_lock);
5318 		ts = tod_get();
5319 		mutex_exit(&tod_lock);
5320 		return (ts.tv_sec);
5321 	} else {
5322 		return (now);
5323 	}
5324 }
5325 
/*
 * ddi_get_pid: Return the process id of the current thread's process.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5331 
/*
 * ddi_get_kt_did: Return the current kernel thread's identifier.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5337 
5338 /*
5339  * This function returns B_TRUE if the caller can reasonably expect that a call
5340  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5341  * by user-level signal.  If it returns B_FALSE, then the caller should use
5342  * other means to make certain that the wait will not hang "forever."
5343  *
5344  * It does not check the signal mask, nor for reception of any particular
5345  * signal.
5346  *
5347  * Currently, a thread can receive a signal if it's not a kernel thread and it
5348  * is not in the middle of exit(2) tear-down.  Threads that are in that
5349  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5350  * cv_timedwait, and qwait_sig to qwait.
5351  */
5352 boolean_t
5353 ddi_can_receive_sig(void)
5354 {
5355 	proc_t *pp;
5356 
5357 	if (curthread->t_proc_flag & TP_LWPEXIT)
5358 		return (B_FALSE);
5359 	if ((pp = ttoproc(curthread)) == NULL)
5360 		return (B_FALSE);
5361 	return (pp->p_as != &kas);
5362 }
5363 
5364 /*
5365  * Swap bytes in 16-bit [half-]words
5366  */
/*
 * Copy nbytes from src to dst, exchanging adjacent byte pairs as it
 * goes (a trailing odd byte is ignored).
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *from = src;
	unsigned char *to = dst;
	size_t pairs = nbytes >> 1;

	while (pairs-- > 0) {
		unsigned char lo = from[0];

		to[0] = from[1];
		to[1] = lo;
		from += 2;
		to += 2;
	}
}
5383 
5384 static void
5385 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5386 {
5387 	int			circ;
5388 	struct ddi_minor_data	*dp;
5389 
5390 	ndi_devi_enter(ddip, &circ);
5391 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5392 		DEVI(ddip)->devi_minor = dmdp;
5393 	} else {
5394 		while (dp->next != (struct ddi_minor_data *)NULL)
5395 			dp = dp->next;
5396 		dp->next = dmdp;
5397 	}
5398 	ndi_devi_exit(ddip, circ);
5399 }
5400 
5401 /*
5402  * Part of the obsolete SunCluster DDI Hooks.
5403  * Keep for binary compatibility
5404  */
5405 minor_t
5406 ddi_getiminor(dev_t dev)
5407 {
5408 	return (getminor(dev));
5409 }
5410 
5411 static int
5412 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5413 {
5414 	int se_flag;
5415 	int kmem_flag;
5416 	int se_err;
5417 	char *pathname, *class_name;
5418 	sysevent_t *ev = NULL;
5419 	sysevent_id_t eid;
5420 	sysevent_value_t se_val;
5421 	sysevent_attr_list_t *ev_attr_list = NULL;
5422 
5423 	/* determine interrupt context */
5424 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5425 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5426 
5427 	i_ddi_di_cache_invalidate();
5428 
5429 #ifdef DEBUG
5430 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5431 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5432 		    "interrupt level by driver %s",
5433 		    ddi_driver_name(dip));
5434 	}
5435 #endif /* DEBUG */
5436 
5437 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5438 	if (ev == NULL) {
5439 		goto fail;
5440 	}
5441 
5442 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5443 	if (pathname == NULL) {
5444 		sysevent_free(ev);
5445 		goto fail;
5446 	}
5447 
5448 	(void) ddi_pathname(dip, pathname);
5449 	ASSERT(strlen(pathname));
5450 	se_val.value_type = SE_DATA_TYPE_STRING;
5451 	se_val.value.sv_string = pathname;
5452 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5453 	    &se_val, se_flag) != 0) {
5454 		kmem_free(pathname, MAXPATHLEN);
5455 		sysevent_free(ev);
5456 		goto fail;
5457 	}
5458 	kmem_free(pathname, MAXPATHLEN);
5459 
5460 	/* add the device class attribute */
5461 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5462 		se_val.value_type = SE_DATA_TYPE_STRING;
5463 		se_val.value.sv_string = class_name;
5464 		if (sysevent_add_attr(&ev_attr_list,
5465 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5466 			sysevent_free_attr(ev_attr_list);
5467 			goto fail;
5468 		}
5469 	}
5470 
5471 	/*
5472 	 * allow for NULL minor names
5473 	 */
5474 	if (minor_name != NULL) {
5475 		se_val.value.sv_string = minor_name;
5476 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5477 		    &se_val, se_flag) != 0) {
5478 			sysevent_free_attr(ev_attr_list);
5479 			sysevent_free(ev);
5480 			goto fail;
5481 		}
5482 	}
5483 
5484 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5485 		sysevent_free_attr(ev_attr_list);
5486 		sysevent_free(ev);
5487 		goto fail;
5488 	}
5489 
5490 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5491 		if (se_err == SE_NO_TRANSPORT) {
5492 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5493 			    "for driver %s (%s). Run devfsadm -i %s",
5494 			    ddi_driver_name(dip), "syseventd not responding",
5495 			    ddi_driver_name(dip));
5496 		} else {
5497 			sysevent_free(ev);
5498 			goto fail;
5499 		}
5500 	}
5501 
5502 	sysevent_free(ev);
5503 	return (DDI_SUCCESS);
5504 fail:
5505 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5506 	    "for driver %s. Run devfsadm -i %s",
5507 	    ddi_driver_name(dip), ddi_driver_name(dip));
5508 	return (DDI_SUCCESS);
5509 }
5510 
5511 /*
5512  * failing to remove a minor node is not of interest
5513  * therefore we do not generate an error message
5514  */
5515 static int
5516 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5517 {
5518 	char *pathname, *class_name;
5519 	sysevent_t *ev;
5520 	sysevent_id_t eid;
5521 	sysevent_value_t se_val;
5522 	sysevent_attr_list_t *ev_attr_list = NULL;
5523 
5524 	/*
5525 	 * only log ddi_remove_minor_node() calls outside the scope
5526 	 * of attach/detach reconfigurations and when the dip is
5527 	 * still initialized.
5528 	 */
5529 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5530 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5531 		return (DDI_SUCCESS);
5532 	}
5533 
5534 	i_ddi_di_cache_invalidate();
5535 
5536 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5537 	if (ev == NULL) {
5538 		return (DDI_SUCCESS);
5539 	}
5540 
5541 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5542 	if (pathname == NULL) {
5543 		sysevent_free(ev);
5544 		return (DDI_SUCCESS);
5545 	}
5546 
5547 	(void) ddi_pathname(dip, pathname);
5548 	ASSERT(strlen(pathname));
5549 	se_val.value_type = SE_DATA_TYPE_STRING;
5550 	se_val.value.sv_string = pathname;
5551 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5552 	    &se_val, SE_SLEEP) != 0) {
5553 		kmem_free(pathname, MAXPATHLEN);
5554 		sysevent_free(ev);
5555 		return (DDI_SUCCESS);
5556 	}
5557 
5558 	kmem_free(pathname, MAXPATHLEN);
5559 
5560 	/*
5561 	 * allow for NULL minor names
5562 	 */
5563 	if (minor_name != NULL) {
5564 		se_val.value.sv_string = minor_name;
5565 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5566 		    &se_val, SE_SLEEP) != 0) {
5567 			sysevent_free_attr(ev_attr_list);
5568 			goto fail;
5569 		}
5570 	}
5571 
5572 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5573 		/* add the device class, driver name and instance attributes */
5574 
5575 		se_val.value_type = SE_DATA_TYPE_STRING;
5576 		se_val.value.sv_string = class_name;
5577 		if (sysevent_add_attr(&ev_attr_list,
5578 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5579 			sysevent_free_attr(ev_attr_list);
5580 			goto fail;
5581 		}
5582 
5583 		se_val.value_type = SE_DATA_TYPE_STRING;
5584 		se_val.value.sv_string = (char *)ddi_driver_name(dip);
5585 		if (sysevent_add_attr(&ev_attr_list,
5586 		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5587 			sysevent_free_attr(ev_attr_list);
5588 			goto fail;
5589 		}
5590 
5591 		se_val.value_type = SE_DATA_TYPE_INT32;
5592 		se_val.value.sv_int32 = ddi_get_instance(dip);
5593 		if (sysevent_add_attr(&ev_attr_list,
5594 		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5595 			sysevent_free_attr(ev_attr_list);
5596 			goto fail;
5597 		}
5598 
5599 	}
5600 
5601 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5602 		sysevent_free_attr(ev_attr_list);
5603 	} else {
5604 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5605 	}
5606 fail:
5607 	sysevent_free(ev);
5608 	return (DDI_SUCCESS);
5609 }
5610 
5611 /*
5612  * Derive the device class of the node.
5613  * Device class names aren't defined yet. Until this is done we use
5614  * devfs event subclass names as device class names.
5615  */
5616 static int
5617 derive_devi_class(dev_info_t *dip, const char *node_type, int flag)
5618 {
5619 	int rv = DDI_SUCCESS;
5620 
5621 	if (i_ddi_devi_class(dip) == NULL) {
5622 		if (strncmp(node_type, DDI_NT_BLOCK,
5623 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5624 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5625 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5626 		    strcmp(node_type, DDI_NT_FD) != 0) {
5627 
5628 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5629 
5630 		} else if (strncmp(node_type, DDI_NT_NET,
5631 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5632 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5633 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5634 
5635 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5636 
5637 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5638 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5639 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5640 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5641 
5642 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5643 
5644 		} else if (strncmp(node_type, DDI_PSEUDO,
5645 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5646 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5647 		    sizeof (ESC_LOFI) -1) == 0)) {
5648 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5649 		}
5650 	}
5651 
5652 	return (rv);
5653 }
5654 
5655 /*
5656  * Check compliance with PSARC 2003/375:
5657  *
5658  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5659  * exceed IFNAMSIZ (16) characters in length.
5660  */
5661 static boolean_t
5662 verify_name(const char *name)
5663 {
5664 	size_t len = strlen(name);
5665 	const char *cp;
5666 
5667 	if (len == 0 || len > IFNAMSIZ)
5668 		return (B_FALSE);
5669 
5670 	for (cp = name; *cp != '\0'; cp++) {
5671 		if (!isalnum(*cp) && *cp != '_')
5672 			return (B_FALSE);
5673 	}
5674 
5675 	return (B_TRUE);
5676 }
5677 
5678 /*
5679  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5680  *				attach it to the given devinfo node.
5681  */
5682 
5683 static int
5684 ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type,
5685     minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype,
5686     const char *read_priv, const char *write_priv, mode_t priv_mode)
5687 {
5688 	struct ddi_minor_data *dmdp;
5689 	major_t major;
5690 
5691 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5692 		return (DDI_FAILURE);
5693 
5694 	if (name == NULL)
5695 		return (DDI_FAILURE);
5696 
5697 	/*
5698 	 * Log a message if the minor number the driver is creating
5699 	 * is not expressible on the on-disk filesystem (currently
5700 	 * this is limited to 18 bits both by UFS). The device can
5701 	 * be opened via devfs, but not by device special files created
5702 	 * via mknod().
5703 	 */
5704 	if (minor_num > L_MAXMIN32) {
5705 		cmn_err(CE_WARN,
5706 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5707 		    ddi_driver_name(dip), ddi_get_instance(dip),
5708 		    name, minor_num);
5709 		return (DDI_FAILURE);
5710 	}
5711 
5712 	/* dip must be bound and attached */
5713 	major = ddi_driver_major(dip);
5714 	ASSERT(major != DDI_MAJOR_T_NONE);
5715 
5716 	/*
5717 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5718 	 */
5719 	if (node_type == NULL) {
5720 		node_type = DDI_PSEUDO;
5721 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5722 		    " minor node %s; default to DDI_PSEUDO",
5723 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5724 	}
5725 
5726 	/*
5727 	 * If the driver is a network driver, ensure that the name falls within
5728 	 * the interface naming constraints specified by PSARC/2003/375.
5729 	 */
5730 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5731 		if (!verify_name(name))
5732 			return (DDI_FAILURE);
5733 
5734 		if (mtype == DDM_MINOR) {
5735 			struct devnames *dnp = &devnamesp[major];
5736 
5737 			/* Mark driver as a network driver */
5738 			LOCK_DEV_OPS(&dnp->dn_lock);
5739 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5740 
5741 			/*
5742 			 * If this minor node is created during the device
5743 			 * attachment, this is a physical network device.
5744 			 * Mark the driver as a physical network driver.
5745 			 */
5746 			if (DEVI_IS_ATTACHING(dip))
5747 				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
5748 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5749 		}
5750 	}
5751 
5752 	if (mtype == DDM_MINOR) {
5753 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5754 		    DDI_SUCCESS)
5755 			return (DDI_FAILURE);
5756 	}
5757 
5758 	/*
5759 	 * Take care of minor number information for the node.
5760 	 */
5761 
5762 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5763 	    KM_NOSLEEP)) == NULL) {
5764 		return (DDI_FAILURE);
5765 	}
5766 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5767 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5768 		return (DDI_FAILURE);
5769 	}
5770 	dmdp->dip = dip;
5771 	dmdp->ddm_dev = makedevice(major, minor_num);
5772 	dmdp->ddm_spec_type = spec_type;
5773 	dmdp->ddm_node_type = node_type;
5774 	dmdp->type = mtype;
5775 	if (flag & CLONE_DEV) {
5776 		dmdp->type = DDM_ALIAS;
5777 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5778 	}
5779 	if (flag & PRIVONLY_DEV) {
5780 		dmdp->ddm_flags |= DM_NO_FSPERM;
5781 	}
5782 	if (read_priv || write_priv) {
5783 		dmdp->ddm_node_priv =
5784 		    devpolicy_priv_by_name(read_priv, write_priv);
5785 	}
5786 	dmdp->ddm_priv_mode = priv_mode;
5787 
5788 	ddi_append_minor_node(dip, dmdp);
5789 
5790 	/*
5791 	 * only log ddi_create_minor_node() calls which occur
5792 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5793 	 */
5794 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
5795 	    mtype != DDM_INTERNAL_PATH) {
5796 		(void) i_log_devfs_minor_create(dip, dmdp->ddm_name);
5797 	}
5798 
5799 	/*
5800 	 * Check if any dacf rules match the creation of this minor node
5801 	 */
5802 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5803 	return (DDI_SUCCESS);
5804 }
5805 
5806 int
5807 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
5808     minor_t minor_num, const char *node_type, int flag)
5809 {
5810 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5811 	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
5812 }
5813 
5814 int
5815 ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type,
5816     minor_t minor_num, const char *node_type, int flag,
5817     const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5818 {
5819 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5820 	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5821 }
5822 
5823 int
5824 ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type,
5825     minor_t minor_num, const char *node_type, int flag)
5826 {
5827 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5828 	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5829 }
5830 
5831 /*
5832  * Internal (non-ddi) routine for drivers to export names known
5833  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5834  * but not exported externally to /dev
5835  */
5836 int
5837 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5838     minor_t minor_num)
5839 {
5840 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5841 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5842 }
5843 
/*
 * Remove the minor node named 'name' from 'dip'; a NULL name removes
 * every minor node on the dip.  Missing names are not an error.
 */
void
ddi_remove_minor_node(dev_info_t *dip, const char *name)
{
	int			circ;
	struct ddi_minor_data	*dmdp, *dmdp1;
	struct ddi_minor_data	**dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor now: dmdp may be freed below */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal paths are never logged to devfs */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink the freed node from the list */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5888 
5889 
5890 int
5891 ddi_in_panic()
5892 {
5893 	return (panicstr != NULL);
5894 }
5895 
5896 
5897 /*
5898  * Find first bit set in a mask (returned counting from 1 up)
5899  */
5900 
5901 int
5902 ddi_ffs(long mask)
5903 {
5904 	return (ffs(mask));
5905 }
5906 
5907 /*
5908  * Find last bit set. Take mask and clear
5909  * all but the most significant bit, and
5910  * then let ffs do the rest of the work.
5911  *
5912  * Algorithm courtesy of Steve Chessin.
5913  */
5914 
5915 int
5916 ddi_fls(long mask)
5917 {
5918 	while (mask) {
5919 		long nx;
5920 
5921 		if ((nx = (mask & (mask - 1))) == 0)
5922 			break;
5923 		mask = nx;
5924 	}
5925 	return (ffs(mask));
5926 }
5927 
5928 /*
5929  * The ddi_soft_state_* routines comprise generic storage management utilities
5930  * for driver soft state structures (in "the old days," this was done with
5931  * statically sized array - big systems and dynamic loading and unloading
5932  * make heap allocation more attractive).
5933  */
5934 
5935 /*
5936  * Allocate a set of pointers to 'n_items' objects of size 'size'
5937  * bytes.  Each pointer is initialized to nil.
5938  *
5939  * The 'size' and 'n_items' values are stashed in the opaque
5940  * handle returned to the caller.
5941  *
5942  * This implementation interprets 'set of pointers' to mean 'array
5943  * of pointers' but note that nothing in the interface definition
5944  * precludes an implementation that uses, for example, a linked list.
5945  * However there should be a small efficiency gain from using an array
5946  * at lookup time.
5947  *
5948  * NOTE	As an optimization, we make our growable array allocations in
5949  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5950  *	gives us anyway.  It should save us some free/realloc's ..
5951  *
5952  *	As a further optimization, we make the growable array start out
5953  *	with MIN_N_ITEMS in it.
5954  */
5955 
5956 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
5957 
5958 int
5959 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
5960 {
5961 	i_ddi_soft_state	*ss;
5962 
5963 	if (state_p == NULL || size == 0)
5964 		return (EINVAL);
5965 
5966 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
5967 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
5968 	ss->size = size;
5969 
5970 	if (n_items < MIN_N_ITEMS)
5971 		ss->n_items = MIN_N_ITEMS;
5972 	else {
5973 		int bitlog;
5974 
5975 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
5976 			bitlog--;
5977 		ss->n_items = 1 << bitlog;
5978 	}
5979 
5980 	ASSERT(ss->n_items >= n_items);
5981 
5982 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
5983 
5984 	*state_p = ss;
5985 	return (0);
5986 }
5987 
5988 /*
5989  * Allocate a state structure of size 'size' to be associated
5990  * with item 'item'.
5991  *
5992  * In this implementation, the array is extended to
5993  * allow the requested offset, if needed.
5994  */
/*
 * Allocate (zeroed) state of ss->size bytes for slot 'item', growing the
 * pointer array if necessary.  Fails for a bad handle, negative item, or
 * an already-occupied slot.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 means the set was never initialized (or was finied) */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/* publish the grown array; lock-free readers see old or new */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6087 
6088 /*
6089  * Fetch a pointer to the allocated soft state structure.
6090  *
6091  * This is designed to be cheap.
6092  *
6093  * There's an argument that there should be more checking for
6094  * nil pointers and out of bounds on the array.. but we do a lot
6095  * of that in the alloc/free routines.
6096  *
6097  * An array has the convenience that we don't need to lock read-access
6098  * to it c.f. a linked list.  However our "expanding array" strategy
6099  * means that we should hold a readers lock on the i_ddi_soft_state
6100  * structure.
6101  *
6102  * However, from a performance viewpoint, we need to do it without
6103  * any locks at all -- this also makes it a leaf routine.  The algorithm
6104  * is 'lock-free' because we only discard the pointer arrays at
6105  * ddi_soft_state_fini() time.
6106  */
6107 void *
6108 ddi_get_soft_state(void *state, int item)
6109 {
6110 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6111 
6112 	ASSERT((ss != NULL) && (item >= 0));
6113 
6114 	if (item < ss->n_items && ss->array != NULL)
6115 		return (ss->array[item]);
6116 	return (NULL);
6117 }
6118 
6119 /*
6120  * Free the state structure corresponding to 'item.'   Freeing an
6121  * element that has either gone or was never allocated is not
6122  * considered an error.  Note that we free the state structure, but
6123  * we don't shrink our pointer array, or discard 'dirty' arrays,
6124  * since even a few pointers don't really waste too much memory.
6125  *
6126  * Passing an item number that is out of bounds, or a null pointer will
6127  * provoke an error message.
6128  */
/*
 * Free the state structure in slot 'item' (no-op if the slot is already
 * empty).  A null handle, bad handle, or out-of-range item provokes a
 * warning but is otherwise harmless.
 */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*element;
	static char		msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* claim the element under the lock; free it after dropping */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/*
	 * NOTE(review): ss->size is read after dropping the lock; this
	 * assumes no concurrent ddi_soft_state_fini() of the same set
	 * (see the fini comment below stating the same assumption).
	 */
	if (element)
		kmem_free(element, ss->size);
}
6163 
6164 /*
6165  * Free the entire set of pointers, and any
6166  * soft state structures contained therein.
6167  *
6168  * Note that we don't grab the ss->lock mutex, even though
6169  * we're inspecting the various fields of the data structure.
6170  *
6171  * There is an implicit assumption that this routine will
6172  * never run concurrently with any of the above on this
6173  * particular state structure i.e. by the time the driver
6174  * calls this routine, there should be no other threads
6175  * running in the driver.
6176  */
/*
 * Tear down an entire soft-state set: free every allocated element, the
 * live pointer array, all 'dirty' arrays left over from grow operations,
 * and finally the set header itself.  *state_p is reset to NULL.
 */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state	*ss, *dirty;
	int			item;
	static char		msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 indicates an uninitialized (or already finied) set */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->n_items > 0) {
		/* free every remaining element, then the live array */
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6217 
6218 #define	SS_N_ITEMS_PER_HASH	16
6219 #define	SS_MIN_HASH_SZ		16
6220 #define	SS_MAX_HASH_SZ		4096
6221 
6222 int
6223 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6224     int n_items)
6225 {
6226 	i_ddi_soft_state_bystr	*sss;
6227 	int			hash_sz;
6228 
6229 	ASSERT(state_p && size && n_items);
6230 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6231 		return (EINVAL);
6232 
6233 	/* current implementation is based on hash, convert n_items to hash */
6234 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6235 	if (hash_sz < SS_MIN_HASH_SZ)
6236 		hash_sz = SS_MIN_HASH_SZ;
6237 	else if (hash_sz > SS_MAX_HASH_SZ)
6238 		hash_sz = SS_MAX_HASH_SZ;
6239 
6240 	/* allocate soft_state pool */
6241 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6242 	sss->ss_size = size;
6243 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6244 	    hash_sz, mod_hash_null_valdtor);
6245 	*state_p = (ddi_soft_state_bystr *)sss;
6246 	return (0);
6247 }
6248 
6249 int
6250 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6251 {
6252 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6253 	void			*sso;
6254 	char			*dup_str;
6255 
6256 	ASSERT(sss && str && sss->ss_mod_hash);
6257 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6258 		return (DDI_FAILURE);
6259 	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6260 	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6261 	if (mod_hash_insert(sss->ss_mod_hash,
6262 	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6263 		return (DDI_SUCCESS);
6264 
6265 	/*
6266 	 * The only error from an strhash insert is caused by a duplicate key.
6267 	 * We refuse to tread on an existing elements, so free and fail.
6268 	 */
6269 	kmem_free(dup_str, strlen(dup_str) + 1);
6270 	kmem_free(sso, sss->ss_size);
6271 	return (DDI_FAILURE);
6272 }
6273 
6274 void *
6275 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6276 {
6277 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6278 	void			*sso;
6279 
6280 	ASSERT(sss && str && sss->ss_mod_hash);
6281 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6282 		return (NULL);
6283 
6284 	if (mod_hash_find(sss->ss_mod_hash,
6285 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6286 		return (sso);
6287 	return (NULL);
6288 }
6289 
6290 void
6291 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6292 {
6293 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6294 	void			*sso;
6295 
6296 	ASSERT(sss && str && sss->ss_mod_hash);
6297 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6298 		return;
6299 
6300 	(void) mod_hash_remove(sss->ss_mod_hash,
6301 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6302 	kmem_free(sso, sss->ss_size);
6303 }
6304 
6305 void
6306 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6307 {
6308 	i_ddi_soft_state_bystr	*sss;
6309 
6310 	ASSERT(state_p);
6311 	if (state_p == NULL)
6312 		return;
6313 
6314 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6315 	if (sss == NULL)
6316 		return;
6317 
6318 	ASSERT(sss->ss_mod_hash);
6319 	if (sss->ss_mod_hash) {
6320 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6321 		sss->ss_mod_hash = NULL;
6322 	}
6323 
6324 	kmem_free(sss, sizeof (*sss));
6325 	*state_p = NULL;
6326 }
6327 
6328 /*
6329  * The ddi_strid_* routines provide string-to-index management utilities.
6330  */
6331 /* allocate and initialize an strid set */
6332 int
6333 ddi_strid_init(ddi_strid **strid_p, int n_items)
6334 {
6335 	i_ddi_strid	*ss;
6336 	int		hash_sz;
6337 
6338 	if (strid_p == NULL)
6339 		return (DDI_FAILURE);
6340 
6341 	/* current implementation is based on hash, convert n_items to hash */
6342 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6343 	if (hash_sz < SS_MIN_HASH_SZ)
6344 		hash_sz = SS_MIN_HASH_SZ;
6345 	else if (hash_sz > SS_MAX_HASH_SZ)
6346 		hash_sz = SS_MAX_HASH_SZ;
6347 
6348 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6349 	ss->strid_chunksz = n_items;
6350 	ss->strid_spacesz = n_items;
6351 	ss->strid_space = id_space_create("strid", 1, n_items);
6352 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6353 	    mod_hash_null_valdtor);
6354 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6355 	    mod_hash_null_valdtor);
6356 	*strid_p = (ddi_strid *)ss;
6357 	return (DDI_SUCCESS);
6358 }
6359 
6360 /* allocate an id mapping within the specified set for str, return id */
6361 static id_t
6362 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6363 {
6364 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6365 	id_t		id;
6366 	char		*s;
6367 
6368 	ASSERT(ss && str);
6369 	if ((ss == NULL) || (str == NULL))
6370 		return (0);
6371 
6372 	/*
6373 	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6374 	 * range as compressed as possible.  This is important to minimize
6375 	 * the amount of space used when the id is used as a ddi_soft_state
6376 	 * index by the caller.
6377 	 *
6378 	 * If the id list is exhausted, increase the size of the list
6379 	 * by the chuck size specified in ddi_strid_init and reattempt
6380 	 * the allocation
6381 	 */
6382 	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6383 		id_space_extend(ss->strid_space, ss->strid_spacesz,
6384 		    ss->strid_spacesz + ss->strid_chunksz);
6385 		ss->strid_spacesz += ss->strid_chunksz;
6386 		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6387 			return (0);
6388 	}
6389 
6390 	/*
6391 	 * NOTE: since we create and destroy in unison we can save space by
6392 	 * using bystr key as the byid value.  This means destroy must occur
6393 	 * in (byid, bystr) order.
6394 	 */
6395 	s = i_ddi_strdup(str, KM_SLEEP);
6396 	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6397 	    (mod_hash_val_t)(intptr_t)id) != 0) {
6398 		ddi_strid_free(strid, id);
6399 		return (0);
6400 	}
6401 	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6402 	    (mod_hash_val_t)s) != 0) {
6403 		ddi_strid_free(strid, id);
6404 		return (0);
6405 	}
6406 
6407 	/* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6408 	return (id);
6409 }
6410 
6411 /* allocate an id mapping within the specified set for str, return id */
6412 id_t
6413 ddi_strid_alloc(ddi_strid *strid, char *str)
6414 {
6415 	return (i_ddi_strid_alloc(strid, str));
6416 }
6417 
6418 /* return the id within the specified strid given the str */
6419 id_t
6420 ddi_strid_str2id(ddi_strid *strid, char *str)
6421 {
6422 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6423 	id_t		id = 0;
6424 	mod_hash_val_t	hv;
6425 
6426 	ASSERT(ss && str);
6427 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6428 	    (mod_hash_key_t)str, &hv) == 0))
6429 		id = (int)(intptr_t)hv;
6430 	return (id);
6431 }
6432 
6433 /* return str within the specified strid given the id */
6434 char *
6435 ddi_strid_id2str(ddi_strid *strid, id_t id)
6436 {
6437 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6438 	char		*str = NULL;
6439 	mod_hash_val_t	hv;
6440 
6441 	ASSERT(ss && id > 0);
6442 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6443 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6444 		str = (char *)hv;
6445 	return (str);
6446 }
6447 
/*
 * Free the id mapping within the specified strid.  The id is returned
 * to the id space and both hash entries are removed.
 */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	char		*str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/* str was the shared key; remove the bystr entry last */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6467 
6468 /* destroy the strid set */
6469 void
6470 ddi_strid_fini(ddi_strid **strid_p)
6471 {
6472 	i_ddi_strid	*ss;
6473 
6474 	ASSERT(strid_p);
6475 	if (strid_p == NULL)
6476 		return;
6477 
6478 	ss = (i_ddi_strid *)(*strid_p);
6479 	if (ss == NULL)
6480 		return;
6481 
6482 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6483 	if (ss->strid_byid)
6484 		mod_hash_destroy_hash(ss->strid_byid);
6485 	if (ss->strid_byid)
6486 		mod_hash_destroy_hash(ss->strid_bystr);
6487 	if (ss->strid_space)
6488 		id_space_destroy(ss->strid_space);
6489 	kmem_free(ss, sizeof (*ss));
6490 	*strid_p = NULL;
6491 }
6492 
6493 /*
6494  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6495  * Storage is double buffered to prevent updates during devi_addr use -
6496  * double buffering is adaquate for reliable ddi_deviname() consumption.
6497  * The double buffer is not freed until dev_info structure destruction
6498  * (by i_ddi_free_node).
6499  */
6500 void
6501 ddi_set_name_addr(dev_info_t *dip, char *name)
6502 {
6503 	char	*buf = DEVI(dip)->devi_addr_buf;
6504 	char	*newaddr;
6505 
6506 	if (buf == NULL) {
6507 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6508 		DEVI(dip)->devi_addr_buf = buf;
6509 	}
6510 
6511 	if (name) {
6512 		ASSERT(strlen(name) < MAXNAMELEN);
6513 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6514 		    (buf + MAXNAMELEN) : buf;
6515 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6516 	} else
6517 		newaddr = NULL;
6518 
6519 	DEVI(dip)->devi_addr = newaddr;
6520 }
6521 
/*
 * Return the unit-address most recently published by ddi_set_name_addr(),
 * or NULL if none is set.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6527 
/* Attach parent-private data 'pd' to the node; see ddi_get_parent_data(). */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6533 
/* Return the parent-private data previously set by ddi_set_parent_data(). */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6539 
6540 /*
6541  * ddi_name_to_major: returns the major number of a named module,
6542  * derived from the current driver alias binding.
6543  *
6544  * Caveat: drivers should avoid the use of this function, in particular
6545  * together with ddi_get_name/ddi_binding name, as per
6546  *	major = ddi_name_to_major(ddi_get_name(devi));
6547  * ddi_name_to_major() relies on the state of the device/alias binding,
6548  * which can and does change dynamically as aliases are administered
6549  * over time.  An attached device instance cannot rely on the major
6550  * number returned by ddi_name_to_major() to match its own major number.
6551  *
6552  * For driver use, ddi_driver_major() reliably returns the major number
6553  * for the module to which the device was bound at attach time over
6554  * the life of the instance.
6555  *	major = ddi_driver_major(dev_info_t *)
6556  */
6557 major_t
6558 ddi_name_to_major(char *name)
6559 {
6560 	return (mod_name_to_major(name));
6561 }
6562 
6563 /*
6564  * ddi_major_to_name: Returns the module name bound to a major number.
6565  */
6566 char *
6567 ddi_major_to_name(major_t major)
6568 {
6569 	return (mod_major_to_name(major));
6570 }
6571 
6572 /*
6573  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6574  * pointed at by 'name.'  A devinfo node is named as a result of calling
6575  * ddi_initchild().
6576  *
6577  * Note: the driver must be held before calling this function!
6578  */
6579 char *
6580 ddi_deviname(dev_info_t *dip, char *name)
6581 {
6582 	char *addrname;
6583 	char none = '\0';
6584 
6585 	if (dip == ddi_root_node()) {
6586 		*name = '\0';
6587 		return (name);
6588 	}
6589 
6590 	if (i_ddi_node_state(dip) < DS_BOUND) {
6591 		addrname = &none;
6592 	} else {
6593 		/*
6594 		 * Use ddi_get_name_addr() without checking state so we get
6595 		 * a unit-address if we are called after ddi_set_name_addr()
6596 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6597 		 * node promotion to DS_INITIALIZED.  We currently have
6598 		 * two situations where we are called in this state:
6599 		 *   o  For framework processing of a path-oriented alias.
6600 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6601 		 *	from it's tran_tgt_init(9E) implementation.
6602 		 */
6603 		addrname = ddi_get_name_addr(dip);
6604 		if (addrname == NULL)
6605 			addrname = &none;
6606 	}
6607 
6608 	if (*addrname == '\0') {
6609 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6610 	} else {
6611 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6612 	}
6613 
6614 	return (name);
6615 }
6616 
6617 /*
6618  * Spits out the name of device node, typically name@addr, for a given node,
6619  * using the driver name, not the nodename.
6620  *
6621  * Used by match_parent. Not to be used elsewhere.
6622  */
6623 char *
6624 i_ddi_parname(dev_info_t *dip, char *name)
6625 {
6626 	char *addrname;
6627 
6628 	if (dip == ddi_root_node()) {
6629 		*name = '\0';
6630 		return (name);
6631 	}
6632 
6633 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6634 
6635 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6636 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6637 	else
6638 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6639 	return (name);
6640 }
6641 
/*
 * Recursive worker for ddi_pathname(): build the path for 'dip' by
 * walking up to the root and appending each "/name@addr" component
 * on the way back down.
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	/* The root node terminates the recursion with an empty path. */
	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}
6656 
/*
 * Return the full devinfo pathname for 'dip' in the caller-supplied
 * buffer 'path' (assumed large enough, typically MAXPATHLEN — not
 * checked here).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6662 
/*
 * Return the pathname for the node owning minor data 'dmdp', with the
 * minor name appended as ":name" when present.  Produces an empty
 * string when the minor data has no devinfo node.
 */
char *
ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
{
	if (dmdp->dip == NULL)
		*path = '\0';
	else {
		(void) ddi_pathname(dmdp->dip, path);
		if (dmdp->ddm_name) {
			(void) strcat(path, ":");
			(void) strcat(path, dmdp->ddm_name);
		}
	}
	return (path);
}
6677 
/*
 * Recursive worker for ddi_pathname_obp(): build the OBP-based path for
 * 'dip' into 'path'.  Returns NULL when neither the node nor any
 * ancestor below the root carries an "obp-path" property.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 */
	bp = path + strlen(path);
	/* avoid a doubled '/' when the parent path already ends in one */
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6714 
6715 /*
6716  * return the 'obp-path' based path for the given node, or NULL if the node
6717  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6718  * function can't be called from interrupt context (since we need to
6719  * lookup a string property).
6720  */
6721 char *
6722 ddi_pathname_obp(dev_info_t *dip, char *path)
6723 {
6724 	ASSERT(!servicing_interrupt());
6725 	if (dip == NULL || path == NULL)
6726 		return (NULL);
6727 
6728 	/* split work into a separate function to aid debugging */
6729 	return (pathname_work_obp(dip, path));
6730 }
6731 
6732 int
6733 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6734 {
6735 	dev_info_t *pdip;
6736 	char *obp_path = NULL;
6737 	int rc = DDI_FAILURE;
6738 
6739 	if (dip == NULL)
6740 		return (DDI_FAILURE);
6741 
6742 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6743 
6744 	pdip = ddi_get_parent(dip);
6745 
6746 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6747 		(void) ddi_pathname(pdip, obp_path);
6748 	}
6749 
6750 	if (component) {
6751 		(void) strncat(obp_path, "/", MAXPATHLEN);
6752 		(void) strncat(obp_path, component, MAXPATHLEN);
6753 	}
6754 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6755 	    obp_path);
6756 
6757 	if (obp_path)
6758 		kmem_free(obp_path, MAXPATHLEN);
6759 
6760 	return (rc);
6761 }
6762 
6763 /*
6764  * Given a dev_t, return the pathname of the corresponding device in the
6765  * buffer pointed at by "path."  The buffer is assumed to be large enough
6766  * to hold the pathname of the device (MAXPATHLEN).
6767  *
6768  * The pathname of a device is the pathname of the devinfo node to which
6769  * the device "belongs," concatenated with the character ':' and the name
6770  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6771  * just the pathname of the devinfo node is returned without driving attach
6772  * of that node.  For a non-zero spec_type, an attach is performed and a
6773  * search of the minor list occurs.
6774  *
6775  * It is possible that the path associated with the dev_t is not
6776  * currently available in the devinfo tree.  In order to have a
6777  * dev_t, a device must have been discovered before, which means
6778  * that the path is always in the instance tree.  The one exception
6779  * to this is if the dev_t is associated with a pseudo driver, in
6780  * which case the device must exist on the pseudo branch of the
6781  * devinfo tree as a result of parsing .conf files.
6782  */
6783 int
6784 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6785 {
6786 	int		circ;
6787 	major_t		major = getmajor(devt);
6788 	int		instance;
6789 	dev_info_t	*dip;
6790 	char		*minorname;
6791 	char		*drvname;
6792 
6793 	if (major >= devcnt)
6794 		goto fail;
6795 	if (major == clone_major) {
6796 		/* clone has no minor nodes, manufacture the path here */
6797 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6798 			goto fail;
6799 
6800 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6801 		return (DDI_SUCCESS);
6802 	}
6803 
6804 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6805 	if ((instance = dev_to_instance(devt)) == -1)
6806 		goto fail;
6807 
6808 	/* reconstruct the path given the major/instance */
6809 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6810 		goto fail;
6811 
6812 	/* if spec_type given we must drive attach and search minor nodes */
6813 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6814 		/* attach the path so we can search minors */
6815 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6816 			goto fail;
6817 
6818 		/* Add minorname to path. */
6819 		ndi_devi_enter(dip, &circ);
6820 		minorname = i_ddi_devtspectype_to_minorname(dip,
6821 		    devt, spec_type);
6822 		if (minorname) {
6823 			(void) strcat(path, ":");
6824 			(void) strcat(path, minorname);
6825 		}
6826 		ndi_devi_exit(dip, circ);
6827 		ddi_release_devi(dip);
6828 		if (minorname == NULL)
6829 			goto fail;
6830 	}
6831 	ASSERT(strlen(path) < MAXPATHLEN);
6832 	return (DDI_SUCCESS);
6833 
6834 fail:	*path = 0;
6835 	return (DDI_FAILURE);
6836 }
6837 
6838 /*
6839  * Given a major number and an instance, return the path.
6840  * This interface does NOT drive attach.
6841  */
6842 int
6843 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6844 {
6845 	struct devnames *dnp;
6846 	dev_info_t	*dip;
6847 
6848 	if ((major >= devcnt) || (instance == -1)) {
6849 		*path = 0;
6850 		return (DDI_FAILURE);
6851 	}
6852 
6853 	/* look for the major/instance in the instance tree */
6854 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6855 	    path) == DDI_SUCCESS) {
6856 		ASSERT(strlen(path) < MAXPATHLEN);
6857 		return (DDI_SUCCESS);
6858 	}
6859 
6860 	/*
6861 	 * Not in instance tree, find the instance on the per driver list and
6862 	 * construct path to instance via ddi_pathname(). This is how paths
6863 	 * down the 'pseudo' branch are constructed.
6864 	 */
6865 	dnp = &(devnamesp[major]);
6866 	LOCK_DEV_OPS(&(dnp->dn_lock));
6867 	for (dip = dnp->dn_head; dip;
6868 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6869 		/* Skip if instance does not match. */
6870 		if (DEVI(dip)->devi_instance != instance)
6871 			continue;
6872 
6873 		/*
6874 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6875 		 * node demotion, so it is not an effective way of ensuring
6876 		 * that the ddi_pathname result has a unit-address.  Instead,
6877 		 * we reverify the node state after calling ddi_pathname().
6878 		 */
6879 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6880 			(void) ddi_pathname(dip, path);
6881 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6882 				continue;
6883 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6884 			ASSERT(strlen(path) < MAXPATHLEN);
6885 			return (DDI_SUCCESS);
6886 		}
6887 	}
6888 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6889 
6890 	/* can't reconstruct the path */
6891 	*path = 0;
6892 	return (DDI_FAILURE);
6893 }
6894 
#define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"

/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called "SUNW,gld_v0_ppa"
 * (GLD_DRIVER_PPA) that can be queried here.
 *
 * The only time this function is used is when a system is booting over nfs.
 * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
 */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/* default to the instance number when the property is absent */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
6917 
6918 /*
6919  * i_ddi_devi_set_ppa() should only be called from gld_register()
6920  * and only for GLD v0 drivers
6921  */
6922 void
6923 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6924 {
6925 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6926 }
6927 

/*
 * Private DDI Console bell functions.
 */

/* Ring the console bell for 'duration', if a bell handler is registered. */
void
ddi_ring_console_bell(clock_t duration)
{
	if (ddi_console_bell_func != NULL)
		(*ddi_console_bell_func)(duration);
}
6938 
/* Register (or clear, with NULL) the console bell handler. */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
6944 
6945 int
6946 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6947 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6948 {
6949 	int (*funcp)() = ddi_dma_allochdl;
6950 	ddi_dma_attr_t dma_attr;
6951 	struct bus_ops *bop;
6952 
6953 	if (attr == (ddi_dma_attr_t *)0)
6954 		return (DDI_DMA_BADATTR);
6955 
6956 	dma_attr = *attr;
6957 
6958 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6959 	if (bop && bop->bus_dma_allochdl)
6960 		funcp = bop->bus_dma_allochdl;
6961 
6962 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6963 }
6964 
/* Free a DMA handle previously allocated with ddi_dma_alloc_handle(). */
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6971 
/*
 * Callback list id used by ddi_dma_mem_alloc()/ddi_dma_mem_free() to
 * re-invoke callers whose allocation failed with a callback waitfp.
 */
static uintptr_t dma_mem_list_id = 0;
6973 
6974 
6975 int
6976 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6977 	ddi_device_acc_attr_t *accattrp, uint_t flags,
6978 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6979 	size_t *real_length, ddi_acc_handle_t *handlep)
6980 {
6981 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6982 	dev_info_t *dip = hp->dmai_rdip;
6983 	ddi_acc_hdl_t *ap;
6984 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6985 	uint_t sleepflag, xfermodes;
6986 	int (*fp)(caddr_t);
6987 	int rval;
6988 
6989 	if (waitfp == DDI_DMA_SLEEP)
6990 		fp = (int (*)())KM_SLEEP;
6991 	else if (waitfp == DDI_DMA_DONTWAIT)
6992 		fp = (int (*)())KM_NOSLEEP;
6993 	else
6994 		fp = waitfp;
6995 	*handlep = impl_acc_hdl_alloc(fp, arg);
6996 	if (*handlep == NULL)
6997 		return (DDI_FAILURE);
6998 
6999 	/* check if the cache attributes are supported */
7000 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
7001 		return (DDI_FAILURE);
7002 
7003 	/*
7004 	 * Transfer the meaningful bits to xfermodes.
7005 	 * Double-check if the 3rd party driver correctly sets the bits.
7006 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7007 	 */
7008 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7009 	if (xfermodes == 0) {
7010 		xfermodes = DDI_DMA_STREAMING;
7011 	}
7012 
7013 	/*
7014 	 * initialize the common elements of data access handle
7015 	 */
7016 	ap = impl_acc_hdl_get(*handlep);
7017 	ap->ah_vers = VERS_ACCHDL;
7018 	ap->ah_dip = dip;
7019 	ap->ah_offset = 0;
7020 	ap->ah_len = 0;
7021 	ap->ah_xfermodes = flags;
7022 	ap->ah_acc = *accattrp;
7023 
7024 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7025 	if (xfermodes == DDI_DMA_CONSISTENT) {
7026 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7027 		    flags, accattrp, kaddrp, NULL, ap);
7028 		*real_length = length;
7029 	} else {
7030 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7031 		    flags, accattrp, kaddrp, real_length, ap);
7032 	}
7033 	if (rval == DDI_SUCCESS) {
7034 		ap->ah_len = (off_t)(*real_length);
7035 		ap->ah_addr = *kaddrp;
7036 	} else {
7037 		impl_acc_hdl_free(*handlep);
7038 		*handlep = (ddi_acc_handle_t)NULL;
7039 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7040 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7041 		}
7042 		rval = DDI_FAILURE;
7043 	}
7044 	return (rval);
7045 }
7046 
/*
 * Free DMA memory allocated by ddi_dma_mem_alloc() and release the
 * associated access handle, then run any callbacks queued on the DMA
 * memory callback list.
 */
void
ddi_dma_mem_free(ddi_acc_handle_t *handlep)
{
	ddi_acc_hdl_t *ap;

	ap = impl_acc_hdl_get(*handlep);
	ASSERT(ap);

	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);

	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;

	/* memory was released: retry any callers queued by a failed alloc */
	if (dma_mem_list_id != 0) {
		ddi_run_callback(&dma_mem_list_id);
	}
}
7067 
/*
 * Bind the memory described by buf(9S) structure 'bp' to DMA handle
 * 'handle'.  A ddi_dma_req is built from the buffer — as a page list
 * (B_PAGEIO), shadow page list (B_SHADOW), or virtual address — and
 * passed to the nexus bind routine cached in the devinfo node.  NULL
 * cookiep/ccountp are tolerated by substituting locals.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the object by its page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow list supplies the backing pages */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* route the request to the bind routine cached in the devinfo */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7131 
/*
 * Bind the virtual address range [addr, addr + len) in address space
 * 'as' to DMA handle 'handle'.  Zero length is rejected with
 * DDI_DMA_NOMAPPING.  NULL cookiep/ccountp are tolerated by
 * substituting locals; the request is routed to the nexus bind routine
 * cached in the devinfo node.
 */
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}

	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* route the request to the bind routine cached in the devinfo */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7169 
/*
 * Copy the next DMA cookie of a bound handle into *cookiep and advance
 * the handle's cookie position.  Requesting more cookies than the
 * binding produced is a caller bug and panics.
 */
void
ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	ddi_dma_cookie_t *cp;

	if (hp->dmai_curcookie >= hp->dmai_ncookies) {
		panic("ddi_dma_nextcookie() called too many times on handle %p",
		    hp);
	}

	cp = hp->dmai_cookie;
	ASSERT(cp);

	/* copy field by field into the caller's cookie */
	cookiep->dmac_notused = cp->dmac_notused;
	cookiep->dmac_type = cp->dmac_type;
	cookiep->dmac_address = cp->dmac_address;
	cookiep->dmac_size = cp->dmac_size;
	/* dmai_cookie always points at the next unread cookie */
	hp->dmai_cookie++;
	hp->dmai_curcookie++;
}
7191 
/* Return the total number of cookies produced by the handle's binding. */
int
ddi_dma_ncookies(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;

	return (hp->dmai_ncookies);
}
7199 
/*
 * Iterate over a bound handle's cookie array: pass NULL to get the
 * first cookie, then the previous return value to advance.  Returns
 * NULL when iteration is complete, when the handle has no cookies, or
 * when 'iter' does not point into the array.
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base, *end;

	if (hp->dmai_ncookies == 0) {
		return (NULL);
	}

	/* back up from the "next cookie" pointer to the array start */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	end = base + hp->dmai_ncookies;
	if (iter == NULL) {
		return (base);
	}

	if ((uintptr_t)iter < (uintptr_t)base ||
	    (uintptr_t)iter >= (uintptr_t)end) {
		return (NULL);
	}

	iter++;
	if (iter == end) {
		return (NULL);
	}

	return (iter);
}
7228 
7229 const ddi_dma_cookie_t *
7230 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index)
7231 {
7232 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7233 	const ddi_dma_cookie_t *base;
7234 
7235 	if (index >= hp->dmai_ncookies) {
7236 		return (NULL);
7237 	}
7238 
7239 	base = hp->dmai_cookie - hp->dmai_curcookie;
7240 	return (base + index);
7241 }
7242 
/*
 * Return the sole cookie of a handle whose binding produced exactly
 * one; any other cookie count is API misuse and panics.
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_one(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base;

	if (hp->dmai_ncookies != 1) {
		panic("ddi_dma_cookie_one() called with improper handle %p",
		    hp);
	}
	ASSERT3P(hp->dmai_cookie, !=, NULL);

	/* rewind from the "next cookie" position to the single cookie */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	return (base);
}
7258 
7259 int
7260 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7261 {
7262 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7263 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7264 		return (DDI_FAILURE);
7265 	} else {
7266 		*nwinp = hp->dmai_nwin;
7267 		return (DDI_SUCCESS);
7268 	}
7269 }
7270 
/*
 * Activate DMA window 'win' of handle 'h', returning its offset,
 * length, first cookie and cookie count.  NULL cookiep/ccountp are
 * tolerated by substituting locals; a nexus bus_dma_win routine
 * overrides the default ddi_dma_win.
 */
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;
	ddi_dma_cookie_t cookie;
	uint_t count;

	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
7292 
/*
 * Request 64-bit SBus transfers for handle 'h' with the given burst
 * sizes, via the DDI_DMA_SET_SBUS64 mctl operation.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7299 
/* Default fault check: report the fault state recorded on the handle. */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7305 
/*
 * Check a DMA handle for faults using the handle's own fault-check
 * routine when present, falling back to the default; normalize the
 * result to DDI_SUCCESS/DDI_FAILURE.
 */
int
ddi_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	int (*check)(ddi_dma_impl_t *);

	if ((check = hp->dmai_fault_check) == NULL)
		check = i_ddi_dma_fault_check;

	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
7317 
/*
 * Mark the DMA handle as faulted; the fault-notify callback (if any)
 * fires only on the 0 -> 1 transition.
 */
void
i_ddi_dma_set_fault(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	void (*notify)(ddi_dma_impl_t *);

	if (!hp->dmai_fault) {
		hp->dmai_fault = 1;
		if ((notify = hp->dmai_fault_notify) != NULL)
			(*notify)(hp);
	}
}
7330 
/*
 * Clear the DMA handle's fault state; the fault-notify callback (if
 * any) fires only on the 1 -> 0 transition.
 */
void
i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	void (*notify)(ddi_dma_impl_t *);

	if (hp->dmai_fault) {
		hp->dmai_fault = 0;
		if ((notify = hp->dmai_fault_notify) != NULL)
			(*notify)(hp);
	}
}
7343 
7344 /*
7345  * register mapping routines.
7346  */
7347 int
7348 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7349 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7350 	ddi_acc_handle_t *handle)
7351 {
7352 	ddi_map_req_t mr;
7353 	ddi_acc_hdl_t *hp;
7354 	int result;
7355 
7356 	/*
7357 	 * Allocate and initialize the common elements of data access handle.
7358 	 */
7359 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7360 	hp = impl_acc_hdl_get(*handle);
7361 	hp->ah_vers = VERS_ACCHDL;
7362 	hp->ah_dip = dip;
7363 	hp->ah_rnumber = rnumber;
7364 	hp->ah_offset = offset;
7365 	hp->ah_len = len;
7366 	hp->ah_acc = *accattrp;
7367 
7368 	/*
7369 	 * Set up the mapping request and call to parent.
7370 	 */
7371 	mr.map_op = DDI_MO_MAP_LOCKED;
7372 	mr.map_type = DDI_MT_RNUMBER;
7373 	mr.map_obj.rnumber = rnumber;
7374 	mr.map_prot = PROT_READ | PROT_WRITE;
7375 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7376 	mr.map_handlep = hp;
7377 	mr.map_vers = DDI_MAP_VERSION;
7378 	result = ddi_map(dip, &mr, offset, len, addrp);
7379 
7380 	/*
7381 	 * check for end result
7382 	 */
7383 	if (result != DDI_SUCCESS) {
7384 		impl_acc_hdl_free(*handle);
7385 		*handle = (ddi_acc_handle_t)NULL;
7386 	} else {
7387 		hp->ah_addr = *addrp;
7388 	}
7389 
7390 	return (result);
7391 }
7392 
/*
 * Unmap a register mapping created by ddi_regs_map_setup() and free
 * the access handle, NULLing the caller's handle pointer.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* mirror the original mapping request, but as an unmap */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7421 
/*
 * Zero 'bytecount' bytes of device memory at dev_addr using repeated
 * accesses of width 'dev_datasz', advancing the device pointer by
 * 'dev_advcnt' elements per access.  Fails if bytecount is not a
 * multiple of the access size or the size code is unrecognized.
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
	ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7461 
/*
 * Copy 'bytecount' bytes between two device mappings using repeated
 * accesses of width 'dev_datasz', advancing the source and destination
 * pointers by src_advcnt/dest_advcnt elements per access.  Fails if
 * bytecount is not a multiple of the access size or the size code is
 * unrecognized.
 */
int
ddi_device_copy(
	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
	size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7527 
/* Exchange the two bytes of a 16-bit value. */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* Reverse the four bytes of a 32-bit value (built from two swap16s). */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* Reverse the eight bytes of a 64-bit value (built from two swap32s). */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7539 
/* Return value with its two bytes exchanged. */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}
7545 
/* Return value with its four bytes reversed. */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ff) << 24) |
	    ((value & 0x0000ff00) << 8) |
	    ((value & 0x00ff0000) >> 8) |
	    (value >> 24));
}
7551 
/* Return value with its eight bytes reversed. */
uint64_t
ddi_swap64(uint64_t value)
{
	return (((value & 0x00000000000000ffULL) << 56) |
	    ((value & 0x000000000000ff00ULL) << 40) |
	    ((value & 0x0000000000ff0000ULL) << 24) |
	    ((value & 0x00000000ff000000ULL) << 8) |
	    ((value & 0x000000ff00000000ULL) >> 8) |
	    ((value & 0x0000ff0000000000ULL) >> 24) |
	    ((value & 0x00ff000000000000ULL) >> 40) |
	    (value >> 56));
}
7557 
7558 /*
7559  * Convert a binding name to a driver name.
7560  * A binding name is the name used to determine the driver for a
7561  * device - it may be either an alias for the driver or the name
7562  * of the driver itself.
7563  */
7564 char *
7565 i_binding_to_drv_name(char *bname)
7566 {
7567 	major_t major_no;
7568 
7569 	ASSERT(bname != NULL);
7570 
7571 	if ((major_no = ddi_name_to_major(bname)) == -1)
7572 		return (NULL);
7573 	return (ddi_major_to_name(major_no));
7574 }
7575 
7576 /*
7577  * Search for minor name that has specified dev_t and spec_type.
7578  * If spec_type is zero then any dev_t match works.  Since we
7579  * are returning a pointer to the minor name string, we require the
7580  * caller to do the locking.
7581  */
7582 char *
7583 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7584 {
7585 	struct ddi_minor_data	*dmdp;
7586 
7587 	/*
7588 	 * The did layered driver currently intentionally returns a
7589 	 * devinfo ptr for an underlying sd instance based on a did
7590 	 * dev_t. In this case it is not an error.
7591 	 *
7592 	 * The did layered driver is associated with Sun Cluster.
7593 	 */
7594 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7595 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7596 
7597 	ASSERT(DEVI_BUSY_OWNED(dip));
7598 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7599 		if (((dmdp->type == DDM_MINOR) ||
7600 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7601 		    (dmdp->type == DDM_DEFAULT)) &&
7602 		    (dmdp->ddm_dev == dev) &&
7603 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7604 		    (dmdp->ddm_spec_type == spec_type)))
7605 			return (dmdp->ddm_name);
7606 	}
7607 
7608 	return (NULL);
7609 }
7610 
7611 /*
7612  * Find the devt and spectype of the specified minor_name.
7613  * Return DDI_FAILURE if minor_name not found. Since we are
7614  * returning everything via arguments we can do the locking.
7615  */
7616 int
7617 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7618 	dev_t *devtp, int *spectypep)
7619 {
7620 	int			circ;
7621 	struct ddi_minor_data	*dmdp;
7622 
7623 	/* deal with clone minor nodes */
7624 	if (dip == clone_dip) {
7625 		major_t	major;
7626 		/*
7627 		 * Make sure minor_name is a STREAMS driver.
7628 		 * We load the driver but don't attach to any instances.
7629 		 */
7630 
7631 		major = ddi_name_to_major(minor_name);
7632 		if (major == DDI_MAJOR_T_NONE)
7633 			return (DDI_FAILURE);
7634 
7635 		if (ddi_hold_driver(major) == NULL)
7636 			return (DDI_FAILURE);
7637 
7638 		if (STREAMSTAB(major) == NULL) {
7639 			ddi_rele_driver(major);
7640 			return (DDI_FAILURE);
7641 		}
7642 		ddi_rele_driver(major);
7643 
7644 		if (devtp)
7645 			*devtp = makedevice(clone_major, (minor_t)major);
7646 
7647 		if (spectypep)
7648 			*spectypep = S_IFCHR;
7649 
7650 		return (DDI_SUCCESS);
7651 	}
7652 
7653 	ndi_devi_enter(dip, &circ);
7654 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7655 		if (((dmdp->type != DDM_MINOR) &&
7656 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7657 		    (dmdp->type != DDM_DEFAULT)) ||
7658 		    strcmp(minor_name, dmdp->ddm_name))
7659 			continue;
7660 
7661 		if (devtp)
7662 			*devtp = dmdp->ddm_dev;
7663 
7664 		if (spectypep)
7665 			*spectypep = dmdp->ddm_spec_type;
7666 
7667 		ndi_devi_exit(dip, circ);
7668 		return (DDI_SUCCESS);
7669 	}
7670 	ndi_devi_exit(dip, circ);
7671 
7672 	return (DDI_FAILURE);
7673 }
7674 
/* serializes generation-number allocation for DEVID_FAB devids */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;

#ifdef DEBUG

/* debug tunables to deliberately corrupt devids for test coverage */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* when nonzero, trace devid-layer dev_t lookups via cmn_err */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* non-DEBUG kernels compile the tracing away entirely */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7696 
7697 
#ifdef	DEBUG

/* Print each dev_t in devs[] under the given message header. */
static void
ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
{
	int idx = 0;

	cmn_err(CE_CONT, "%s:\n", msg);
	while (idx < ndevs) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[idx]);
		idx++;
	}
}

/* Print each path string in paths[] under the given message header. */
static void
ddi_debug_devid_paths(char *msg, int npaths, char **paths)
{
	int idx = 0;

	cmn_err(CE_CONT, "%s:\n", msg);
	while (idx < npaths) {
		cmn_err(CE_CONT, "    %s\n", paths[idx]);
		idx++;
	}
}

/* Print the dev_ts resolved for a single device path. */
static void
ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
{
	int idx = 0;

	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
	while (idx < ndevs) {
		cmn_err(CE_CONT, "    0x%lx\n", devs[idx]);
		idx++;
	}
}

#endif	/* DEBUG */
7734 
7735 /*
7736  * Register device id into DDI framework.
7737  * Must be called when the driver is bound.
7738  */
7739 static int
7740 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7741 {
7742 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7743 	size_t		driver_len;
7744 	const char	*driver_name;
7745 	char		*devid_str;
7746 	major_t		major;
7747 
7748 	if ((dip == NULL) ||
7749 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7750 		return (DDI_FAILURE);
7751 
7752 	/* verify that the devid is valid */
7753 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7754 		return (DDI_FAILURE);
7755 
7756 	/* Updating driver name hint in devid */
7757 	driver_name = ddi_driver_name(dip);
7758 	driver_len = strlen(driver_name);
7759 	if (driver_len > DEVID_HINT_SIZE) {
7760 		/* Pick up last four characters of driver name */
7761 		driver_name += driver_len - DEVID_HINT_SIZE;
7762 		driver_len = DEVID_HINT_SIZE;
7763 	}
7764 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7765 	bcopy(driver_name, i_devid->did_driver, driver_len);
7766 
7767 #ifdef DEBUG
7768 	/* Corrupt the devid for testing. */
7769 	if (devid_register_corrupt)
7770 		i_devid->did_id[0] += devid_register_corrupt;
7771 	if (devid_register_corrupt_major &&
7772 	    (major == devid_register_corrupt_major))
7773 		i_devid->did_id[0] += 1;
7774 	if (devid_register_corrupt_hint)
7775 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7776 	if (devid_register_corrupt_hint_major &&
7777 	    (major == devid_register_corrupt_hint_major))
7778 		i_devid->did_driver[0] += 1;
7779 #endif /* DEBUG */
7780 
7781 	/* encode the devid as a string */
7782 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7783 		return (DDI_FAILURE);
7784 
7785 	/* add string as a string property */
7786 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7787 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7788 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7789 		    ddi_driver_name(dip), ddi_get_instance(dip));
7790 		ddi_devid_str_free(devid_str);
7791 		return (DDI_FAILURE);
7792 	}
7793 
7794 	/* keep pointer to devid string for interrupt context fma code */
7795 	if (DEVI(dip)->devi_devid_str)
7796 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7797 	DEVI(dip)->devi_devid_str = devid_str;
7798 	return (DDI_SUCCESS);
7799 }
7800 
7801 int
7802 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7803 {
7804 	int rval;
7805 
7806 	rval = i_ddi_devid_register(dip, devid);
7807 	if (rval == DDI_SUCCESS) {
7808 		/*
7809 		 * Register devid in devid-to-path cache
7810 		 */
7811 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7812 			mutex_enter(&DEVI(dip)->devi_lock);
7813 			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7814 			mutex_exit(&DEVI(dip)->devi_lock);
7815 		} else if (ddi_get_name_addr(dip)) {
7816 			/*
7817 			 * We only expect cache_register DDI_FAILURE when we
7818 			 * can't form the full path because of NULL devi_addr.
7819 			 */
7820 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7821 			    ddi_driver_name(dip), ddi_get_instance(dip));
7822 		}
7823 	} else {
7824 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7825 		    ddi_driver_name(dip), ddi_get_instance(dip));
7826 	}
7827 	return (rval);
7828 }
7829 
7830 /*
7831  * Remove (unregister) device id from DDI framework.
7832  * Must be called when device is detached.
7833  */
7834 static void
7835 i_ddi_devid_unregister(dev_info_t *dip)
7836 {
7837 	if (DEVI(dip)->devi_devid_str) {
7838 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7839 		DEVI(dip)->devi_devid_str = NULL;
7840 	}
7841 
7842 	/* remove the devid property */
7843 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7844 }
7845 
/*
 * Public devid unregister: clear the cached-devid flag, drop the node
 * from the devid-to-path cache, then remove the property and cached
 * string from the devinfo node.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7855 
7856 /*
7857  * Allocate and initialize a device id.
7858  */
7859 int
7860 ddi_devid_init(
7861 	dev_info_t	*dip,
7862 	ushort_t	devid_type,
7863 	ushort_t	nbytes,
7864 	void		*id,
7865 	ddi_devid_t	*ret_devid)
7866 {
7867 	impl_devid_t	*i_devid;
7868 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7869 	int		driver_len;
7870 	const char	*driver_name;
7871 
7872 	switch (devid_type) {
7873 	case DEVID_SCSI3_WWN:
7874 		/*FALLTHRU*/
7875 	case DEVID_SCSI_SERIAL:
7876 		/*FALLTHRU*/
7877 	case DEVID_ATA_SERIAL:
7878 		/*FALLTHRU*/
7879 	case DEVID_ENCAP:
7880 		if (nbytes == 0)
7881 			return (DDI_FAILURE);
7882 		if (id == NULL)
7883 			return (DDI_FAILURE);
7884 		break;
7885 	case DEVID_FAB:
7886 		if (nbytes != 0)
7887 			return (DDI_FAILURE);
7888 		if (id != NULL)
7889 			return (DDI_FAILURE);
7890 		nbytes = sizeof (int) +
7891 		    sizeof (struct timeval32) + sizeof (short);
7892 		sz += nbytes;
7893 		break;
7894 	default:
7895 		return (DDI_FAILURE);
7896 	}
7897 
7898 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7899 		return (DDI_FAILURE);
7900 
7901 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7902 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7903 	i_devid->did_rev_hi = DEVID_REV_MSB;
7904 	i_devid->did_rev_lo = DEVID_REV_LSB;
7905 	DEVID_FORMTYPE(i_devid, devid_type);
7906 	DEVID_FORMLEN(i_devid, nbytes);
7907 
7908 	/* Fill in driver name hint */
7909 	driver_name = ddi_driver_name(dip);
7910 	driver_len = strlen(driver_name);
7911 	if (driver_len > DEVID_HINT_SIZE) {
7912 		/* Pick up last four characters of driver name */
7913 		driver_name += driver_len - DEVID_HINT_SIZE;
7914 		driver_len = DEVID_HINT_SIZE;
7915 	}
7916 
7917 	bcopy(driver_name, i_devid->did_driver, driver_len);
7918 
7919 	/* Fill in id field */
7920 	if (devid_type == DEVID_FAB) {
7921 		char		*cp;
7922 		uint32_t	hostid;
7923 		struct timeval32 timestamp32;
7924 		int		i;
7925 		int		*ip;
7926 		short		gen;
7927 
7928 		/* increase the generation number */
7929 		mutex_enter(&devid_gen_mutex);
7930 		gen = devid_gen_number++;
7931 		mutex_exit(&devid_gen_mutex);
7932 
7933 		cp = i_devid->did_id;
7934 
7935 		/* Fill in host id (big-endian byte ordering) */
7936 		hostid = zone_get_hostid(NULL);
7937 		*cp++ = hibyte(hiword(hostid));
7938 		*cp++ = lobyte(hiword(hostid));
7939 		*cp++ = hibyte(loword(hostid));
7940 		*cp++ = lobyte(loword(hostid));
7941 
7942 		/*
7943 		 * Fill in timestamp (big-endian byte ordering)
7944 		 *
7945 		 * (Note that the format may have to be changed
7946 		 * before 2038 comes around, though it's arguably
7947 		 * unique enough as it is..)
7948 		 */
7949 		uniqtime32(&timestamp32);
7950 		ip = (int *)&timestamp32;
7951 		for (i = 0;
7952 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7953 			int	val;
7954 			val = *ip;
7955 			*cp++ = hibyte(hiword(val));
7956 			*cp++ = lobyte(hiword(val));
7957 			*cp++ = hibyte(loword(val));
7958 			*cp++ = lobyte(loword(val));
7959 		}
7960 
7961 		/* fill in the generation number */
7962 		*cp++ = hibyte(gen);
7963 		*cp++ = lobyte(gen);
7964 	} else
7965 		bcopy(id, i_devid->did_id, nbytes);
7966 
7967 	/* return device id */
7968 	*ret_devid = (ddi_devid_t)i_devid;
7969 	return (DDI_SUCCESS);
7970 }
7971 
/*
 * Return the device id of dip via its devid property (any dev_t).
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7977 
7978 int
7979 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7980 {
7981 	char		*devidstr;
7982 
7983 	ASSERT(dev != DDI_DEV_T_NONE);
7984 
7985 	/* look up the property, devt specific first */
7986 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7987 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7988 		if ((dev == DDI_DEV_T_ANY) ||
7989 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7990 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7991 		    DDI_PROP_SUCCESS)) {
7992 			return (DDI_FAILURE);
7993 		}
7994 	}
7995 
7996 	/* convert to binary form */
7997 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7998 		ddi_prop_free(devidstr);
7999 		return (DDI_FAILURE);
8000 	}
8001 	ddi_prop_free(devidstr);
8002 	return (DDI_SUCCESS);
8003 }
8004 
8005 /*
8006  * Return a copy of the device id for dev_t
8007  */
8008 int
8009 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
8010 {
8011 	dev_info_t	*dip;
8012 	int		rval;
8013 
8014 	/* get the dip */
8015 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
8016 		return (DDI_FAILURE);
8017 
8018 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
8019 
8020 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
8021 	return (rval);
8022 }
8023 
8024 /*
8025  * Return a copy of the minor name for dev_t and spec_type
8026  */
8027 int
8028 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
8029 {
8030 	char		*buf;
8031 	int		circ;
8032 	dev_info_t	*dip;
8033 	char		*nm;
8034 	int		rval;
8035 
8036 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
8037 		*minor_name = NULL;
8038 		return (DDI_FAILURE);
8039 	}
8040 
8041 	/* Find the minor name and copy into max size buf */
8042 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
8043 	ndi_devi_enter(dip, &circ);
8044 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
8045 	if (nm)
8046 		(void) strcpy(buf, nm);
8047 	ndi_devi_exit(dip, circ);
8048 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
8049 
8050 	if (nm) {
8051 		/* duplicate into min size buf for return result */
8052 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
8053 		rval = DDI_SUCCESS;
8054 	} else {
8055 		*minor_name = NULL;
8056 		rval = DDI_FAILURE;
8057 	}
8058 
8059 	/* free max size buf and return */
8060 	kmem_free(buf, MAXNAMELEN);
8061 	return (rval);
8062 }
8063 
8064 int
8065 ddi_lyr_devid_to_devlist(
8066 	ddi_devid_t	devid,
8067 	char		*minor_name,
8068 	int		*retndevs,
8069 	dev_t		**retdevs)
8070 {
8071 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8072 
8073 	if (e_devid_cache_to_devt_list(devid, minor_name,
8074 	    retndevs, retdevs) == DDI_SUCCESS) {
8075 		ASSERT(*retndevs > 0);
8076 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8077 		    *retndevs, *retdevs);
8078 		return (DDI_SUCCESS);
8079 	}
8080 
8081 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8082 		return (DDI_FAILURE);
8083 	}
8084 
8085 	if (e_devid_cache_to_devt_list(devid, minor_name,
8086 	    retndevs, retdevs) == DDI_SUCCESS) {
8087 		ASSERT(*retndevs > 0);
8088 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8089 		    *retndevs, *retdevs);
8090 		return (DDI_SUCCESS);
8091 	}
8092 
8093 	return (DDI_FAILURE);
8094 }
8095 
8096 void
8097 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
8098 {
8099 	kmem_free(devlist, sizeof (dev_t) * ndevs);
8100 }
8101 
8102 /*
8103  * Note: This will need to be fixed if we ever allow processes to
8104  * have more than one data model per exec.
8105  */
8106 model_t
8107 ddi_mmap_get_model(void)
8108 {
8109 	return (get_udatamodel());
8110 }
8111 
model_t
ddi_model_convert_from(model_t model)
{
	/* mask to the model bits and strip the DDI_MODEL_NATIVE flag */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8117 
8118 /*
8119  * ddi interfaces managing storage and retrieval of eventcookies.
8120  */
8121 
8122 /*
8123  * Invoke bus nexus driver's implementation of the
8124  * (*bus_remove_eventcall)() interface to remove a registered
8125  * callback handler for "event".
8126  */
8127 int
8128 ddi_remove_event_handler(ddi_callback_id_t id)
8129 {
8130 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8131 	dev_info_t *ddip;
8132 
8133 	ASSERT(cb);
8134 	if (!cb) {
8135 		return (DDI_FAILURE);
8136 	}
8137 
8138 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8139 	return (ndi_busop_remove_eventcall(ddip, id));
8140 }
8141 
8142 /*
8143  * Invoke bus nexus driver's implementation of the
8144  * (*bus_add_eventcall)() interface to register a callback handler
8145  * for "event".
8146  */
8147 int
8148 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8149     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8150     void *arg, ddi_callback_id_t *id)
8151 {
8152 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8153 }
8154 
8155 
8156 /*
8157  * Return a handle for event "name" by calling up the device tree
8158  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8159  * by a bus nexus or top of dev_info tree is reached.
8160  */
8161 int
8162 ddi_get_eventcookie(dev_info_t *dip, char *name,
8163     ddi_eventcookie_t *event_cookiep)
8164 {
8165 	return (ndi_busop_get_eventcookie(dip, dip,
8166 	    name, event_cookiep));
8167 }
8168 
8169 /*
8170  * This procedure is provided as the general callback function when
8171  * umem_lockmemory calls as_add_callback for long term memory locking.
8172  * When as_unmap, as_setprot, or as_free encounter segments which have
8173  * locked memory, this callback will be invoked.
8174  */
8175 void
8176 umem_lock_undo(struct as *as, void *arg, uint_t event)
8177 {
8178 	_NOTE(ARGUNUSED(as, event))
8179 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8180 
8181 	/*
8182 	 * Call the cleanup function.  Decrement the cookie reference
8183 	 * count, if it goes to zero, return the memory for the cookie.
8184 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8185 	 * called already.  It is the responsibility of the caller of
8186 	 * umem_lockmemory to handle the case of the cleanup routine
8187 	 * being called after a ddi_umem_unlock for the cookie
8188 	 * was called.
8189 	 */
8190 
8191 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8192 
8193 	/* remove the cookie if reference goes to zero */
8194 	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
8195 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8196 	}
8197 }
8198 
8199 /*
8200  * The following two Consolidation Private routines provide generic
8201  * interfaces to increase/decrease the amount of device-locked memory.
8202  *
8203  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8204  * must be called every time i_ddi_incr_locked_memory() is called.
8205  */
8206 int
8207 /* ARGSUSED */
8208 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8209 {
8210 	ASSERT(procp != NULL);
8211 	mutex_enter(&procp->p_lock);
8212 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8213 		mutex_exit(&procp->p_lock);
8214 		return (ENOMEM);
8215 	}
8216 	mutex_exit(&procp->p_lock);
8217 	return (0);
8218 }
8219 
8220 /*
8221  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8222  * must be called every time i_ddi_decr_locked_memory() is called.
8223  */
8224 /* ARGSUSED */
8225 void
8226 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8227 {
8228 	ASSERT(procp != NULL);
8229 	mutex_enter(&procp->p_lock);
8230 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8231 	mutex_exit(&procp->p_lock);
8232 }
8233 
8234 /*
8235  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8236  * charge device locked memory to the max-locked-memory rctl.  Tracking
8237  * device locked memory causes the rctl locks to get hot under high-speed
8238  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8239  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8240  * flag tells us if the rctl value should be updated when unlocking the memory,
8241  * in case the rctl gets changed after the memory was locked.  Any device
8242  * locked memory in that rare case will not be counted toward the rctl limit.
8243  *
8244  * When tracking the locked memory, the kproject_t parameter is always NULL
8245  * in the code paths:
8246  *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8247  *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8248  * Thus, we always use the tk_proj member to check the projp setting.
8249  */
8250 static void
8251 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8252 {
8253 	proc_t		*p;
8254 	kproject_t	*projp;
8255 	zone_t		*zonep;
8256 
8257 	ASSERT(cookie);
8258 	p = cookie->procp;
8259 	ASSERT(p);
8260 
8261 	zonep = p->p_zone;
8262 	projp = p->p_task->tk_proj;
8263 
8264 	ASSERT(zonep);
8265 	ASSERT(projp);
8266 
8267 	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8268 	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8269 		cookie->upd_max_lock_rctl = 0;
8270 	else
8271 		cookie->upd_max_lock_rctl = 1;
8272 }
8273 
8274 /*
8275  * This routine checks if the max-locked-memory resource ctl is
8276  * exceeded, if not increments it, grabs a hold on the project.
8277  * Returns 0 if successful otherwise returns error code
8278  */
8279 static int
8280 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8281 {
8282 	proc_t		*procp;
8283 	int		ret;
8284 
8285 	ASSERT(cookie);
8286 	if (cookie->upd_max_lock_rctl == 0)
8287 		return (0);
8288 
8289 	procp = cookie->procp;
8290 	ASSERT(procp);
8291 
8292 	if ((ret = i_ddi_incr_locked_memory(procp,
8293 	    cookie->size)) != 0) {
8294 		return (ret);
8295 	}
8296 	return (0);
8297 }
8298 
8299 /*
8300  * Decrements the max-locked-memory resource ctl and releases
8301  * the hold on the project that was acquired during umem_incr_devlockmem
8302  */
8303 static void
8304 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8305 {
8306 	proc_t		*proc;
8307 
8308 	if (cookie->upd_max_lock_rctl == 0)
8309 		return;
8310 
8311 	proc = (proc_t *)cookie->procp;
8312 	if (!proc)
8313 		return;
8314 
8315 	i_ddi_decr_locked_memory(proc, cookie->size);
8316 }
8317 
8318 /*
8319  * A consolidation private function which is essentially equivalent to
8320  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8321  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8322  * the ops_vector is valid.
8323  *
8324  * Lock the virtual address range in the current process and create a
8325  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8326  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8327  * to user space.
8328  *
8329  * Note: The resource control accounting currently uses a full charge model
8330  * in other words attempts to lock the same/overlapping areas of memory
8331  * will deduct the full size of the buffer from the projects running
8332  * counter for the device locked memory.
8333  *
8334  * addr, size should be PAGESIZE aligned
8335  *
8336  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8337  *	identifies whether the locked memory will be read or written or both
8338  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8339  * be maintained for an indefinitely long period (essentially permanent),
8340  * rather than for what would be required for a typical I/O completion.
8341  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8342  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8343  * This is to prevent a deadlock if a file truncation is attempted after
8344  * after the locking is done.
8345  *
8346  * Returns 0 on success
8347  *	EINVAL - for invalid parameters
8348  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8349  *	ENOMEM - is returned if the current request to lock memory exceeds
8350  *		*.max-locked-memory resource control value.
8351  *      EFAULT - memory pertains to a regular file mapped shared and
8352  *		and DDI_UMEMLOCK_LONGTERM flag is set
8353  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8354  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge made above before bailing out */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		/* on failure, fully unwind and leave *cookie NULL */
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8528 
8529 /*
8530  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8531  * the cookie.  Called from i_ddi_umem_unlock_thread.
8532  */
8533 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8595 
8596 /*
8597  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8598  *
8599  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8600  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8601  * via calls to ddi_umem_unlock.
8602  */
8603 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8635 
8636 /*
8637  * Start the thread that will process the ddi_umem_unlock list if it is
8638  * not already started (i_ddi_umem_unlock_thread).
8639  */
8640 static void
8641 i_ddi_umem_unlock_thread_start(void)
8642 {
8643 	mutex_enter(&ddi_umem_unlock_mutex);
8644 	if (ddi_umem_unlock_thread == NULL) {
8645 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8646 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8647 		    TS_RUN, minclsyspri);
8648 	}
8649 	mutex_exit(&ddi_umem_unlock_mutex);
8650 }
8651 
8652 /*
8653  * Lock the virtual address range in the current process and create a
8654  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8655  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8656  * to user space.
8657  *
8658  * Note: The resource control accounting currently uses a full charge model
8659  * in other words attempts to lock the same/overlapping areas of memory
8660  * will deduct the full size of the buffer from the projects running
8661  * counter for the device locked memory. This applies to umem_lockmemory too.
8662  *
8663  * addr, size should be PAGESIZE aligned
8664  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8665  *	identifies whether the locked memory will be read or written or both
8666  *
8667  * Returns 0 on success
8668  *	EINVAL - for invalid parameters
8669  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8670  *	ENOMEM - is returned if the current request to lock memory exceeds
8671  *		*.max-locked-memory resource control value.
8672  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8673  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	/* Charge the locked range against the *.max-locked-memory rctl. */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* Undo the rctl charge before failing. */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8760 
8761 /*
8762  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8763  * unlocked by i_ddi_umem_unlock_thread.
8764  */
8765 
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty; wake the worker thread */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8797 
8798 /*
8799  * Create a buf structure from a ddi_umem_cookie
8800  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8801  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8802  * off, len - identifies the portion of the memory represented by the cookie
8803  *		that the buf points to.
8804  *	NOTE: off, len need to follow the alignment/size restrictions of the
8805  *		device (dev) that this buf will be passed to. Some devices
8806  *		will accept unrestricted alignment/size, whereas others (such as
8807  *		st) require some block-size alignment/size. It is the caller's
8808  *		responsibility to ensure that the alignment/size restrictions
8809  *		are met (we cannot assert as we do not know the restrictions)
8810  *
8811  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8812  *		the flags used in ddi_umem_lock
8813  *
8814  * The following three arguments are used to initialize fields in the
8815  * buf structure and are uninterpreted by this routine.
8816  *
8817  * dev
8818  * blkno
8819  * iodone
8820  *
8821  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8822  *
8823  * Returns a buf structure pointer on success (to be freed by freerbuf)
8824  *	NULL on any parameter error or memory alloc failure
8825  *
8826  */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 *
	 * NOTE(review): off is a signed off_t; a negative off, or an
	 * off + len that wraps, would pass this check — confirm callers
	 * are expected to supply validated offsets.
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	/* appears redundant with the check above unless off+len overflowed */
	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked cookies carry a page array; export it as a shadow */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
8888 
8889 /*
8890  * Fault-handling and related routines
8891  */
8892 
8893 ddi_devstate_t
8894 ddi_get_devstate(dev_info_t *dip)
8895 {
8896 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8897 		return (DDI_DEVSTATE_OFFLINE);
8898 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8899 		return (DDI_DEVSTATE_DOWN);
8900 	else if (DEVI_IS_BUS_QUIESCED(dip))
8901 		return (DDI_DEVSTATE_QUIESCED);
8902 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8903 		return (DDI_DEVSTATE_DEGRADED);
8904 	else
8905 		return (DDI_DEVSTATE_UP);
8906 }
8907 
8908 void
8909 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8910 	ddi_fault_location_t location, const char *message)
8911 {
8912 	struct ddi_fault_event_data fd;
8913 	ddi_eventcookie_t ec;
8914 
8915 	/*
8916 	 * Assemble all the information into a fault-event-data structure
8917 	 */
8918 	fd.f_dip = dip;
8919 	fd.f_impact = impact;
8920 	fd.f_location = location;
8921 	fd.f_message = message;
8922 	fd.f_oldstate = ddi_get_devstate(dip);
8923 
8924 	/*
8925 	 * Get eventcookie from defining parent.
8926 	 */
8927 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8928 	    DDI_SUCCESS)
8929 		return;
8930 
8931 	(void) ndi_post_event(dip, dip, ec, &fd);
8932 }
8933 
8934 char *
8935 i_ddi_devi_class(dev_info_t *dip)
8936 {
8937 	return (DEVI(dip)->devi_device_class);
8938 }
8939 
8940 int
8941 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag)
8942 {
8943 	struct dev_info *devi = DEVI(dip);
8944 
8945 	mutex_enter(&devi->devi_lock);
8946 
8947 	if (devi->devi_device_class)
8948 		kmem_free(devi->devi_device_class,
8949 		    strlen(devi->devi_device_class) + 1);
8950 
8951 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8952 	    != NULL) {
8953 		mutex_exit(&devi->devi_lock);
8954 		return (DDI_SUCCESS);
8955 	}
8956 
8957 	mutex_exit(&devi->devi_lock);
8958 
8959 	return (DDI_FAILURE);
8960 }
8961 
8962 
8963 /*
8964  * Task Queues DDI interfaces.
8965  */
8966 
8967 /* ARGSUSED */
8968 ddi_taskq_t *
8969 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8970     pri_t pri, uint_t cflags)
8971 {
8972 	char full_name[TASKQ_NAMELEN];
8973 	const char *tq_name;
8974 	int nodeid = 0;
8975 
8976 	if (dip == NULL)
8977 		tq_name = name;
8978 	else {
8979 		nodeid = ddi_get_instance(dip);
8980 
8981 		if (name == NULL)
8982 			name = "tq";
8983 
8984 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8985 		    ddi_driver_name(dip), name);
8986 
8987 		tq_name = full_name;
8988 	}
8989 
8990 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8991 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8992 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8993 }
8994 
8995 void
8996 ddi_taskq_destroy(ddi_taskq_t *tq)
8997 {
8998 	taskq_destroy((taskq_t *)tq);
8999 }
9000 
9001 int
9002 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
9003     void *arg, uint_t dflags)
9004 {
9005 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
9006 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
9007 
9008 	return (id != TASKQID_INVALID ? DDI_SUCCESS : DDI_FAILURE);
9009 }
9010 
9011 void
9012 ddi_taskq_wait(ddi_taskq_t *tq)
9013 {
9014 	taskq_wait((taskq_t *)tq);
9015 }
9016 
9017 void
9018 ddi_taskq_suspend(ddi_taskq_t *tq)
9019 {
9020 	taskq_suspend((taskq_t *)tq);
9021 }
9022 
9023 boolean_t
9024 ddi_taskq_suspended(ddi_taskq_t *tq)
9025 {
9026 	return (taskq_suspended((taskq_t *)tq));
9027 }
9028 
9029 void
9030 ddi_taskq_resume(ddi_taskq_t *tq)
9031 {
9032 	taskq_resume((taskq_t *)tq);
9033 }
9034 
9035 int
9036 ddi_parse(
9037 	const char	*ifname,
9038 	char		*alnum,
9039 	uint_t		*nump)
9040 {
9041 	const char	*p;
9042 	int		l;
9043 	ulong_t		num;
9044 	boolean_t	nonum = B_TRUE;
9045 	char		c;
9046 
9047 	l = strlen(ifname);
9048 	for (p = ifname + l; p != ifname; l--) {
9049 		c = *--p;
9050 		if (!isdigit(c)) {
9051 			(void) strlcpy(alnum, ifname, l + 1);
9052 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
9053 				return (DDI_FAILURE);
9054 			break;
9055 		}
9056 		nonum = B_FALSE;
9057 	}
9058 	if (l == 0 || nonum)
9059 		return (DDI_FAILURE);
9060 
9061 	*nump = num;
9062 	return (DDI_SUCCESS);
9063 }
9064 
9065 /*
9066  * Default initialization function for drivers that don't need to quiesce.
9067  */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* The driver declares it never needs quiescing; always succeed. */
	return (DDI_SUCCESS);
}
9074 
9075 /*
9076  * Initialization function for drivers that should implement quiesce()
9077  * but haven't yet.
9078  */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* The driver cannot quiesce its device; always fail. */
	return (DDI_FAILURE);
}
9085 
9086 char *
9087 ddi_strdup(const char *str, int flag)
9088 {
9089 	int	n;
9090 	char	*ptr;
9091 
9092 	ASSERT(str != NULL);
9093 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9094 
9095 	n = strlen(str);
9096 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9097 		return (NULL);
9098 	bcopy(str, ptr, n + 1);
9099 	return (ptr);
9100 }
9101 
/*
 * Kernel strdup(): duplicate str with KM_SLEEP allocation (may block,
 * never returns NULL).  Free the result with strfree().
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
9107 
9108 void
9109 strfree(char *str)
9110 {
9111 	ASSERT(str != NULL);
9112 	kmem_free(str, strlen(str) + 1);
9113 }
9114 
9115 /*
9116  * Generic DDI callback interfaces.
9117  */
9118 
9119 int
9120 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9121     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9122 {
9123 	ddi_cb_t	*cbp;
9124 
9125 	ASSERT(dip != NULL);
9126 	ASSERT(DDI_CB_FLAG_VALID(flags));
9127 	ASSERT(cbfunc != NULL);
9128 	ASSERT(ret_hdlp != NULL);
9129 
9130 	/* Sanity check the context */
9131 	ASSERT(!servicing_interrupt());
9132 	if (servicing_interrupt())
9133 		return (DDI_FAILURE);
9134 
9135 	/* Validate parameters */
9136 	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9137 	    (cbfunc == NULL) || (ret_hdlp == NULL))
9138 		return (DDI_EINVAL);
9139 
9140 	/* Check for previous registration */
9141 	if (DEVI(dip)->devi_cb_p != NULL)
9142 		return (DDI_EALREADY);
9143 
9144 	/* Allocate and initialize callback */
9145 	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9146 	cbp->cb_dip = dip;
9147 	cbp->cb_func = cbfunc;
9148 	cbp->cb_arg1 = arg1;
9149 	cbp->cb_arg2 = arg2;
9150 	cbp->cb_flags = flags;
9151 	DEVI(dip)->devi_cb_p = cbp;
9152 
9153 	/* If adding an IRM callback, notify IRM */
9154 	if (flags & DDI_CB_FLAG_INTR)
9155 		i_ddi_irm_set_cb(dip, B_TRUE);
9156 
9157 	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9158 	return (DDI_SUCCESS);
9159 }
9160 
9161 int
9162 ddi_cb_unregister(ddi_cb_handle_t hdl)
9163 {
9164 	ddi_cb_t	*cbp;
9165 	dev_info_t	*dip;
9166 
9167 	ASSERT(hdl != NULL);
9168 
9169 	/* Sanity check the context */
9170 	ASSERT(!servicing_interrupt());
9171 	if (servicing_interrupt())
9172 		return (DDI_FAILURE);
9173 
9174 	/* Validate parameters */
9175 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9176 	    ((dip = cbp->cb_dip) == NULL))
9177 		return (DDI_EINVAL);
9178 
9179 	/* If removing an IRM callback, notify IRM */
9180 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9181 		i_ddi_irm_set_cb(dip, B_FALSE);
9182 
9183 	/* Destroy the callback */
9184 	kmem_free(cbp, sizeof (ddi_cb_t));
9185 	DEVI(dip)->devi_cb_p = NULL;
9186 
9187 	return (DDI_SUCCESS);
9188 }
9189 
9190 /*
9191  * Platform independent DR routines
9192  */
9193 
9194 static int
9195 ndi2errno(int n)
9196 {
9197 	int err = 0;
9198 
9199 	switch (n) {
9200 		case NDI_NOMEM:
9201 			err = ENOMEM;
9202 			break;
9203 		case NDI_BUSY:
9204 			err = EBUSY;
9205 			break;
9206 		case NDI_FAULT:
9207 			err = EFAULT;
9208 			break;
9209 		case NDI_FAILURE:
9210 			err = EIO;
9211 			break;
9212 		case NDI_SUCCESS:
9213 			break;
9214 		case NDI_BADHANDLE:
9215 		default:
9216 			err = EINVAL;
9217 			break;
9218 	}
9219 	return (err);
9220 }
9221 
9222 /*
9223  * Prom tree node list
9224  */
struct ptnode {
	pnode_t		nodeid;		/* PROM node identifier */
	struct ptnode	*next;		/* singly-linked list forward link */
};
9229 
9230 /*
9231  * Prom tree walk arg
9232  */
struct pta {
	dev_info_t	*pdip;	/* parent under which branches are created */
	devi_branch_t	*bp;	/* caller-supplied branch descriptor */
	uint_t		flags;	/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;	/* dip returned by branch configure, if any */
	struct ptnode	*head;	/* list of PROM nodes selected by the walk */
};
9240 
/*
 * Walk the PROM tree rooted at nodeid.  Each node accepted by the
 * caller's prom_branch_select callback is appended to the tail of the
 * ap->head list for later branch creation.  When DEVI_BRANCH_CHILD is
 * set the walk does not recurse below the starting node.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* find the tail of the list, then append a new entry */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* recurse into each child of this node */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9272 
9273 /*
9274  * NOTE: The caller of this function must check for device contracts
9275  * or LDI callbacks against this dip before setting the dip offline.
9276  */
9277 static int
9278 set_infant_dip_offline(dev_info_t *dip, void *arg)
9279 {
9280 	char	*path = (char *)arg;
9281 
9282 	ASSERT(dip);
9283 	ASSERT(arg);
9284 
9285 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9286 		(void) ddi_pathname(dip, path);
9287 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9288 		    "node: %s", path);
9289 		return (DDI_FAILURE);
9290 	}
9291 
9292 	mutex_enter(&(DEVI(dip)->devi_lock));
9293 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
9294 		DEVI_SET_DEVICE_OFFLINE(dip);
9295 	mutex_exit(&(DEVI(dip)->devi_lock));
9296 
9297 	return (DDI_SUCCESS);
9298 }
9299 
/* Carries the shared state for the dip_set_offline() tree walk. */
typedef struct result {
	char	*path;		/* scratch buffer for ddi_pathname() */
	int	result;		/* DDI_SUCCESS unless an offline was vetoed */
} result_t;
9304 
9305 static int
9306 dip_set_offline(dev_info_t *dip, void *arg)
9307 {
9308 	int end;
9309 	result_t *resp = (result_t *)arg;
9310 
9311 	ASSERT(dip);
9312 	ASSERT(resp);
9313 
9314 	/*
9315 	 * We stop the walk if e_ddi_offline_notify() returns
9316 	 * failure, because this implies that one or more consumers
9317 	 * (either LDI or contract based) has blocked the offline.
9318 	 * So there is no point in conitnuing the walk
9319 	 */
9320 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9321 		resp->result = DDI_FAILURE;
9322 		return (DDI_WALK_TERMINATE);
9323 	}
9324 
9325 	/*
9326 	 * If set_infant_dip_offline() returns failure, it implies
9327 	 * that we failed to set a particular dip offline. This
9328 	 * does not imply that the offline as a whole should fail.
9329 	 * We want to do the best we can, so we continue the walk.
9330 	 */
9331 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9332 		end = DDI_SUCCESS;
9333 	else
9334 		end = DDI_FAILURE;
9335 
9336 	e_ddi_offline_finalize(dip, end);
9337 
9338 	return (DDI_WALK_CONTINUE);
9339 }
9340 
9341 /*
9342  * The call to e_ddi_offline_notify() exists for the
9343  * unlikely error case that a branch we are trying to
9344  * create already exists and has device contracts or LDI
9345  * event callbacks against it.
9346  *
9347  * We allow create to succeed for such branches only if
9348  * no constraints block the offline.
9349  */
9350 static int
9351 branch_set_offline(dev_info_t *dip, char *path)
9352 {
9353 	int		circ;
9354 	int		end;
9355 	result_t	res;
9356 
9357 
9358 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9359 		return (DDI_FAILURE);
9360 	}
9361 
9362 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9363 		end = DDI_SUCCESS;
9364 	else
9365 		end = DDI_FAILURE;
9366 
9367 	e_ddi_offline_finalize(dip, end);
9368 
9369 	if (end == DDI_FAILURE)
9370 		return (DDI_FAILURE);
9371 
9372 	res.result = DDI_SUCCESS;
9373 	res.path = path;
9374 
9375 	ndi_devi_enter(dip, &circ);
9376 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9377 	ndi_devi_exit(dip, circ);
9378 
9379 	return (res.result);
9380 }
9381 
/*
 * Create device branches under ap->pdip for the PROM child nodes
 * selected by the caller's prom_branch_select callback.  Runs under
 * prom_tree_access() so the PROM tree is stable during the walk.
 * Returns 0 on success, or an errno describing the first failure.
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Build the list of selected PROM nodes under the parent. */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9489 
/*
 * Create one self-identifying (SID) devinfo node under pdip via the
 * caller's sid_branch_create callback, then recursively create its
 * children.  The callback's DDI_WALK_* return value drives whether
 * children and siblings are created.  On success the new dip is
 * returned through rdipp (for branch roots) and is left offline so
 * that only a "configure" operation can attach it.
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int			rv, circ, len;
	int			i, flags, ret;
	dev_info_t		*dip;
	char			*nbuf;
	char			*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a placeholder node; the callback sets its properties. */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p",  (void *)dip);
		goto fail;
	}

	/* The callback must have established a "name" property. */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Create children of dip until one declines to continue. */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9615 
/*
 * Create (and optionally configure) SID branches under pdip, one per
 * iteration, until sid_node_create() reports that no more siblings
 * should be created (DDI_WALK_PRUNESIB) or an error occurs.
 * Returns 0 on success, EIO on a create error, or the first configure
 * error encountered.
 */
static int
create_sid_branch(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		/* Keep the new branch held until explicitly unheld. */
		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9659 
9660 int
9661 e_ddi_branch_create(
9662 	dev_info_t	*pdip,
9663 	devi_branch_t	*bp,
9664 	dev_info_t	**dipp,
9665 	uint_t		flags)
9666 {
9667 	int prom_devi, sid_devi, error;
9668 
9669 	if (pdip == NULL || bp == NULL || bp->type == 0)
9670 		return (EINVAL);
9671 
9672 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9673 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9674 
9675 	if (prom_devi && bp->create.prom_branch_select == NULL)
9676 		return (EINVAL);
9677 	else if (sid_devi && bp->create.sid_branch_create == NULL)
9678 		return (EINVAL);
9679 	else if (!prom_devi && !sid_devi)
9680 		return (EINVAL);
9681 
9682 	if (flags & DEVI_BRANCH_EVENT)
9683 		return (EINVAL);
9684 
9685 	if (prom_devi) {
9686 		struct pta pta = {0};
9687 
9688 		pta.pdip = pdip;
9689 		pta.bp = bp;
9690 		pta.flags = flags;
9691 
9692 		error = prom_tree_access(create_prom_branch, &pta, NULL);
9693 
9694 		if (dipp)
9695 			*dipp = pta.fdip;
9696 		else if (pta.fdip)
9697 			ndi_rele_devi(pta.fdip);
9698 	} else {
9699 		error = create_sid_branch(pdip, bp, dipp, flags);
9700 	}
9701 
9702 	return (error);
9703 }
9704 
9705 int
9706 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9707 {
9708 	int		rv;
9709 	char		*devnm;
9710 	dev_info_t	*pdip;
9711 
9712 	if (dipp)
9713 		*dipp = NULL;
9714 
9715 	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9716 		return (EINVAL);
9717 
9718 	pdip = ddi_get_parent(rdip);
9719 
9720 	ndi_hold_devi(pdip);
9721 
9722 	if (!e_ddi_branch_held(rdip)) {
9723 		ndi_rele_devi(pdip);
9724 		cmn_err(CE_WARN, "e_ddi_branch_configure: "
9725 		    "dip(%p) not held", (void *)rdip);
9726 		return (EINVAL);
9727 	}
9728 
9729 	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9730 		/*
9731 		 * First attempt to bind a driver. If we fail, return
9732 		 * success (On some platforms, dips for some device
9733 		 * types (CPUs) may not have a driver)
9734 		 */
9735 		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9736 			ndi_rele_devi(pdip);
9737 			return (0);
9738 		}
9739 
9740 		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9741 			rv = NDI_FAILURE;
9742 			goto out;
9743 		}
9744 	}
9745 
9746 	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9747 
9748 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9749 
9750 	(void) ddi_deviname(rdip, devnm);
9751 
9752 	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9753 	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9754 		/* release hold from ndi_devi_config_one() */
9755 		ndi_rele_devi(rdip);
9756 	}
9757 
9758 	kmem_free(devnm, MAXNAMELEN + 1);
9759 out:
9760 	if (rv != NDI_SUCCESS && dipp && rdip) {
9761 		ndi_hold_devi(rdip);
9762 		*dipp = rdip;
9763 	}
9764 	ndi_rele_devi(pdip);
9765 	return (ndi2errno(rv));
9766 }
9767 
9768 void
9769 e_ddi_branch_hold(dev_info_t *rdip)
9770 {
9771 	if (e_ddi_branch_held(rdip)) {
9772 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9773 		return;
9774 	}
9775 
9776 	mutex_enter(&DEVI(rdip)->devi_lock);
9777 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9778 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9779 		DEVI(rdip)->devi_ref++;
9780 	}
9781 	ASSERT(DEVI(rdip)->devi_ref > 0);
9782 	mutex_exit(&DEVI(rdip)->devi_lock);
9783 }
9784 
9785 int
9786 e_ddi_branch_held(dev_info_t *rdip)
9787 {
9788 	int rv = 0;
9789 
9790 	mutex_enter(&DEVI(rdip)->devi_lock);
9791 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9792 	    DEVI(rdip)->devi_ref > 0) {
9793 		rv = 1;
9794 	}
9795 	mutex_exit(&DEVI(rdip)->devi_lock);
9796 
9797 	return (rv);
9798 }
9799 
9800 void
9801 e_ddi_branch_rele(dev_info_t *rdip)
9802 {
9803 	mutex_enter(&DEVI(rdip)->devi_lock);
9804 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9805 	DEVI(rdip)->devi_ref--;
9806 	mutex_exit(&DEVI(rdip)->devi_lock);
9807 }
9808 
9809 int
9810 e_ddi_branch_unconfigure(
9811 	dev_info_t *rdip,
9812 	dev_info_t **dipp,
9813 	uint_t flags)
9814 {
9815 	int	circ, rv;
9816 	int	destroy;
9817 	char	*devnm;
9818 	uint_t	nflags;
9819 	dev_info_t *pdip;
9820 
9821 	if (dipp)
9822 		*dipp = NULL;
9823 
9824 	if (rdip == NULL)
9825 		return (EINVAL);
9826 
9827 	pdip = ddi_get_parent(rdip);
9828 
9829 	ASSERT(pdip);
9830 
9831 	/*
9832 	 * Check if caller holds pdip busy - can cause deadlocks during
9833 	 * devfs_clean()
9834 	 */
9835 	if (DEVI_BUSY_OWNED(pdip)) {
9836 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9837 		    " devinfo node(%p) is busy held", (void *)pdip);
9838 		return (EINVAL);
9839 	}
9840 
9841 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9842 
9843 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9844 
9845 	ndi_devi_enter(pdip, &circ);
9846 	(void) ddi_deviname(rdip, devnm);
9847 	ndi_devi_exit(pdip, circ);
9848 
9849 	/*
9850 	 * ddi_deviname() returns a component name with / prepended.
9851 	 */
9852 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9853 
9854 	ndi_devi_enter(pdip, &circ);
9855 
9856 	/*
9857 	 * Recreate device name as it may have changed state (init/uninit)
9858 	 * when parent busy lock was dropped for devfs_clean()
9859 	 */
9860 	(void) ddi_deviname(rdip, devnm);
9861 
9862 	if (!e_ddi_branch_held(rdip)) {
9863 		kmem_free(devnm, MAXNAMELEN + 1);
9864 		ndi_devi_exit(pdip, circ);
9865 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9866 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
9867 		return (EINVAL);
9868 	}
9869 
9870 	/*
9871 	 * Release hold on the branch. This is ok since we are holding the
9872 	 * parent busy. If rdip is not removed, we must do a hold on the
9873 	 * branch before returning.
9874 	 */
9875 	e_ddi_branch_rele(rdip);
9876 
9877 	nflags = NDI_DEVI_OFFLINE;
9878 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9879 		nflags |= NDI_DEVI_REMOVE;
9880 		destroy = 1;
9881 	} else {
9882 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
9883 	}
9884 
9885 	if (flags & DEVI_BRANCH_EVENT)
9886 		nflags |= NDI_POST_EVENT;
9887 
9888 	if (i_ddi_devi_attached(pdip) &&
9889 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9890 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9891 	} else {
9892 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9893 		if (rv == NDI_SUCCESS) {
9894 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9895 			rv = ndi_devi_offline(rdip, nflags);
9896 		}
9897 	}
9898 
9899 	if (!destroy || rv != NDI_SUCCESS) {
9900 		/* The dip still exists, so do a hold */
9901 		e_ddi_branch_hold(rdip);
9902 	}
9903 out:
9904 	kmem_free(devnm, MAXNAMELEN + 1);
9905 	ndi_devi_exit(pdip, circ);
9906 	return (ndi2errno(rv));
9907 }
9908 
9909 int
9910 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9911 {
9912 	return (e_ddi_branch_unconfigure(rdip, dipp,
9913 	    flag|DEVI_BRANCH_DESTROY));
9914 }
9915 
9916 /*
9917  * Number of chains for hash table
9918  */
9919 #define	NUMCHAINS	17
9920 
9921 /*
9922  * Devinfo busy arg
9923  */
9924 struct devi_busy {
9925 	int dv_total;
9926 	int s_total;
9927 	mod_hash_t *dv_hash;
9928 	mod_hash_t *s_hash;
9929 	int (*callback)(dev_info_t *, void *, uint_t);
9930 	void *arg;
9931 };
9932 
9933 static int
9934 visit_dip(dev_info_t *dip, void *arg)
9935 {
9936 	uintptr_t sbusy, dvbusy, ref;
9937 	struct devi_busy *bsp = arg;
9938 
9939 	ASSERT(bsp->callback);
9940 
9941 	/*
9942 	 * A dip cannot be busy if its reference count is 0
9943 	 */
9944 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9945 		return (bsp->callback(dip, bsp->arg, 0));
9946 	}
9947 
9948 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9949 		dvbusy = 0;
9950 
9951 	/*
9952 	 * To catch device opens currently maintained on specfs common snodes.
9953 	 */
9954 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9955 		sbusy = 0;
9956 
9957 #ifdef	DEBUG
9958 	if (ref < sbusy || ref < dvbusy) {
9959 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9960 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9961 	}
9962 #endif
9963 
9964 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9965 
9966 	return (bsp->callback(dip, bsp->arg, dvbusy));
9967 }
9968 
9969 static int
9970 visit_snode(struct snode *sp, void *arg)
9971 {
9972 	uintptr_t sbusy;
9973 	dev_info_t *dip;
9974 	int count;
9975 	struct devi_busy *bsp = arg;
9976 
9977 	ASSERT(sp);
9978 
9979 	/*
9980 	 * The stable lock is held. This prevents
9981 	 * the snode and its associated dip from
9982 	 * going away.
9983 	 */
9984 	dip = NULL;
9985 	count = spec_devi_open_count(sp, &dip);
9986 
9987 	if (count <= 0)
9988 		return (DDI_WALK_CONTINUE);
9989 
9990 	ASSERT(dip);
9991 
9992 	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9993 		sbusy = count;
9994 	else
9995 		sbusy += count;
9996 
9997 	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9998 		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9999 		    "sbusy = %lu", "e_ddi_branch_referenced",
10000 		    (void *)dip, sbusy);
10001 	}
10002 
10003 	bsp->s_total += count;
10004 
10005 	return (DDI_WALK_CONTINUE);
10006 }
10007 
/*
 * Per-dv_node walk callback: accumulate devfs vnode hold counts, keyed
 * by the dv_node's dip, into bsp->dv_hash.
 */
static void
visit_dvnode(struct dv_node *dv, void *arg)
{
	uintptr_t dvbusy;
	uint_t count;
	struct vnode *vp;
	struct devi_busy *bsp = arg;

	ASSERT(dv && dv->dv_devi);

	vp = DVTOV(dv);

	/* snapshot this dv_node's vnode hold count */
	mutex_enter(&vp->v_lock);
	count = vp->v_count;
	mutex_exit(&vp->v_lock);

	if (!count)
		return;

	/*
	 * Accumulate this dip's total: pull any previous total out of
	 * the hash, add this vnode's count, and put the sum back.
	 */
	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
	    (mod_hash_val_t *)&dvbusy))
		dvbusy = count;
	else
		dvbusy += count;

	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
	    (mod_hash_val_t)dvbusy)) {
		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
		    "dvbusy=%lu", "e_ddi_branch_referenced",
		    (void *)dv->dv_devi, dvbusy);
	}

	bsp->dv_total += count;
}
10042 
10043 /*
10044  * Returns reference count on success or -1 on failure.
10045  */
10046 int
10047 e_ddi_branch_referenced(
10048 	dev_info_t *rdip,
10049 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
10050 	void *arg)
10051 {
10052 	int circ;
10053 	char *path;
10054 	dev_info_t *pdip;
10055 	struct devi_busy bsa = {0};
10056 
10057 	ASSERT(rdip);
10058 
10059 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
10060 
10061 	ndi_hold_devi(rdip);
10062 
10063 	pdip = ddi_get_parent(rdip);
10064 
10065 	ASSERT(pdip);
10066 
10067 	/*
10068 	 * Check if caller holds pdip busy - can cause deadlocks during
10069 	 * devfs_walk()
10070 	 */
10071 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
10072 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10073 		    "devinfo branch(%p) not held or parent busy held",
10074 		    (void *)rdip);
10075 		ndi_rele_devi(rdip);
10076 		kmem_free(path, MAXPATHLEN);
10077 		return (-1);
10078 	}
10079 
10080 	ndi_devi_enter(pdip, &circ);
10081 	(void) ddi_pathname(rdip, path);
10082 	ndi_devi_exit(pdip, circ);
10083 
10084 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10085 	    mod_hash_null_valdtor, sizeof (struct dev_info));
10086 
10087 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10088 	    mod_hash_null_valdtor, sizeof (struct snode));
10089 
10090 	if (devfs_walk(path, visit_dvnode, &bsa)) {
10091 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10092 		    "devfs walk failed for: %s", path);
10093 		kmem_free(path, MAXPATHLEN);
10094 		bsa.s_total = bsa.dv_total = -1;
10095 		goto out;
10096 	}
10097 
10098 	kmem_free(path, MAXPATHLEN);
10099 
10100 	/*
10101 	 * Walk the snode table to detect device opens, which are currently
10102 	 * maintained on specfs common snodes.
10103 	 */
10104 	spec_snode_walk(visit_snode, &bsa);
10105 
10106 	if (callback == NULL)
10107 		goto out;
10108 
10109 	bsa.callback = callback;
10110 	bsa.arg = arg;
10111 
10112 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10113 		ndi_devi_enter(rdip, &circ);
10114 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10115 		ndi_devi_exit(rdip, circ);
10116 	}
10117 
10118 out:
10119 	ndi_rele_devi(rdip);
10120 	mod_hash_destroy_ptrhash(bsa.s_hash);
10121 	mod_hash_destroy_ptrhash(bsa.dv_hash);
10122 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10123 }
10124