xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 45f8fdd18746f21c0bd44b4ae78f21a8d2de01c1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2022 Garrett D'Amore
25  * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
26  * Copyright 2023 MNX Cloud, Inc.
27  */
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/ddi_periodic.h>
64 #include <sys/hwconf.h>
65 #include <sys/pathname.h>
66 #include <sys/modctl.h>
67 #include <sys/epm.h>
68 #include <sys/devctl.h>
69 #include <sys/callb.h>
70 #include <sys/cladm.h>
71 #include <sys/sysevent.h>
72 #include <sys/dacf_impl.h>
73 #include <sys/ddidevmap.h>
74 #include <sys/bootconf.h>
75 #include <sys/disp.h>
76 #include <sys/atomic.h>
77 #include <sys/promif.h>
78 #include <sys/instance.h>
79 #include <sys/sysevent/eventdefs.h>
80 #include <sys/task.h>
81 #include <sys/project.h>
82 #include <sys/taskq.h>
83 #include <sys/devpolicy.h>
84 #include <sys/ctype.h>
85 #include <net/if.h>
86 #include <sys/rctl.h>
87 #include <sys/zone.h>
88 #include <sys/clock_impl.h>
89 #include <sys/ddi.h>
90 #include <sys/modhash.h>
91 #include <sys/sunldi_impl.h>
92 #include <sys/fs/dv_node.h>
93 #include <sys/fs/snode.h>
94 
95 extern	pri_t	minclsyspri;
96 
97 extern	rctl_hndl_t rc_project_locked_mem;
98 extern	rctl_hndl_t rc_zone_locked_mem;
99 
100 #ifdef DEBUG
101 static int sunddi_debug = 0;
102 #endif /* DEBUG */
103 
104 /* ddi_umem_unlock miscellaneous */
105 
106 static	void	i_ddi_umem_unlock_thread_start(void);
107 
108 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
109 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
110 static	kthread_t	*ddi_umem_unlock_thread;
111 /*
112  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
113  */
114 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
115 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
116 
117 /*
118  * DDI(Sun) Function and flag definitions:
119  */
120 
121 #if defined(__x86)
122 /*
123  * Used to indicate which entries were chosen from a range.
124  */
125 char	*chosen_reg = "chosen-reg";
126 #endif
127 
128 /*
129  * Function used to ring system console bell
130  */
131 void (*ddi_console_bell_func)(clock_t duration);
132 
133 /*
134  * Creating register mappings and handling interrupts:
135  */
136 
137 /*
138  * Generic ddi_map: Call parent to fulfill request...
139  */
140 
141 int
142 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
143     off_t len, caddr_t *addrp)
144 {
145 	dev_info_t *pdip;
146 
147 	ASSERT(dp);
148 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
149 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
150 	    dp, mp, offset, len, addrp));
151 }
152 
153 /*
154  * ddi_apply_range: (Called by nexi only.)
155  * Apply ranges in parent node dp, to child regspec rp...
156  */
157 
/*
 * Thin public wrapper: apply the ranges of parent node dp to the child
 * regspec rp; all work is done by i_ddi_apply_range().
 */
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}
163 
/*
 * Map register set 'rnumber' of 'dip' into kernel virtual space, returning
 * the kernel address through *kaddrp.  On x86 this also publishes a
 * 'chosen-reg' property recording which reg tuple was selected (adjusted
 * by 'offset' and, when non-zero, 'len').
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not range-checked against
		 * 'length' here — presumably validated by the caller.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked, kernel-mapping request for this register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
227 
228 void
229 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
230     off_t len)
231 {
232 	ddi_map_req_t mr;
233 
234 	mr.map_op = DDI_MO_UNMAP;
235 	mr.map_type = DDI_MT_RNUMBER;
236 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
237 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
238 	mr.map_obj.rnumber = rnumber;
239 	mr.map_handlep = NULL;
240 	mr.map_vers = DDI_MAP_VERSION;
241 
242 	/*
243 	 * Call my parent to unmap my regs.
244 	 */
245 
246 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
247 	*kaddrp = (caddr_t)0;
248 #if defined(__x86)
249 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
250 #endif
251 }
252 
/*
 * Default bus_map(9E) implementation: delegate entirely to the
 * implementation routine i_ddi_bus_map().
 */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
259 
260 /*
261  * nullbusmap:	The/DDI default bus_map entry point for nexi
262  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
263  *		with no HAT/MMU layer to be programmed at this level.
264  *
265  *		If the call is to map by rnumber, return an error,
266  *		otherwise pass anything else up the tree to my parent.
267  */
268 int
269 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
270     off_t offset, off_t len, caddr_t *vaddrp)
271 {
272 	_NOTE(ARGUNUSED(rdip))
273 	if (mp->map_type == DDI_MT_RNUMBER)
274 		return (DDI_ME_UNSUPPORTED);
275 
276 	return (ddi_map(dip, mp, offset, len, vaddrp));
277 }
278 
279 /*
280  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
281  *			   Only for use by nexi using the reg/range paradigm.
282  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Nexus-only helper; delegates to the implementation routine. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
288 
289 
290 /*
291  * Note that we allow the dip to be nil because we may be called
292  * prior even to the instantiation of the devinfo tree itself - all
293  * regular leaf and nexus drivers should always use a non-nil dip!
294  *
295  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
296  * simply get a synchronous fault as soon as we touch a missing address.
297  *
298  * Poke is rather more carefully handled because we might poke to a write
299  * buffer, "succeed", then only find some time later that we got an
300  * asynchronous fault that indicated that the address we were writing to
301  * was not really backed by hardware.
302  */
303 
304 static int
305 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
306     void *addr, void *value_p)
307 {
308 	union {
309 		uint64_t	u64;
310 		uint32_t	u32;
311 		uint16_t	u16;
312 		uint8_t		u8;
313 	} peekpoke_value;
314 
315 	peekpoke_ctlops_t peekpoke_args;
316 	uint64_t dummy_result;
317 	int rval;
318 
319 	/* Note: size is assumed to be correct;  it is not checked. */
320 	peekpoke_args.size = size;
321 	peekpoke_args.dev_addr = (uintptr_t)addr;
322 	peekpoke_args.handle = NULL;
323 	peekpoke_args.repcount = 1;
324 	peekpoke_args.flags = 0;
325 
326 	if (cmd == DDI_CTLOPS_POKE) {
327 		switch (size) {
328 		case sizeof (uint8_t):
329 			peekpoke_value.u8 = *(uint8_t *)value_p;
330 			break;
331 		case sizeof (uint16_t):
332 			peekpoke_value.u16 = *(uint16_t *)value_p;
333 			break;
334 		case sizeof (uint32_t):
335 			peekpoke_value.u32 = *(uint32_t *)value_p;
336 			break;
337 		case sizeof (uint64_t):
338 			peekpoke_value.u64 = *(uint64_t *)value_p;
339 			break;
340 		}
341 	}
342 
343 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
344 
345 	if (devi != NULL)
346 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
347 		    &dummy_result);
348 	else
349 		rval = peekpoke_mem(cmd, &peekpoke_args);
350 
351 	/*
352 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
353 	 */
354 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
355 		switch (size) {
356 		case sizeof (uint8_t):
357 			*(uint8_t *)value_p = peekpoke_value.u8;
358 			break;
359 		case sizeof (uint16_t):
360 			*(uint16_t *)value_p = peekpoke_value.u16;
361 			break;
362 		case sizeof (uint32_t):
363 			*(uint32_t *)value_p = peekpoke_value.u32;
364 			break;
365 		case sizeof (uint64_t):
366 			*(uint64_t *)value_p = peekpoke_value.u64;
367 			break;
368 		}
369 	}
370 
371 	return (rval);
372 }
373 
374 /*
375  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
376  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
377  */
378 int
379 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
380 {
381 	switch (size) {
382 	case sizeof (uint8_t):
383 	case sizeof (uint16_t):
384 	case sizeof (uint32_t):
385 	case sizeof (uint64_t):
386 		break;
387 	default:
388 		return (DDI_FAILURE);
389 	}
390 
391 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
392 }
393 
394 int
395 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
396 {
397 	switch (size) {
398 	case sizeof (uint8_t):
399 	case sizeof (uint16_t):
400 	case sizeof (uint32_t):
401 	case sizeof (uint64_t):
402 		break;
403 	default:
404 		return (DDI_FAILURE);
405 	}
406 
407 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
408 }
409 
/* Cautious 8-bit read of *addr into *val_p; see ddi_peek(9F). */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
416 
/* Cautious 16-bit read of *addr into *val_p; see ddi_peek(9F). */
int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
423 
/* Cautious 32-bit read of *addr into *val_p; see ddi_peek(9F). */
int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
430 
/* Cautious 64-bit read of *addr into *val_p; see ddi_peek(9F). */
int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
437 
/* Cautious 8-bit write of 'val' to *addr; see ddi_poke(9F). */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
443 
/* Cautious 16-bit write of 'val' to *addr; see ddi_poke(9F). */
int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
449 
/* Cautious 32-bit write of 'val' to *addr; see ddi_poke(9F). */
int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
455 
/* Cautious 64-bit write of 'val' to *addr; see ddi_poke(9F). */
int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
461 
462 /*
463  * ddi_peekpokeio() is used primarily by the mem drivers for moving
464  * data to and from uio structures via peek and poke.  Note that we
465  * use "internal" routines ddi_peek and ddi_poke to make this go
466  * slightly faster, avoiding the call overhead ..
467  */
/*
 * Move 'len' bytes between device memory at 'addr' and the caller's uio,
 * one cautious peek/poke at a time.  The per-iteration transfer width is
 * the largest of {1, 2, 4, xfersize} bytes for which both the remaining
 * length and the current address are suitably aligned.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Cap the requested width at the machine word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd length or odd address forces a byte transfer. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest aligned size not exceeding
			 * xfersize; each case falls through to try the
			 * next narrower width.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
543 
544 /*
545  * These routines are used by drivers that do layered ioctls
546  * On sparc, they're implemented in assembler to avoid spilling
547  * register windows in the common (copyin) case ..
548  */
549 #if !defined(__sparc)
550 int
551 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
552 {
553 	if (flags & FKIOCTL)
554 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
555 	return (copyin(buf, kernbuf, size));
556 }
557 
558 int
559 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
560 {
561 	if (flags & FKIOCTL)
562 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
563 	return (copyout(buf, kernbuf, size));
564 }
565 #endif	/* !__sparc */
566 
567 /*
568  * Conversions in nexus pagesize units.  We don't duplicate the
569  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
570  * routines anyway.
571  */
/* Convert bytes to pages in the parent nexus's pagesize units (no round-up). */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}
580 
/* Convert bytes to pages in the parent nexus's pagesize units, rounding up. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}
589 
/* Convert pages to bytes in the parent nexus's pagesize units. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
598 
/*
 * Raise the interrupt priority level to spl7, returning the previous
 * level for a later ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}
604 
/* Restore the interrupt priority level saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
610 
611 /*
612  * Nexus ctlops punter
613  */
614 
615 #if !defined(__sparc)
616 /*
617  * Request bus_ctl parent to handle a bus_ctl request
618  *
619  * (The sparc version is in sparc_ddi.s)
620  */
621 int
622 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
623 {
624 	int (*fp)();
625 
626 	if (!d || !r)
627 		return (DDI_FAILURE);
628 
629 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
630 		return (DDI_FAILURE);
631 
632 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
633 	return ((*fp)(d, r, op, a, v));
634 }
635 
636 #endif
637 
638 /*
639  * DMA/DVMA setup
640  */
641 
642 #if !defined(__sparc)
643 /*
644  * Request bus_dma_ctl parent to fiddle with a dma request.
645  *
646  * (The sparc version is in sparc_subr.s)
647  */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Redirect to the cached bus_dma_ctl parent, then invoke it. */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
660 #endif
661 
662 /*
663  * For all DMA control functions, call the DMA control
664  * routine and return status.
665  *
666  * Just plain assume that the parent is to be called.
667  * If a nexus driver or a thread outside the framework
668  * of a nexus driver or a leaf driver calls these functions,
669  * it is up to them to deal with the fact that the parent's
670  * bus_dma_ctl function will be the first one called.
671  */
672 
673 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
674 
675 /*
676  * This routine is left in place to satisfy link dependencies
677  * for any 3rd party nexus drivers that rely on it.  It is never
678  * called, though.
679  */
680 /*ARGSUSED*/
681 int
682 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
683     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
684 {
685 	return (DDI_FAILURE);
686 }
687 
688 #if !defined(__sparc)
689 
690 /*
691  * The SPARC versions of these routines are done in assembler to
692  * save register windows, so they're in sparc_subr.s.
693  */
694 
/*
 * Allocate a DMA handle on behalf of rdip by routing the request to the
 * parent nexus cached in devi_bus_dma_allochdl.
 */
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
}
708 
709 int
710 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
711 {
712 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
713 
714 	if (dip != ddi_root_node())
715 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
716 
717 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
718 	return ((*funcp)(dip, rdip, handlep));
719 }
720 
/*
 * Bind a memory object described by dmareq to the DMA handle, routing the
 * request to the parent nexus cached in devi_bus_dma_bindhdl.
 */
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
}
735 
/*
 * Unbind the memory object from the DMA handle, routing the request to
 * the parent nexus cached in devi_bus_dma_unbindhdl.
 */
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(dip, rdip, handle));
}
748 
749 
/*
 * Flush/sync a range of a bound DMA object, routing the request to the
 * parent nexus cached in devi_bus_dma_flush.
 */
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
}
764 
/*
 * Activate DMA window 'win' of a partially-bound object, routing the
 * request to the parent nexus cached in devi_bus_dma_win.
 */
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
780 
/*
 * ddi_dma_sync(9F): synchronize the caller's view of a DMA object.  The
 * requesting dip is recovered from the handle itself; the sync is
 * implemented via the parent's bus_dma_flush entry point.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Route through the cached bus_dma_flush parent of the handle's dip. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
804 
/*
 * ddi_dma_unbind_handle(9F): unbind a DMA object given only the handle.
 * Note the unbind function pointer is taken from the requesting dip's
 * cached devi_bus_dma_unbindfunc, while the directing dip follows the
 * devi_bus_dma_unbindhdl routing cache.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
818 
819 #endif	/* !__sparc */
820 
821 /*
822  * DMA burst sizes, and transfer minimums
823  */
824 
825 int
826 ddi_dma_burstsizes(ddi_dma_handle_t handle)
827 {
828 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
829 
830 	if (!dimp)
831 		return (0);
832 	else
833 		return (dimp->dmai_burstsizes);
834 }
835 
836 /*
837  * Given two DMA attribute structures, apply the attributes
838  * of one to the other, following the rules of attributes
839  * and the wishes of the caller.
840  *
841  * The rules of DMA attribute structures are that you cannot
842  * make things *less* restrictive as you apply one set
843  * of attributes to another.
844  *
845  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window can only shrink: raise the low, lower the high. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	/* Counts/transfers take the tighter (smaller) limit. */
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment takes the stricter (larger) requirement. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* Only burst sizes both sides support survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	/* Minimum transfer merges to the larger requirement via maxbit(). */
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Scatter/gather list length: tighter limit wins. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	/* Granularity takes the stricter (larger) requirement. */
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
869 
870 /*
871  * mmap/segmap interface:
872  */
873 
874 /*
875  * ddi_segmap:		setup the default segment driver. Calls the drivers
876  *			XXmmap routine to validate the range to be mapped.
877  *			Return ENXIO of the range is not valid.  Create
878  *			a seg_dev segment that contains all of the
879  *			necessary information and will reference the
880  *			default segment driver routines. It returns zero
881  *			on success or non-zero on failure.
882  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* Delegate to specfs's segmap, which builds the seg_dev segment. */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
893 
894 /*
895  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
896  *			drivers. Allows each successive parent to resolve
897  *			address translations and add its mappings to the
898  *			mapping list supplied in the page structure. It
899  *			returns zero on success	or non-zero on failure.
900  */
901 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Delegate to the implementation routine with dip as both dips. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
908 
909 /*
910  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
911  *	Invokes platform specific DDI to determine whether attributes specified
912  *	in attr(9s) are	valid for the region of memory that will be made
913  *	available for direct access to user process via the mmap(2) system call.
914  */
/*
 * Validate that the access attributes in *accattrp are usable for mmap(2)
 * of register set 'rnumber' of the device 'dev'.  On success returns 0 and
 * stores the framework-selected HAT flags in *hat_flags; returns -1 on any
 * failure.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only validates/fills the handle; it does not
	 * establish an actual mapping.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
977 
978 
979 /*
980  * Property functions:	 See also, ddipropdefs.h.
981  *
982  * These functions are the framework for the property functions,
983  * i.e. they support software defined properties.  All implementation
984  * specific property handling (i.e.: self-identifying devices and
985  * PROM defined properties are handled in the implementation specific
986  * functions (defined in ddi_implfuncs.h).
987  */
988 
989 /*
990  * nopropop:	Shouldn't be called, right?
991  */
/*
 * Placeholder prop_op(9E): always reports the property as not found.
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
999 
1000 #ifdef	DDI_PROP_DEBUG
1001 int ddi_prop_debug_flag = 0;
1002 
1003 int
1004 ddi_prop_debug(int enable)
1005 {
1006 	int prev = ddi_prop_debug_flag;
1007 
1008 	if ((enable != 0) || (prev != 0))
1009 		printf("ddi_prop_debug: debugging %s\n",
1010 		    enable ? "enabled" : "disabled");
1011 	ddi_prop_debug_flag = enable;
1012 	return (prev);
1013 }
1014 
1015 #endif	/* DDI_PROP_DEBUG */
1016 
1017 /*
1018  * Search a property list for a match, if found return pointer
1019  * to matching prop struct, else return NULL.
1020  */
1021 
1022 ddi_prop_t *
1023 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1024 {
1025 	ddi_prop_t	*propp;
1026 
1027 	/*
1028 	 * find the property in child's devinfo:
1029 	 * Search order defined by this search function is first matching
1030 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1031 	 * dev == propp->prop_dev, name == propp->name, and the correct
1032 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1033 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1034 	 */
1035 	if (dev == DDI_DEV_T_NONE)
1036 		dev = DDI_DEV_T_ANY;
1037 
1038 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1039 
1040 		if (!DDI_STRSAME(propp->prop_name, name))
1041 			continue;
1042 
1043 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1044 			continue;
1045 
1046 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1047 			continue;
1048 
1049 		return (propp);
1050 	}
1051 
1052 	return ((ddi_prop_t *)0);
1053 }
1054 
1055 /*
1056  * Search for property within devnames structures
1057  */
/*
 * Search the driver-global (devnames) property list for 'name'.  Returns
 * the matching ddi_prop_t, or NULL if not found.  A concrete dev_t is
 * required to index devnamesp[]; DDI_DEV_T_ANY is rejected.
 *
 * NOTE(review): the returned propp points into the dn_global_prop_ptr
 * list but is returned after dropping dn_lock — presumably the global
 * list is stable for the caller's lifetime; confirm against callers.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * dev_t must match unless the caller asked for a wildcard
		 * (LDI_DEV_T_ANY) or rootnex-global lookup.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1105 
1106 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1107 
1108 /*
1109  * ddi_prop_search_global:
1110  *	Search the global property list within devnames
1111  *	for the named property.  Return the encoded value.
1112  */
1113 static int
1114 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1115     void *valuep, uint_t *lengthp)
1116 {
1117 	ddi_prop_t	*propp;
1118 	caddr_t		buffer;
1119 
1120 	propp =  i_ddi_search_global_prop(dev, name, flags);
1121 
1122 	/* Property NOT found, bail */
1123 	if (propp == (ddi_prop_t *)0)
1124 		return (DDI_PROP_NOT_FOUND);
1125 
1126 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1127 		return (DDI_PROP_UNDEFINED);
1128 
1129 	if ((buffer = kmem_alloc(propp->prop_len,
1130 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1131 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1132 		return (DDI_PROP_NO_MEMORY);
1133 	}
1134 
1135 	/*
1136 	 * Return the encoded data
1137 	 */
1138 	*(caddr_t *)valuep = buffer;
1139 	*lengthp = propp->prop_len;
1140 	bcopy(propp->prop_val, buffer, propp->prop_len);
1141 
1142 	return (DDI_PROP_SUCCESS);
1143 }
1144 
1145 /*
1146  * ddi_prop_search_common:	Lookup and return the encoded value
1147  */
1148 int
1149 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1150     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1151 {
1152 	ddi_prop_t	*propp;
1153 	int		i;
1154 	caddr_t		buffer = NULL;
1155 	caddr_t		prealloc = NULL;
1156 	int		plength = 0;
1157 	dev_info_t	*pdip;
1158 	int		(*bop)();
1159 
1160 	/*CONSTANTCONDITION*/
1161 	while (1)  {
1162 
1163 		mutex_enter(&(DEVI(dip)->devi_lock));
1164 
1165 
1166 		/*
1167 		 * find the property in child's devinfo:
1168 		 * Search order is:
1169 		 *	1. driver defined properties
1170 		 *	2. system defined properties
1171 		 *	3. driver global properties
1172 		 *	4. boot defined properties
1173 		 */
1174 
1175 		propp = i_ddi_prop_search(dev, name, flags,
1176 		    &(DEVI(dip)->devi_drv_prop_ptr));
1177 		if (propp == NULL)  {
1178 			propp = i_ddi_prop_search(dev, name, flags,
1179 			    &(DEVI(dip)->devi_sys_prop_ptr));
1180 		}
1181 		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1182 			propp = i_ddi_prop_search(dev, name, flags,
1183 			    &DEVI(dip)->devi_global_prop_list->prop_list);
1184 		}
1185 
1186 		if (propp == NULL)  {
1187 			propp = i_ddi_prop_search(dev, name, flags,
1188 			    &(DEVI(dip)->devi_hw_prop_ptr));
1189 		}
1190 
1191 		/*
1192 		 * Software property found?
1193 		 */
1194 		if (propp != (ddi_prop_t *)0)	{
1195 
1196 			/*
1197 			 * If explicit undefine, return now.
1198 			 */
1199 			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1200 				mutex_exit(&(DEVI(dip)->devi_lock));
1201 				if (prealloc)
1202 					kmem_free(prealloc, plength);
1203 				return (DDI_PROP_UNDEFINED);
1204 			}
1205 
1206 			/*
1207 			 * If we only want to know if it exists, return now
1208 			 */
1209 			if (prop_op == PROP_EXISTS) {
1210 				mutex_exit(&(DEVI(dip)->devi_lock));
1211 				ASSERT(prealloc == NULL);
1212 				return (DDI_PROP_SUCCESS);
1213 			}
1214 
1215 			/*
1216 			 * If length only request or prop length == 0,
1217 			 * service request and return now.
1218 			 */
1219 			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1220 				*lengthp = propp->prop_len;
1221 
1222 				/*
1223 				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1224 				 * that means prop_len is 0, so set valuep
1225 				 * also to NULL
1226 				 */
1227 				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1228 					*(caddr_t *)valuep = NULL;
1229 
1230 				mutex_exit(&(DEVI(dip)->devi_lock));
1231 				if (prealloc)
1232 					kmem_free(prealloc, plength);
1233 				return (DDI_PROP_SUCCESS);
1234 			}
1235 
1236 			/*
1237 			 * If LEN_AND_VAL_ALLOC and the request can sleep,
1238 			 * drop the mutex, allocate the buffer, and go
1239 			 * through the loop again.  If we already allocated
1240 			 * the buffer, and the size of the property changed,
1241 			 * keep trying...
1242 			 */
1243 			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1244 			    (flags & DDI_PROP_CANSLEEP))  {
1245 				if (prealloc && (propp->prop_len != plength)) {
1246 					kmem_free(prealloc, plength);
1247 					prealloc = NULL;
1248 				}
1249 				if (prealloc == NULL)  {
1250 					plength = propp->prop_len;
1251 					mutex_exit(&(DEVI(dip)->devi_lock));
1252 					prealloc = kmem_alloc(plength,
1253 					    KM_SLEEP);
1254 					continue;
1255 				}
1256 			}
1257 
1258 			/*
1259 			 * Allocate buffer, if required.  Either way,
1260 			 * set `buffer' variable.
1261 			 */
1262 			i = *lengthp;			/* Get callers length */
1263 			*lengthp = propp->prop_len;	/* Set callers length */
1264 
1265 			switch (prop_op) {
1266 
1267 			case PROP_LEN_AND_VAL_ALLOC:
1268 
1269 				if (prealloc == NULL) {
1270 					buffer = kmem_alloc(propp->prop_len,
1271 					    KM_NOSLEEP);
1272 				} else {
1273 					buffer = prealloc;
1274 				}
1275 
1276 				if (buffer == NULL)  {
1277 					mutex_exit(&(DEVI(dip)->devi_lock));
1278 					cmn_err(CE_CONT, prop_no_mem_msg, name);
1279 					return (DDI_PROP_NO_MEMORY);
1280 				}
1281 				/* Set callers buf ptr */
1282 				*(caddr_t *)valuep = buffer;
1283 				break;
1284 
1285 			case PROP_LEN_AND_VAL_BUF:
1286 
1287 				if (propp->prop_len > (i)) {
1288 					mutex_exit(&(DEVI(dip)->devi_lock));
1289 					return (DDI_PROP_BUF_TOO_SMALL);
1290 				}
1291 
1292 				buffer = valuep;  /* Get callers buf ptr */
1293 				break;
1294 
1295 			default:
1296 				break;
1297 			}
1298 
1299 			/*
1300 			 * Do the copy.
1301 			 */
1302 			if (buffer != NULL)
1303 				bcopy(propp->prop_val, buffer, propp->prop_len);
1304 			mutex_exit(&(DEVI(dip)->devi_lock));
1305 			return (DDI_PROP_SUCCESS);
1306 		}
1307 
1308 		mutex_exit(&(DEVI(dip)->devi_lock));
1309 		if (prealloc)
1310 			kmem_free(prealloc, plength);
1311 		prealloc = NULL;
1312 
1313 		/*
1314 		 * Prop not found, call parent bus_ops to deal with possible
1315 		 * h/w layer (possible PROM defined props, etc.) and to
1316 		 * possibly ascend the hierarchy, if allowed by flags.
1317 		 */
1318 		pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1319 
1320 		/*
1321 		 * One last call for the root driver PROM props?
1322 		 */
1323 		if (dip == ddi_root_node())  {
1324 			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1325 			    flags, name, valuep, (int *)lengthp));
1326 		}
1327 
1328 		/*
1329 		 * We may have been called to check for properties
1330 		 * within a single devinfo node that has no parent -
1331 		 * see make_prop()
1332 		 */
1333 		if (pdip == NULL) {
1334 			ASSERT((flags &
1335 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1336 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1337 			return (DDI_PROP_NOT_FOUND);
1338 		}
1339 
1340 		/*
1341 		 * Instead of recursing, we do iterative calls up the tree.
1342 		 * As a bit of optimization, skip the bus_op level if the
1343 		 * node is a s/w node and if the parent's bus_prop_op function
1344 		 * is `ddi_bus_prop_op', because we know that in this case,
1345 		 * this function does nothing.
1346 		 *
1347 		 * 4225415: If the parent isn't attached, or the child
1348 		 * hasn't been named by the parent yet, use the default
1349 		 * ddi_bus_prop_op as a proxy for the parent.  This
1350 		 * allows property lookups in any child/parent state to
1351 		 * include 'prom' and inherited properties, even when
1352 		 * there are no drivers attached to the child or parent.
1353 		 */
1354 
1355 		bop = ddi_bus_prop_op;
1356 		if (i_ddi_devi_attached(pdip) &&
1357 		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
1358 			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1359 
1360 		i = DDI_PROP_NOT_FOUND;
1361 
1362 		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1363 			i = (*bop)(dev, pdip, dip, prop_op,
1364 			    flags | DDI_PROP_DONTPASS,
1365 			    name, valuep, lengthp);
1366 		}
1367 
1368 		if ((flags & DDI_PROP_DONTPASS) ||
1369 		    (i != DDI_PROP_NOT_FOUND))
1370 			return (i);
1371 
1372 		dip = pdip;
1373 	}
1374 	/*NOTREACHED*/
1375 }
1376 
1377 
1378 /*
1379  * ddi_prop_op: The basic property operator for drivers.
1380  *
1381  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1382  *
1383  *	prop_op			valuep
1384  *	------			------
1385  *
1386  *	PROP_LEN		<unused>
1387  *
1388  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1389  *
1390  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1391  *				address of allocated buffer, if successful)
1392  */
1393 int
1394 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1395     char *name, caddr_t valuep, int *lengthp)
1396 {
1397 	int	i;
1398 
1399 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1400 
1401 	/*
1402 	 * If this was originally an LDI prop lookup then we bail here.
1403 	 * The reason is that the LDI property lookup interfaces first call
1404 	 * a drivers prop_op() entry point to allow it to override
1405 	 * properties.  But if we've made it here, then the driver hasn't
1406 	 * overriden any properties.  We don't want to continue with the
1407 	 * property search here because we don't have any type inforamtion.
1408 	 * When we return failure, the LDI interfaces will then proceed to
1409 	 * call the typed property interfaces to look up the property.
1410 	 */
1411 	if (mod_flags & DDI_PROP_DYNAMIC)
1412 		return (DDI_PROP_NOT_FOUND);
1413 
1414 	/*
1415 	 * check for pre-typed property consumer asking for typed property:
1416 	 * see e_ddi_getprop_int64.
1417 	 */
1418 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1419 		mod_flags |= DDI_PROP_TYPE_INT64;
1420 	mod_flags |= DDI_PROP_TYPE_ANY;
1421 
1422 	i = ddi_prop_search_common(dev, dip, prop_op,
1423 	    mod_flags, name, valuep, (uint_t *)lengthp);
1424 	if (i == DDI_PROP_FOUND_1275)
1425 		return (DDI_PROP_SUCCESS);
1426 	return (i);
1427 }
1428 
1429 /*
1430  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1431  * maintain size in number of blksize blocks.  Provides a dynamic property
1432  * implementation for size oriented properties based on nblocks64 and blksize
1433  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1434  * is too large.  This interface should not be used with a nblocks64 that
1435  * represents the driver's idea of how to represent unknown, if nblocks is
1436  * unknown use ddi_prop_op.
1437  */
1438 int
1439 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1440     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1441     uint64_t nblocks64, uint_t blksize)
1442 {
1443 	uint64_t size64;
1444 	int	blkshift;
1445 
1446 	/* convert block size to shift value */
1447 	ASSERT(BIT_ONLYONESET(blksize));
1448 	blkshift = highbit(blksize) - 1;
1449 
1450 	/*
1451 	 * There is no point in supporting nblocks64 values that don't have
1452 	 * an accurate uint64_t byte count representation.
1453 	 */
1454 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1455 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1456 		    name, valuep, lengthp));
1457 
1458 	size64 = nblocks64 << blkshift;
1459 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1460 	    name, valuep, lengthp, size64, blksize));
1461 }
1462 
1463 /*
1464  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1465  */
1466 int
1467 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1468     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1469 {
1470 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1471 	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1472 }
1473 
1474 /*
1475  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1476  * maintain size in bytes. Provides a of dynamic property implementation for
1477  * size oriented properties based on size64 value and blksize passed in by the
1478  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1479  * should not be used with a size64 that represents the driver's idea of how
1480  * to represent unknown, if size is unknown use ddi_prop_op.
1481  *
1482  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1483  * integers. While the most likely interface to request them ([bc]devi_size)
1484  * is declared int (signed) there is no enforcement of this, which means we
1485  * can't enforce limitations here without risking regression.
1486  */
1487 int
1488 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1489     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1490     uint_t blksize)
1491 {
1492 	uint64_t nblocks64;
1493 	int	callers_length;
1494 	caddr_t	buffer;
1495 	int	blkshift;
1496 
1497 	/*
1498 	 * This is a kludge to support capture of size(9P) pure dynamic
1499 	 * properties in snapshots for non-cmlb code (without exposing
1500 	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1501 	 * should be removed.
1502 	 */
1503 	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1504 		static i_ddi_prop_dyn_t prop_dyn_size[] = {
1505 		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
1506 		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
1507 		    {NULL}
1508 		};
1509 		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1510 	}
1511 
1512 	/* convert block size to shift value */
1513 	ASSERT(BIT_ONLYONESET(blksize));
1514 	blkshift = highbit(blksize) - 1;
1515 
1516 	/* compute DEV_BSIZE nblocks value */
1517 	nblocks64 = size64 >> blkshift;
1518 
1519 	/* get callers length, establish length of our dynamic properties */
1520 	callers_length = *lengthp;
1521 
1522 	if (strcmp(name, "Nblocks") == 0)
1523 		*lengthp = sizeof (uint64_t);
1524 	else if (strcmp(name, "Size") == 0)
1525 		*lengthp = sizeof (uint64_t);
1526 	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1527 		*lengthp = sizeof (uint32_t);
1528 	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1529 		*lengthp = sizeof (uint32_t);
1530 	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1531 		*lengthp = sizeof (uint32_t);
1532 	else {
1533 		/* fallback to ddi_prop_op */
1534 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1535 		    name, valuep, lengthp));
1536 	}
1537 
1538 	/* service request for the length of the property */
1539 	if (prop_op == PROP_LEN)
1540 		return (DDI_PROP_SUCCESS);
1541 
1542 	switch (prop_op) {
1543 	case PROP_LEN_AND_VAL_ALLOC:
1544 		if ((buffer = kmem_alloc(*lengthp,
1545 		    (mod_flags & DDI_PROP_CANSLEEP) ?
1546 		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1547 			return (DDI_PROP_NO_MEMORY);
1548 
1549 		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1550 		break;
1551 
1552 	case PROP_LEN_AND_VAL_BUF:
1553 		/* the length of the property and the request must match */
1554 		if (callers_length != *lengthp)
1555 			return (DDI_PROP_INVAL_ARG);
1556 
1557 		buffer = valuep;		/* get callers buf ptr */
1558 		break;
1559 
1560 	default:
1561 		return (DDI_PROP_INVAL_ARG);
1562 	}
1563 
1564 	/* transfer the value into the buffer */
1565 	if (strcmp(name, "Nblocks") == 0)
1566 		*((uint64_t *)buffer) = nblocks64;
1567 	else if (strcmp(name, "Size") == 0)
1568 		*((uint64_t *)buffer) = size64;
1569 	else if (strcmp(name, "nblocks") == 0)
1570 		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1571 	else if (strcmp(name, "size") == 0)
1572 		*((uint32_t *)buffer) = (uint32_t)size64;
1573 	else if (strcmp(name, "blksize") == 0)
1574 		*((uint32_t *)buffer) = (uint32_t)blksize;
1575 	return (DDI_PROP_SUCCESS);
1576 }
1577 
1578 /*
1579  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1580  */
1581 int
1582 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1583     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1584 {
1585 	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1586 	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1587 }
1588 
1589 /*
1590  * Variable length props...
1591  */
1592 
1593 /*
1594  * ddi_getlongprop:	Get variable length property len+val into a buffer
1595  *		allocated by property provider via kmem_alloc. Requester
1596  *		is responsible for freeing returned property via kmem_free.
1597  *
1598  *	Arguments:
1599  *
1600  *	dev_t:	Input:	dev_t of property.
1601  *	dip:	Input:	dev_info_t pointer of child.
1602  *	flags:	Input:	Possible flag modifiers are:
1603  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1604  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1605  *	name:	Input:	name of property.
1606  *	valuep:	Output:	Addr of callers buffer pointer.
1607  *	lengthp:Output:	*lengthp will contain prop length on exit.
1608  *
1609  *	Possible Returns:
1610  *
1611  *		DDI_PROP_SUCCESS:	Prop found and returned.
1612  *		DDI_PROP_NOT_FOUND:	Prop not found
1613  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1614  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1615  */
1616 
1617 int
1618 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1619     char *name, caddr_t valuep, int *lengthp)
1620 {
1621 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1622 	    flags, name, valuep, lengthp));
1623 }
1624 
1625 /*
1626  *
1627  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1628  *				buffer. (no memory allocation by provider).
1629  *
1630  *	dev_t:	Input:	dev_t of property.
1631  *	dip:	Input:	dev_info_t pointer of child.
1632  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1633  *	name:	Input:	name of property
1634  *	valuep:	Input:	ptr to callers buffer.
1635  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1636  *			actual length of property on exit.
1637  *
1638  *	Possible returns:
1639  *
1640  *		DDI_PROP_SUCCESS	Prop found and returned
1641  *		DDI_PROP_NOT_FOUND	Prop not found
1642  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1643  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1644  *					no value returned, but actual prop
1645  *					length returned in *lengthp
1646  *
1647  */
1648 
1649 int
1650 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1651     char *name, caddr_t valuep, int *lengthp)
1652 {
1653 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1654 	    flags, name, valuep, lengthp));
1655 }
1656 
1657 /*
1658  * Integer/boolean sized props.
1659  *
1660  * Call is value only... returns found boolean or int sized prop value or
1661  * defvalue if prop not found or is wrong length or is explicitly undefined.
1662  * Only flag is DDI_PROP_DONTPASS...
1663  *
1664  * By convention, this interface returns boolean (0) sized properties
1665  * as value (int)1.
1666  *
1667  * This never returns an error, if property not found or specifically
1668  * undefined, the input `defvalue' is returned.
1669  */
1670 
1671 int
1672 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1673 {
1674 	int	propvalue = defvalue;
1675 	int	proplength = sizeof (int);
1676 	int	error;
1677 
1678 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1679 	    flags, name, (caddr_t)&propvalue, &proplength);
1680 
1681 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1682 		propvalue = 1;
1683 
1684 	return (propvalue);
1685 }
1686 
1687 /*
1688  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1689  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1690  */
1691 
1692 int
1693 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1694 {
1695 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1696 }
1697 
1698 /*
1699  * Allocate a struct prop_driver_data, along with 'size' bytes
1700  * for decoded property data.  This structure is freed by
1701  * calling ddi_prop_free(9F).
1702  */
1703 static void *
1704 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1705 {
1706 	struct prop_driver_data *pdd;
1707 
1708 	/*
1709 	 * Allocate a structure with enough memory to store the decoded data.
1710 	 */
1711 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1712 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1713 	pdd->pdd_prop_free = prop_free;
1714 
1715 	/*
1716 	 * Return a pointer to the location to put the decoded data.
1717 	 */
1718 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1719 }
1720 
1721 /*
1722  * Allocated the memory needed to store the encoded data in the property
1723  * handle.
1724  */
1725 static int
1726 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1727 {
1728 	/*
1729 	 * If size is zero, then set data to NULL and size to 0.  This
1730 	 * is a boolean property.
1731 	 */
1732 	if (size == 0) {
1733 		ph->ph_size = 0;
1734 		ph->ph_data = NULL;
1735 		ph->ph_cur_pos = NULL;
1736 		ph->ph_save_pos = NULL;
1737 	} else {
1738 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1739 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1740 			if (ph->ph_data == NULL)
1741 				return (DDI_PROP_NO_MEMORY);
1742 		} else
1743 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1744 		ph->ph_size = size;
1745 		ph->ph_cur_pos = ph->ph_data;
1746 		ph->ph_save_pos = ph->ph_data;
1747 	}
1748 	return (DDI_PROP_SUCCESS);
1749 }
1750 
1751 /*
1752  * Free the space allocated by the lookup routines.  Each lookup routine
1753  * returns a pointer to the decoded data to the driver.  The driver then
1754  * passes this pointer back to us.  This data actually lives in a struct
1755  * prop_driver_data.  We use negative indexing to find the beginning of
1756  * the structure and then free the entire structure using the size and
1757  * the free routine stored in the structure.
1758  */
1759 void
1760 ddi_prop_free(void *datap)
1761 {
1762 	struct prop_driver_data *pdd;
1763 
1764 	/*
1765 	 * Get the structure
1766 	 */
1767 	pdd = (struct prop_driver_data *)
1768 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
1769 	/*
1770 	 * Call the free routine to free it
1771 	 */
1772 	(*pdd->pdd_prop_free)(pdd);
1773 }
1774 
1775 /*
1776  * Free the data associated with an array of ints,
1777  * allocated with ddi_prop_decode_alloc().
1778  */
1779 static void
1780 ddi_prop_free_ints(struct prop_driver_data *pdd)
1781 {
1782 	kmem_free(pdd, pdd->pdd_size);
1783 }
1784 
1785 /*
1786  * Free a single string property or a single string contained within
1787  * the argv style return value of an array of strings.
1788  */
1789 static void
1790 ddi_prop_free_string(struct prop_driver_data *pdd)
1791 {
1792 	kmem_free(pdd, pdd->pdd_size);
1793 
1794 }
1795 
1796 /*
1797  * Free an array of strings.
1798  */
1799 static void
1800 ddi_prop_free_strings(struct prop_driver_data *pdd)
1801 {
1802 	kmem_free(pdd, pdd->pdd_size);
1803 }
1804 
1805 /*
1806  * Free the data associated with an array of bytes.
1807  */
1808 static void
1809 ddi_prop_free_bytes(struct prop_driver_data *pdd)
1810 {
1811 	kmem_free(pdd, pdd->pdd_size);
1812 }
1813 
1814 /*
1815  * Reset the current location pointer in the property handle to the
1816  * beginning of the data.
1817  */
1818 void
1819 ddi_prop_reset_pos(prop_handle_t *ph)
1820 {
1821 	ph->ph_cur_pos = ph->ph_data;
1822 	ph->ph_save_pos = ph->ph_data;
1823 }
1824 
1825 /*
1826  * Restore the current location pointer in the property handle to the
1827  * saved position.
1828  */
1829 void
1830 ddi_prop_save_pos(prop_handle_t *ph)
1831 {
1832 	ph->ph_save_pos = ph->ph_cur_pos;
1833 }
1834 
1835 /*
1836  * Save the location that the current location pointer is pointing to..
1837  */
1838 void
1839 ddi_prop_restore_pos(prop_handle_t *ph)
1840 {
1841 	ph->ph_cur_pos = ph->ph_save_pos;
1842 }
1843 
1844 /*
1845  * Property encode/decode functions
1846  */
1847 
1848 /*
1849  * Decode a single integer property
1850  */
1851 static int
1852 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1853 {
1854 	int	i;
1855 	int	tmp;
1856 
1857 	/*
1858 	 * If there is nothing to decode return an error
1859 	 */
1860 	if (ph->ph_size == 0)
1861 		return (DDI_PROP_END_OF_DATA);
1862 
1863 	/*
1864 	 * Decode the property as a single integer and return it
1865 	 * in data if we were able to decode it.
1866 	 */
1867 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1868 	if (i < DDI_PROP_RESULT_OK) {
1869 		switch (i) {
1870 		case DDI_PROP_RESULT_EOF:
1871 			return (DDI_PROP_END_OF_DATA);
1872 
1873 		case DDI_PROP_RESULT_ERROR:
1874 			return (DDI_PROP_CANNOT_DECODE);
1875 		}
1876 	}
1877 
1878 	*(int *)data = tmp;
1879 	*nelements = 1;
1880 	return (DDI_PROP_SUCCESS);
1881 }
1882 
1883 /*
1884  * Decode a single 64 bit integer property
1885  */
1886 static int
1887 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1888 {
1889 	int	i;
1890 	int64_t	tmp;
1891 
1892 	/*
1893 	 * If there is nothing to decode return an error
1894 	 */
1895 	if (ph->ph_size == 0)
1896 		return (DDI_PROP_END_OF_DATA);
1897 
1898 	/*
1899 	 * Decode the property as a single integer and return it
1900 	 * in data if we were able to decode it.
1901 	 */
1902 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1903 	if (i < DDI_PROP_RESULT_OK) {
1904 		switch (i) {
1905 		case DDI_PROP_RESULT_EOF:
1906 			return (DDI_PROP_END_OF_DATA);
1907 
1908 		case DDI_PROP_RESULT_ERROR:
1909 			return (DDI_PROP_CANNOT_DECODE);
1910 		}
1911 	}
1912 
1913 	*(int64_t *)data = tmp;
1914 	*nelements = 1;
1915 	return (DDI_PROP_SUCCESS);
1916 }
1917 
1918 /*
1919  * Decode an array of integers property
1920  */
1921 static int
1922 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1923 {
1924 	int	i;
1925 	int	cnt = 0;
1926 	int	*tmp;
1927 	int	*intp;
1928 	int	n;
1929 
1930 	/*
1931 	 * Figure out how many array elements there are by going through the
1932 	 * data without decoding it first and counting.
1933 	 */
1934 	for (;;) {
1935 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
1936 		if (i < 0)
1937 			break;
1938 		cnt++;
1939 	}
1940 
1941 	/*
1942 	 * If there are no elements return an error
1943 	 */
1944 	if (cnt == 0)
1945 		return (DDI_PROP_END_OF_DATA);
1946 
1947 	/*
1948 	 * If we cannot skip through the data, we cannot decode it
1949 	 */
1950 	if (i == DDI_PROP_RESULT_ERROR)
1951 		return (DDI_PROP_CANNOT_DECODE);
1952 
1953 	/*
1954 	 * Reset the data pointer to the beginning of the encoded data
1955 	 */
1956 	ddi_prop_reset_pos(ph);
1957 
1958 	/*
1959 	 * Allocated memory to store the decoded value in.
1960 	 */
1961 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
1962 	    ddi_prop_free_ints);
1963 
1964 	/*
1965 	 * Decode each element and place it in the space we just allocated
1966 	 */
1967 	tmp = intp;
1968 	for (n = 0; n < cnt; n++, tmp++) {
1969 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
1970 		if (i < DDI_PROP_RESULT_OK) {
1971 			/*
1972 			 * Free the space we just allocated
1973 			 * and return an error.
1974 			 */
1975 			ddi_prop_free(intp);
1976 			switch (i) {
1977 			case DDI_PROP_RESULT_EOF:
1978 				return (DDI_PROP_END_OF_DATA);
1979 
1980 			case DDI_PROP_RESULT_ERROR:
1981 				return (DDI_PROP_CANNOT_DECODE);
1982 			}
1983 		}
1984 	}
1985 
1986 	*nelements = cnt;
1987 	*(int **)data = intp;
1988 
1989 	return (DDI_PROP_SUCCESS);
1990 }
1991 
1992 /*
1993  * Decode a 64 bit integer array property
1994  */
1995 static int
1996 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
1997 {
1998 	int	i;
1999 	int	n;
2000 	int	cnt = 0;
2001 	int64_t	*tmp;
2002 	int64_t	*intp;
2003 
2004 	/*
2005 	 * Count the number of array elements by going
2006 	 * through the data without decoding it.
2007 	 */
2008 	for (;;) {
2009 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2010 		if (i < 0)
2011 			break;
2012 		cnt++;
2013 	}
2014 
2015 	/*
2016 	 * If there are no elements return an error
2017 	 */
2018 	if (cnt == 0)
2019 		return (DDI_PROP_END_OF_DATA);
2020 
2021 	/*
2022 	 * If we cannot skip through the data, we cannot decode it
2023 	 */
2024 	if (i == DDI_PROP_RESULT_ERROR)
2025 		return (DDI_PROP_CANNOT_DECODE);
2026 
2027 	/*
2028 	 * Reset the data pointer to the beginning of the encoded data
2029 	 */
2030 	ddi_prop_reset_pos(ph);
2031 
2032 	/*
2033 	 * Allocate memory to store the decoded value.
2034 	 */
2035 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2036 	    ddi_prop_free_ints);
2037 
2038 	/*
2039 	 * Decode each element and place it in the space allocated
2040 	 */
2041 	tmp = intp;
2042 	for (n = 0; n < cnt; n++, tmp++) {
2043 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2044 		if (i < DDI_PROP_RESULT_OK) {
2045 			/*
2046 			 * Free the space we just allocated
2047 			 * and return an error.
2048 			 */
2049 			ddi_prop_free(intp);
2050 			switch (i) {
2051 			case DDI_PROP_RESULT_EOF:
2052 				return (DDI_PROP_END_OF_DATA);
2053 
2054 			case DDI_PROP_RESULT_ERROR:
2055 				return (DDI_PROP_CANNOT_DECODE);
2056 			}
2057 		}
2058 	}
2059 
2060 	*nelements = cnt;
2061 	*(int64_t **)data = intp;
2062 
2063 	return (DDI_PROP_SUCCESS);
2064 }
2065 
2066 /*
2067  * Encode an array of integers property (Can be one element)
2068  */
2069 int
2070 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2071 {
2072 	int	i;
2073 	int	*tmp;
2074 	int	cnt;
2075 	int	size;
2076 
2077 	/*
2078 	 * If there is no data, we cannot do anything
2079 	 */
2080 	if (nelements == 0)
2081 		return (DDI_PROP_CANNOT_ENCODE);
2082 
2083 	/*
2084 	 * Get the size of an encoded int.
2085 	 */
2086 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2087 
2088 	if (size < DDI_PROP_RESULT_OK) {
2089 		switch (size) {
2090 		case DDI_PROP_RESULT_EOF:
2091 			return (DDI_PROP_END_OF_DATA);
2092 
2093 		case DDI_PROP_RESULT_ERROR:
2094 			return (DDI_PROP_CANNOT_ENCODE);
2095 		}
2096 	}
2097 
2098 	/*
2099 	 * Allocate space in the handle to store the encoded int.
2100 	 */
2101 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2102 	    DDI_PROP_SUCCESS)
2103 		return (DDI_PROP_NO_MEMORY);
2104 
2105 	/*
2106 	 * Encode the array of ints.
2107 	 */
2108 	tmp = (int *)data;
2109 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2110 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2111 		if (i < DDI_PROP_RESULT_OK) {
2112 			switch (i) {
2113 			case DDI_PROP_RESULT_EOF:
2114 				return (DDI_PROP_END_OF_DATA);
2115 
2116 			case DDI_PROP_RESULT_ERROR:
2117 				return (DDI_PROP_CANNOT_ENCODE);
2118 			}
2119 		}
2120 	}
2121 
2122 	return (DDI_PROP_SUCCESS);
2123 }
2124 
2125 
2126 /*
2127  * Encode a 64 bit integer array property
2128  */
2129 int
2130 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2131 {
2132 	int i;
2133 	int cnt;
2134 	int size;
2135 	int64_t *tmp;
2136 
2137 	/*
2138 	 * If there is no data, we cannot do anything
2139 	 */
2140 	if (nelements == 0)
2141 		return (DDI_PROP_CANNOT_ENCODE);
2142 
2143 	/*
2144 	 * Get the size of an encoded 64 bit int.
2145 	 */
2146 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2147 
2148 	if (size < DDI_PROP_RESULT_OK) {
2149 		switch (size) {
2150 		case DDI_PROP_RESULT_EOF:
2151 			return (DDI_PROP_END_OF_DATA);
2152 
2153 		case DDI_PROP_RESULT_ERROR:
2154 			return (DDI_PROP_CANNOT_ENCODE);
2155 		}
2156 	}
2157 
2158 	/*
2159 	 * Allocate space in the handle to store the encoded int.
2160 	 */
2161 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2162 	    DDI_PROP_SUCCESS)
2163 		return (DDI_PROP_NO_MEMORY);
2164 
2165 	/*
2166 	 * Encode the array of ints.
2167 	 */
2168 	tmp = (int64_t *)data;
2169 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2170 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2171 		if (i < DDI_PROP_RESULT_OK) {
2172 			switch (i) {
2173 			case DDI_PROP_RESULT_EOF:
2174 				return (DDI_PROP_END_OF_DATA);
2175 
2176 			case DDI_PROP_RESULT_ERROR:
2177 				return (DDI_PROP_CANNOT_ENCODE);
2178 			}
2179 		}
2180 	}
2181 
2182 	return (DDI_PROP_SUCCESS);
2183 }
2184 
2185 /*
2186  * Decode a single string property
2187  */
2188 static int
2189 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2190 {
2191 	char		*tmp;
2192 	char		*str;
2193 	int		i;
2194 	int		size;
2195 
2196 	/*
2197 	 * If there is nothing to decode return an error
2198 	 */
2199 	if (ph->ph_size == 0)
2200 		return (DDI_PROP_END_OF_DATA);
2201 
2202 	/*
2203 	 * Get the decoded size of the encoded string.
2204 	 */
2205 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2206 	if (size < DDI_PROP_RESULT_OK) {
2207 		switch (size) {
2208 		case DDI_PROP_RESULT_EOF:
2209 			return (DDI_PROP_END_OF_DATA);
2210 
2211 		case DDI_PROP_RESULT_ERROR:
2212 			return (DDI_PROP_CANNOT_DECODE);
2213 		}
2214 	}
2215 
2216 	/*
2217 	 * Allocated memory to store the decoded value in.
2218 	 */
2219 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2220 
2221 	ddi_prop_reset_pos(ph);
2222 
2223 	/*
2224 	 * Decode the str and place it in the space we just allocated
2225 	 */
2226 	tmp = str;
2227 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2228 	if (i < DDI_PROP_RESULT_OK) {
2229 		/*
2230 		 * Free the space we just allocated
2231 		 * and return an error.
2232 		 */
2233 		ddi_prop_free(str);
2234 		switch (i) {
2235 		case DDI_PROP_RESULT_EOF:
2236 			return (DDI_PROP_END_OF_DATA);
2237 
2238 		case DDI_PROP_RESULT_ERROR:
2239 			return (DDI_PROP_CANNOT_DECODE);
2240 		}
2241 	}
2242 
2243 	*(char **)data = str;
2244 	*nelements = 1;
2245 
2246 	return (DDI_PROP_SUCCESS);
2247 }
2248 
2249 /*
2250  * Decode an array of strings.
2251  */
2252 int
2253 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2254 {
2255 	int		cnt = 0;
2256 	char		**strs;
2257 	char		**tmp;
2258 	char		*ptr;
2259 	int		i;
2260 	int		n;
2261 	int		size;
2262 	size_t		nbytes;
2263 
2264 	/*
2265 	 * Figure out how many array elements there are by going through the
2266 	 * data without decoding it first and counting.
2267 	 */
2268 	for (;;) {
2269 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2270 		if (i < 0)
2271 			break;
2272 		cnt++;
2273 	}
2274 
2275 	/*
2276 	 * If there are no elements return an error
2277 	 */
2278 	if (cnt == 0)
2279 		return (DDI_PROP_END_OF_DATA);
2280 
2281 	/*
2282 	 * If we cannot skip through the data, we cannot decode it
2283 	 */
2284 	if (i == DDI_PROP_RESULT_ERROR)
2285 		return (DDI_PROP_CANNOT_DECODE);
2286 
2287 	/*
2288 	 * Reset the data pointer to the beginning of the encoded data
2289 	 */
2290 	ddi_prop_reset_pos(ph);
2291 
2292 	/*
2293 	 * Figure out how much memory we need for the sum total
2294 	 */
2295 	nbytes = (cnt + 1) * sizeof (char *);
2296 
2297 	for (n = 0; n < cnt; n++) {
2298 		/*
2299 		 * Get the decoded size of the current encoded string.
2300 		 */
2301 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2302 		if (size < DDI_PROP_RESULT_OK) {
2303 			switch (size) {
2304 			case DDI_PROP_RESULT_EOF:
2305 				return (DDI_PROP_END_OF_DATA);
2306 
2307 			case DDI_PROP_RESULT_ERROR:
2308 				return (DDI_PROP_CANNOT_DECODE);
2309 			}
2310 		}
2311 
2312 		nbytes += size;
2313 	}
2314 
2315 	/*
2316 	 * Allocate memory in which to store the decoded strings.
2317 	 */
2318 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2319 
2320 	/*
2321 	 * Set up pointers for each string by figuring out yet
2322 	 * again how long each string is.
2323 	 */
2324 	ddi_prop_reset_pos(ph);
2325 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2326 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2327 		/*
2328 		 * Get the decoded size of the current encoded string.
2329 		 */
2330 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2331 		if (size < DDI_PROP_RESULT_OK) {
2332 			ddi_prop_free(strs);
2333 			switch (size) {
2334 			case DDI_PROP_RESULT_EOF:
2335 				return (DDI_PROP_END_OF_DATA);
2336 
2337 			case DDI_PROP_RESULT_ERROR:
2338 				return (DDI_PROP_CANNOT_DECODE);
2339 			}
2340 		}
2341 
2342 		*tmp = ptr;
2343 		ptr += size;
2344 	}
2345 
2346 	/*
2347 	 * String array is terminated by a NULL
2348 	 */
2349 	*tmp = NULL;
2350 
2351 	/*
2352 	 * Finally, we can decode each string
2353 	 */
2354 	ddi_prop_reset_pos(ph);
2355 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2356 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2357 		if (i < DDI_PROP_RESULT_OK) {
2358 			/*
2359 			 * Free the space we just allocated
2360 			 * and return an error
2361 			 */
2362 			ddi_prop_free(strs);
2363 			switch (i) {
2364 			case DDI_PROP_RESULT_EOF:
2365 				return (DDI_PROP_END_OF_DATA);
2366 
2367 			case DDI_PROP_RESULT_ERROR:
2368 				return (DDI_PROP_CANNOT_DECODE);
2369 			}
2370 		}
2371 	}
2372 
2373 	*(char ***)data = strs;
2374 	*nelements = cnt;
2375 
2376 	return (DDI_PROP_SUCCESS);
2377 }
2378 
2379 /*
2380  * Encode a string.
2381  */
2382 int
2383 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2384 {
2385 	char		**tmp;
2386 	int		size;
2387 	int		i;
2388 
2389 	/*
2390 	 * If there is no data, we cannot do anything
2391 	 */
2392 	if (nelements == 0)
2393 		return (DDI_PROP_CANNOT_ENCODE);
2394 
2395 	/*
2396 	 * Get the size of the encoded string.
2397 	 */
2398 	tmp = (char **)data;
2399 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2400 	if (size < DDI_PROP_RESULT_OK) {
2401 		switch (size) {
2402 		case DDI_PROP_RESULT_EOF:
2403 			return (DDI_PROP_END_OF_DATA);
2404 
2405 		case DDI_PROP_RESULT_ERROR:
2406 			return (DDI_PROP_CANNOT_ENCODE);
2407 		}
2408 	}
2409 
2410 	/*
2411 	 * Allocate space in the handle to store the encoded string.
2412 	 */
2413 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2414 		return (DDI_PROP_NO_MEMORY);
2415 
2416 	ddi_prop_reset_pos(ph);
2417 
2418 	/*
2419 	 * Encode the string.
2420 	 */
2421 	tmp = (char **)data;
2422 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2423 	if (i < DDI_PROP_RESULT_OK) {
2424 		switch (i) {
2425 		case DDI_PROP_RESULT_EOF:
2426 			return (DDI_PROP_END_OF_DATA);
2427 
2428 		case DDI_PROP_RESULT_ERROR:
2429 			return (DDI_PROP_CANNOT_ENCODE);
2430 		}
2431 	}
2432 
2433 	return (DDI_PROP_SUCCESS);
2434 }
2435 
2436 
2437 /*
2438  * Encode an array of strings.
2439  */
2440 int
2441 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2442 {
2443 	int		cnt = 0;
2444 	char		**tmp;
2445 	int		size;
2446 	uint_t		total_size;
2447 	int		i;
2448 
2449 	/*
2450 	 * If there is no data, we cannot do anything
2451 	 */
2452 	if (nelements == 0)
2453 		return (DDI_PROP_CANNOT_ENCODE);
2454 
2455 	/*
2456 	 * Get the total size required to encode all the strings.
2457 	 */
2458 	total_size = 0;
2459 	tmp = (char **)data;
2460 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2461 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2462 		if (size < DDI_PROP_RESULT_OK) {
2463 			switch (size) {
2464 			case DDI_PROP_RESULT_EOF:
2465 				return (DDI_PROP_END_OF_DATA);
2466 
2467 			case DDI_PROP_RESULT_ERROR:
2468 				return (DDI_PROP_CANNOT_ENCODE);
2469 			}
2470 		}
2471 		total_size += (uint_t)size;
2472 	}
2473 
2474 	/*
2475 	 * Allocate space in the handle to store the encoded strings.
2476 	 */
2477 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2478 		return (DDI_PROP_NO_MEMORY);
2479 
2480 	ddi_prop_reset_pos(ph);
2481 
2482 	/*
2483 	 * Encode the array of strings.
2484 	 */
2485 	tmp = (char **)data;
2486 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2487 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2488 		if (i < DDI_PROP_RESULT_OK) {
2489 			switch (i) {
2490 			case DDI_PROP_RESULT_EOF:
2491 				return (DDI_PROP_END_OF_DATA);
2492 
2493 			case DDI_PROP_RESULT_ERROR:
2494 				return (DDI_PROP_CANNOT_ENCODE);
2495 			}
2496 		}
2497 	}
2498 
2499 	return (DDI_PROP_SUCCESS);
2500 }
2501 
2502 
2503 /*
2504  * Decode an array of bytes.
2505  */
2506 static int
2507 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2508 {
2509 	uchar_t		*tmp;
2510 	int		nbytes;
2511 	int		i;
2512 
2513 	/*
2514 	 * If there are no elements return an error
2515 	 */
2516 	if (ph->ph_size == 0)
2517 		return (DDI_PROP_END_OF_DATA);
2518 
2519 	/*
2520 	 * Get the size of the encoded array of bytes.
2521 	 */
2522 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2523 	    data, ph->ph_size);
2524 	if (nbytes < DDI_PROP_RESULT_OK) {
2525 		switch (nbytes) {
2526 		case DDI_PROP_RESULT_EOF:
2527 			return (DDI_PROP_END_OF_DATA);
2528 
2529 		case DDI_PROP_RESULT_ERROR:
2530 			return (DDI_PROP_CANNOT_DECODE);
2531 		}
2532 	}
2533 
2534 	/*
2535 	 * Allocated memory to store the decoded value in.
2536 	 */
2537 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2538 
2539 	/*
2540 	 * Decode each element and place it in the space we just allocated
2541 	 */
2542 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2543 	if (i < DDI_PROP_RESULT_OK) {
2544 		/*
2545 		 * Free the space we just allocated
2546 		 * and return an error
2547 		 */
2548 		ddi_prop_free(tmp);
2549 		switch (i) {
2550 		case DDI_PROP_RESULT_EOF:
2551 			return (DDI_PROP_END_OF_DATA);
2552 
2553 		case DDI_PROP_RESULT_ERROR:
2554 			return (DDI_PROP_CANNOT_DECODE);
2555 		}
2556 	}
2557 
2558 	*(uchar_t **)data = tmp;
2559 	*nelements = nbytes;
2560 
2561 	return (DDI_PROP_SUCCESS);
2562 }
2563 
2564 /*
2565  * Encode an array of bytes.
2566  */
2567 int
2568 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2569 {
2570 	int		size;
2571 	int		i;
2572 
2573 	/*
2574 	 * If there are no elements, then this is a boolean property,
2575 	 * so just create a property handle with no data and return.
2576 	 */
2577 	if (nelements == 0) {
2578 		(void) ddi_prop_encode_alloc(ph, 0);
2579 		return (DDI_PROP_SUCCESS);
2580 	}
2581 
2582 	/*
2583 	 * Get the size of the encoded array of bytes.
2584 	 */
2585 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2586 	    nelements);
2587 	if (size < DDI_PROP_RESULT_OK) {
2588 		switch (size) {
2589 		case DDI_PROP_RESULT_EOF:
2590 			return (DDI_PROP_END_OF_DATA);
2591 
2592 		case DDI_PROP_RESULT_ERROR:
2593 			return (DDI_PROP_CANNOT_DECODE);
2594 		}
2595 	}
2596 
2597 	/*
2598 	 * Allocate space in the handle to store the encoded bytes.
2599 	 */
2600 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2601 		return (DDI_PROP_NO_MEMORY);
2602 
2603 	/*
2604 	 * Encode the array of bytes.
2605 	 */
2606 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2607 	    nelements);
2608 	if (i < DDI_PROP_RESULT_OK) {
2609 		switch (i) {
2610 		case DDI_PROP_RESULT_EOF:
2611 			return (DDI_PROP_END_OF_DATA);
2612 
2613 		case DDI_PROP_RESULT_ERROR:
2614 			return (DDI_PROP_CANNOT_ENCODE);
2615 		}
2616 	}
2617 
2618 	return (DDI_PROP_SUCCESS);
2619 }
2620 
2621 /*
2622  * OBP 1275 integer, string and byte operators.
2623  *
2624  * DDI_PROP_CMD_DECODE:
2625  *
2626  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2627  *	DDI_PROP_RESULT_EOF:		end of data
2628  *	DDI_PROP_OK:			data was decoded
2629  *
2630  * DDI_PROP_CMD_ENCODE:
2631  *
2632  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2633  *	DDI_PROP_RESULT_EOF:		end of data
2634  *	DDI_PROP_OK:			data was encoded
2635  *
2636  * DDI_PROP_CMD_SKIP:
2637  *
2638  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2639  *	DDI_PROP_RESULT_EOF:		end of data
2640  *	DDI_PROP_OK:			data was skipped
2641  *
2642  * DDI_PROP_CMD_GET_ESIZE:
2643  *
2644  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2645  *	DDI_PROP_RESULT_EOF:		end of data
2646  *	> 0:				the encoded size
2647  *
2648  * DDI_PROP_CMD_GET_DSIZE:
2649  *
2650  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2651  *	DDI_PROP_RESULT_EOF:		end of data
2652  *	> 0:				the decoded size
2653  */
2654 
2655 /*
2656  * OBP 1275 integer operator
2657  *
2658  * OBP properties are a byte stream of data, so integers may not be
2659  * properly aligned.  Therefore we need to copy them one byte at a time.
2660  */
2661 int
2662 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2663 {
2664 	int	i;
2665 
2666 	switch (cmd) {
2667 	case DDI_PROP_CMD_DECODE:
2668 		/*
2669 		 * Check that there is encoded data
2670 		 */
2671 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2672 			return (DDI_PROP_RESULT_ERROR);
2673 		if (ph->ph_flags & PH_FROM_PROM) {
2674 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2675 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2676 			    ph->ph_size - i))
2677 				return (DDI_PROP_RESULT_ERROR);
2678 		} else {
2679 			if (ph->ph_size < sizeof (int) ||
2680 			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2681 			    ph->ph_size - sizeof (int))))
2682 				return (DDI_PROP_RESULT_ERROR);
2683 		}
2684 
2685 		/*
2686 		 * Copy the integer, using the implementation-specific
2687 		 * copy function if the property is coming from the PROM.
2688 		 */
2689 		if (ph->ph_flags & PH_FROM_PROM) {
2690 			*data = impl_ddi_prop_int_from_prom(
2691 			    (uchar_t *)ph->ph_cur_pos,
2692 			    (ph->ph_size < PROP_1275_INT_SIZE) ?
2693 			    ph->ph_size : PROP_1275_INT_SIZE);
2694 		} else {
2695 			bcopy(ph->ph_cur_pos, data, sizeof (int));
2696 		}
2697 
2698 		/*
2699 		 * Move the current location to the start of the next
2700 		 * bit of undecoded data.
2701 		 */
2702 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2703 		    PROP_1275_INT_SIZE;
2704 		return (DDI_PROP_RESULT_OK);
2705 
2706 	case DDI_PROP_CMD_ENCODE:
2707 		/*
2708 		 * Check that there is room to encoded the data
2709 		 */
2710 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2711 		    ph->ph_size < PROP_1275_INT_SIZE ||
2712 		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2713 		    ph->ph_size - sizeof (int))))
2714 			return (DDI_PROP_RESULT_ERROR);
2715 
2716 		/*
2717 		 * Encode the integer into the byte stream one byte at a
2718 		 * time.
2719 		 */
2720 		bcopy(data, ph->ph_cur_pos, sizeof (int));
2721 
2722 		/*
2723 		 * Move the current location to the start of the next bit of
2724 		 * space where we can store encoded data.
2725 		 */
2726 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2727 		return (DDI_PROP_RESULT_OK);
2728 
2729 	case DDI_PROP_CMD_SKIP:
2730 		/*
2731 		 * Check that there is encoded data
2732 		 */
2733 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2734 		    ph->ph_size < PROP_1275_INT_SIZE)
2735 			return (DDI_PROP_RESULT_ERROR);
2736 
2737 
2738 		if ((caddr_t)ph->ph_cur_pos ==
2739 		    (caddr_t)ph->ph_data + ph->ph_size) {
2740 			return (DDI_PROP_RESULT_EOF);
2741 		} else if ((caddr_t)ph->ph_cur_pos >
2742 		    (caddr_t)ph->ph_data + ph->ph_size) {
2743 			return (DDI_PROP_RESULT_EOF);
2744 		}
2745 
2746 		/*
2747 		 * Move the current location to the start of the next bit of
2748 		 * undecoded data.
2749 		 */
2750 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2751 		return (DDI_PROP_RESULT_OK);
2752 
2753 	case DDI_PROP_CMD_GET_ESIZE:
2754 		/*
2755 		 * Return the size of an encoded integer on OBP
2756 		 */
2757 		return (PROP_1275_INT_SIZE);
2758 
2759 	case DDI_PROP_CMD_GET_DSIZE:
2760 		/*
2761 		 * Return the size of a decoded integer on the system.
2762 		 */
2763 		return (sizeof (int));
2764 
2765 	default:
2766 #ifdef DEBUG
2767 		panic("ddi_prop_1275_int: %x impossible", cmd);
2768 		/*NOTREACHED*/
2769 #else
2770 		return (DDI_PROP_RESULT_ERROR);
2771 #endif	/* DEBUG */
2772 	}
2773 }
2774 
2775 /*
2776  * 64 bit integer operator.
2777  *
2778  * This is an extension, defined by Sun, to the 1275 integer
2779  * operator.  This routine handles the encoding/decoding of
2780  * 64 bit integer properties.
2781  */
2782 int
2783 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2784 {
2785 
2786 	switch (cmd) {
2787 	case DDI_PROP_CMD_DECODE:
2788 		/*
2789 		 * Check that there is encoded data
2790 		 */
2791 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2792 			return (DDI_PROP_RESULT_ERROR);
2793 		if (ph->ph_flags & PH_FROM_PROM) {
2794 			return (DDI_PROP_RESULT_ERROR);
2795 		} else {
2796 			if (ph->ph_size < sizeof (int64_t) ||
2797 			    ((int64_t *)ph->ph_cur_pos >
2798 			    ((int64_t *)ph->ph_data +
2799 			    ph->ph_size - sizeof (int64_t))))
2800 				return (DDI_PROP_RESULT_ERROR);
2801 		}
2802 		/*
2803 		 * Copy the integer, using the implementation-specific
2804 		 * copy function if the property is coming from the PROM.
2805 		 */
2806 		if (ph->ph_flags & PH_FROM_PROM) {
2807 			return (DDI_PROP_RESULT_ERROR);
2808 		} else {
2809 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2810 		}
2811 
2812 		/*
2813 		 * Move the current location to the start of the next
2814 		 * bit of undecoded data.
2815 		 */
2816 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2817 		    sizeof (int64_t);
2818 		return (DDI_PROP_RESULT_OK);
2819 
2820 	case DDI_PROP_CMD_ENCODE:
2821 		/*
2822 		 * Check that there is room to encoded the data
2823 		 */
2824 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2825 		    ph->ph_size < sizeof (int64_t) ||
2826 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2827 		    ph->ph_size - sizeof (int64_t))))
2828 			return (DDI_PROP_RESULT_ERROR);
2829 
2830 		/*
2831 		 * Encode the integer into the byte stream one byte at a
2832 		 * time.
2833 		 */
2834 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2835 
2836 		/*
2837 		 * Move the current location to the start of the next bit of
2838 		 * space where we can store encoded data.
2839 		 */
2840 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2841 		    sizeof (int64_t);
2842 		return (DDI_PROP_RESULT_OK);
2843 
2844 	case DDI_PROP_CMD_SKIP:
2845 		/*
2846 		 * Check that there is encoded data
2847 		 */
2848 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2849 		    ph->ph_size < sizeof (int64_t))
2850 			return (DDI_PROP_RESULT_ERROR);
2851 
2852 		if ((caddr_t)ph->ph_cur_pos ==
2853 		    (caddr_t)ph->ph_data + ph->ph_size) {
2854 			return (DDI_PROP_RESULT_EOF);
2855 		} else if ((caddr_t)ph->ph_cur_pos >
2856 		    (caddr_t)ph->ph_data + ph->ph_size) {
2857 			return (DDI_PROP_RESULT_EOF);
2858 		}
2859 
2860 		/*
2861 		 * Move the current location to the start of
2862 		 * the next bit of undecoded data.
2863 		 */
2864 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2865 		    sizeof (int64_t);
2866 		return (DDI_PROP_RESULT_OK);
2867 
2868 	case DDI_PROP_CMD_GET_ESIZE:
2869 		/*
2870 		 * Return the size of an encoded integer on OBP
2871 		 */
2872 		return (sizeof (int64_t));
2873 
2874 	case DDI_PROP_CMD_GET_DSIZE:
2875 		/*
2876 		 * Return the size of a decoded integer on the system.
2877 		 */
2878 		return (sizeof (int64_t));
2879 
2880 	default:
2881 #ifdef DEBUG
2882 		panic("ddi_prop_int64_op: %x impossible", cmd);
2883 		/*NOTREACHED*/
2884 #else
2885 		return (DDI_PROP_RESULT_ERROR);
2886 #endif  /* DEBUG */
2887 	}
2888 }
2889 
2890 /*
2891  * OBP 1275 string operator.
2892  *
2893  * OBP strings are NULL terminated.
2894  */
2895 int
2896 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
2897 {
2898 	int	n;
2899 	char	*p;
2900 	char	*end;
2901 
2902 	switch (cmd) {
2903 	case DDI_PROP_CMD_DECODE:
2904 		/*
2905 		 * Check that there is encoded data
2906 		 */
2907 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2908 			return (DDI_PROP_RESULT_ERROR);
2909 		}
2910 
2911 		/*
2912 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2913 		 * how to NULL terminate result.
2914 		 */
2915 		p = (char *)ph->ph_cur_pos;
2916 		end = (char *)ph->ph_data + ph->ph_size;
2917 		if (p >= end)
2918 			return (DDI_PROP_RESULT_EOF);
2919 
2920 		while (p < end) {
2921 			*data++ = *p;
2922 			if (*p++ == 0) {	/* NULL from OBP */
2923 				ph->ph_cur_pos = p;
2924 				return (DDI_PROP_RESULT_OK);
2925 			}
2926 		}
2927 
2928 		/*
2929 		 * If OBP did not NULL terminate string, which happens
2930 		 * (at least) for 'true'/'false' boolean values, account for
2931 		 * the space and store null termination on decode.
2932 		 */
2933 		ph->ph_cur_pos = p;
2934 		*data = 0;
2935 		return (DDI_PROP_RESULT_OK);
2936 
2937 	case DDI_PROP_CMD_ENCODE:
2938 		/*
2939 		 * Check that there is room to encoded the data
2940 		 */
2941 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2942 			return (DDI_PROP_RESULT_ERROR);
2943 		}
2944 
2945 		n = strlen(data) + 1;
2946 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
2947 		    ph->ph_size - n)) {
2948 			return (DDI_PROP_RESULT_ERROR);
2949 		}
2950 
2951 		/*
2952 		 * Copy the NULL terminated string
2953 		 */
2954 		bcopy(data, ph->ph_cur_pos, n);
2955 
2956 		/*
2957 		 * Move the current location to the start of the next bit of
2958 		 * space where we can store encoded data.
2959 		 */
2960 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
2961 		return (DDI_PROP_RESULT_OK);
2962 
2963 	case DDI_PROP_CMD_SKIP:
2964 		/*
2965 		 * Check that there is encoded data
2966 		 */
2967 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2968 			return (DDI_PROP_RESULT_ERROR);
2969 		}
2970 
2971 		/*
2972 		 * Return the string length plus one for the NULL
2973 		 * We know the size of the property, we need to
2974 		 * ensure that the string is properly formatted,
2975 		 * since we may be looking up random OBP data.
2976 		 */
2977 		p = (char *)ph->ph_cur_pos;
2978 		end = (char *)ph->ph_data + ph->ph_size;
2979 		if (p >= end)
2980 			return (DDI_PROP_RESULT_EOF);
2981 
2982 		while (p < end) {
2983 			if (*p++ == 0) {	/* NULL from OBP */
2984 				ph->ph_cur_pos = p;
2985 				return (DDI_PROP_RESULT_OK);
2986 			}
2987 		}
2988 
2989 		/*
2990 		 * Accommodate the fact that OBP does not always NULL
2991 		 * terminate strings.
2992 		 */
2993 		ph->ph_cur_pos = p;
2994 		return (DDI_PROP_RESULT_OK);
2995 
2996 	case DDI_PROP_CMD_GET_ESIZE:
2997 		/*
2998 		 * Return the size of the encoded string on OBP.
2999 		 */
3000 		return (strlen(data) + 1);
3001 
3002 	case DDI_PROP_CMD_GET_DSIZE:
3003 		/*
3004 		 * Return the string length plus one for the NULL.
3005 		 * We know the size of the property, we need to
3006 		 * ensure that the string is properly formatted,
3007 		 * since we may be looking up random OBP data.
3008 		 */
3009 		p = (char *)ph->ph_cur_pos;
3010 		end = (char *)ph->ph_data + ph->ph_size;
3011 		if (p >= end)
3012 			return (DDI_PROP_RESULT_EOF);
3013 
3014 		for (n = 0; p < end; n++) {
3015 			if (*p++ == 0) {	/* NULL from OBP */
3016 				ph->ph_cur_pos = p;
3017 				return (n + 1);
3018 			}
3019 		}
3020 
3021 		/*
3022 		 * If OBP did not NULL terminate string, which happens for
3023 		 * 'true'/'false' boolean values, account for the space
3024 		 * to store null termination here.
3025 		 */
3026 		ph->ph_cur_pos = p;
3027 		return (n + 1);
3028 
3029 	default:
3030 #ifdef DEBUG
3031 		panic("ddi_prop_1275_string: %x impossible", cmd);
3032 		/*NOTREACHED*/
3033 #else
3034 		return (DDI_PROP_RESULT_ERROR);
3035 #endif	/* DEBUG */
3036 	}
3037 }
3038 
3039 /*
3040  * OBP 1275 byte operator
3041  *
3042  * Caller must specify the number of bytes to get.  OBP encodes bytes
3043  * as a byte so there is a 1-to-1 translation.
3044  */
3045 int
3046 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3047     uint_t nelements)
3048 {
3049 	switch (cmd) {
3050 	case DDI_PROP_CMD_DECODE:
3051 		/*
3052 		 * Check that there is encoded data
3053 		 */
3054 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3055 		    ph->ph_size < nelements ||
3056 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3057 		    ph->ph_size - nelements)))
3058 			return (DDI_PROP_RESULT_ERROR);
3059 
3060 		/*
3061 		 * Copy out the bytes
3062 		 */
3063 		bcopy(ph->ph_cur_pos, data, nelements);
3064 
3065 		/*
3066 		 * Move the current location
3067 		 */
3068 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3069 		return (DDI_PROP_RESULT_OK);
3070 
3071 	case DDI_PROP_CMD_ENCODE:
3072 		/*
3073 		 * Check that there is room to encode the data
3074 		 */
3075 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3076 		    ph->ph_size < nelements ||
3077 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3078 		    ph->ph_size - nelements)))
3079 			return (DDI_PROP_RESULT_ERROR);
3080 
3081 		/*
3082 		 * Copy in the bytes
3083 		 */
3084 		bcopy(data, ph->ph_cur_pos, nelements);
3085 
3086 		/*
3087 		 * Move the current location to the start of the next bit of
3088 		 * space where we can store encoded data.
3089 		 */
3090 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3091 		return (DDI_PROP_RESULT_OK);
3092 
3093 	case DDI_PROP_CMD_SKIP:
3094 		/*
3095 		 * Check that there is encoded data
3096 		 */
3097 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3098 		    ph->ph_size < nelements)
3099 			return (DDI_PROP_RESULT_ERROR);
3100 
3101 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3102 		    ph->ph_size - nelements))
3103 			return (DDI_PROP_RESULT_EOF);
3104 
3105 		/*
3106 		 * Move the current location
3107 		 */
3108 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3109 		return (DDI_PROP_RESULT_OK);
3110 
3111 	case DDI_PROP_CMD_GET_ESIZE:
3112 		/*
3113 		 * The size in bytes of the encoded size is the
3114 		 * same as the decoded size provided by the caller.
3115 		 */
3116 		return (nelements);
3117 
3118 	case DDI_PROP_CMD_GET_DSIZE:
3119 		/*
3120 		 * Just return the number of bytes specified by the caller.
3121 		 */
3122 		return (nelements);
3123 
3124 	default:
3125 #ifdef DEBUG
3126 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3127 		/*NOTREACHED*/
3128 #else
3129 		return (DDI_PROP_RESULT_ERROR);
3130 #endif	/* DEBUG */
3131 	}
3132 }
3133 
3134 /*
3135  * Used for properties that come from the OBP, hardware configuration files,
3136  * or that are created by calls to ddi_prop_update(9F).
3137  */
3138 static struct prop_handle_ops prop_1275_ops = {
3139 	ddi_prop_1275_int,
3140 	ddi_prop_1275_string,
3141 	ddi_prop_1275_bytes,
3142 	ddi_prop_int64_op
3143 };
3144 
3145 
3146 /*
3147  * Interface to create/modify a managed property on child's behalf...
3148  * Flags interpreted are:
3149  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3150  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3151  *
3152  * Use same dev_t when modifying or undefining a property.
3153  * Search for properties with DDI_DEV_T_ANY to match first named
3154  * property on the list.
3155  *
3156  * Properties are stored LIFO and subsequently will match the first
3157  * `matching' instance.
3158  */
3159 
3160 /*
3161  * ddi_prop_add:	Add a software defined property
3162  */
3163 
3164 /*
3165  * define to get a new ddi_prop_t.
3166  * km_flags are KM_SLEEP or KM_NOSLEEP.
3167  */
3168 
3169 #define	DDI_NEW_PROP_T(km_flags)	\
3170 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3171 
/*
 * Add a software-defined property to one of the dip's property lists
 * (driver list by default; system or hardware list per flags).  The
 * name and value are copied; the new property is linked at the head of
 * the list (LIFO), under devi_lock.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/*
	 * Select the system or hardware list if requested; otherwise
	 * the driver list chosen above is used.
	 */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know
	 * what their major number is.  They can just create a dev with
	 * major number 0 and pass it in.  For device 0, we will be doing a
	 * little extra work by recreating the same dev that we already
	 * have, but it's the price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3270 
3271 
3272 /*
3273  * ddi_prop_change:	Modify a software managed property value
3274  *
3275  *			Set new length and value if found.
3276  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3277  *			input name is the NULL string.
3278  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3279  *
3280  *			Note: an undef can be modified to be a define,
3281  *			(you can't go the other way.)
3282  */
3283 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (presumably so no allocation -- which may sleep for
	 * DDI_PROP_CANSLEEP -- happens while devi_lock is held;
	 * TODO confirm)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Property found: install the preallocated buffer
		 * (NULL when length is zero) as the new value, free
		 * the old one, and clear any explicit-undefine flag.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/*
	 * Property not found: discard the preallocated buffer and
	 * create the property instead (ddi_prop_add copies value
	 * into its own allocation).
	 */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3353 
3354 /*
3355  * Common update routine used to update and encode a property.	Creates
3356  * a property handle, calls the property encode routine, figures out if
3357  * the property already exists and updates if it does.	Otherwise it
3358  * creates if it does not exist.
3359  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle used by the encode routine below.
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.  So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.  On failure, release any
	 * partially-encoded buffer the encoder left behind.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change made their own copy.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3439 
3440 
3441 /*
3442  * ddi_prop_create:	Define a managed property:
3443  *			See above for details.
3444  */
3445 
3446 int
3447 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3448     char *name, caddr_t value, int length)
3449 {
3450 	if (!(flag & DDI_PROP_CANSLEEP)) {
3451 		flag |= DDI_PROP_DONTSLEEP;
3452 #ifdef DDI_PROP_DEBUG
3453 		if (length != 0)
3454 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3455 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3456 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3457 #endif /* DDI_PROP_DEBUG */
3458 	}
3459 	flag &= ~DDI_PROP_SYSTEM_DEF;
3460 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3461 	return (ddi_prop_update_common(dev, dip, flag, name,
3462 	    value, length, ddi_prop_fm_encode_bytes));
3463 }
3464 
3465 int
3466 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3467     char *name, caddr_t value, int length)
3468 {
3469 	if (!(flag & DDI_PROP_CANSLEEP))
3470 		flag |= DDI_PROP_DONTSLEEP;
3471 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3472 	return (ddi_prop_update_common(dev, dip, flag,
3473 	    name, value, length, ddi_prop_fm_encode_bytes));
3474 }
3475 
3476 int
3477 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3478     char *name, caddr_t value, int length)
3479 {
3480 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3481 
3482 	/*
3483 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3484 	 * return error.
3485 	 */
3486 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3487 		return (DDI_PROP_INVAL_ARG);
3488 
3489 	if (!(flag & DDI_PROP_CANSLEEP))
3490 		flag |= DDI_PROP_DONTSLEEP;
3491 	flag &= ~DDI_PROP_SYSTEM_DEF;
3492 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3493 		return (DDI_PROP_NOT_FOUND);
3494 
3495 	return (ddi_prop_update_common(dev, dip,
3496 	    (flag | DDI_PROP_TYPE_BYTE), name,
3497 	    value, length, ddi_prop_fm_encode_bytes));
3498 }
3499 
3500 int
3501 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3502     char *name, caddr_t value, int length)
3503 {
3504 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3505 
3506 	/*
3507 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3508 	 * return error.
3509 	 */
3510 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3511 		return (DDI_PROP_INVAL_ARG);
3512 
3513 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3514 		return (DDI_PROP_NOT_FOUND);
3515 
3516 	if (!(flag & DDI_PROP_CANSLEEP))
3517 		flag |= DDI_PROP_DONTSLEEP;
3518 	return (ddi_prop_update_common(dev, dip,
3519 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3520 	    name, value, length, ddi_prop_fm_encode_bytes));
3521 }
3522 
3523 
3524 /*
3525  * Common lookup routine used to lookup and decode a property.
3526  * Creates a property handle, searches for the raw encoded data,
3527  * fills in the handle, and calls the property decode functions
3528  * passed in.
3529  *
3530  * This routine is not static because ddi_bus_prop_op() which lives in
3531  * ddi_impl.c calls it.  No driver should be calling this routine.
3532  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	/*
	 * DDI_DEV_T_NONE is rejected; DDI_DEV_T_ANY is a legal wildcard
	 * for lookups (unlike the update path).
	 */
	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Lookups sleep by default unless the caller forbids it. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		/* On failure no encoded buffer may have been handed back. */
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data (the decoder allocated the caller's
	 * copy separately).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3595 
3596 /*
3597  * Lookup and return an array of composite properties.  The driver must
3598  * provide the decode routine.
3599  */
3600 int
3601 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3602     uint_t flags, char *name, void *data, uint_t *nelements,
3603     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3604 {
3605 	return (ddi_prop_lookup_common(match_dev, dip,
3606 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3607 	    data, nelements, prop_decoder));
3608 }
3609 
3610 /*
3611  * Return 1 if a property exists (no type checking done).
3612  * Return 0 if it does not exist.
3613  */
3614 int
3615 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3616 {
3617 	int	i;
3618 	uint_t	x = 0;
3619 
3620 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3621 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3622 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3623 }
3624 
3625 
3626 /*
3627  * Update an array of composite properties.  The driver must
3628  * provide the encode routine.
3629  */
3630 int
3631 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3632     char *name, void *data, uint_t nelements,
3633     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3634 {
3635 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3636 	    name, data, nelements, prop_create));
3637 }
3638 
3639 /*
3640  * Get a single integer or boolean property and return it.
3641  * If the property does not exists, or cannot be decoded,
3642  * then return the defvalue passed in.
3643  *
3644  * This routine always succeeds.
3645  */
3646 int
3647 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3648     char *name, int defvalue)
3649 {
3650 	int	data;
3651 	uint_t	nelements;
3652 	int	rval;
3653 
3654 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3655 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3656 #ifdef DEBUG
3657 		if (dip != NULL) {
3658 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3659 			    " 0x%x (prop = %s, node = %s%d)", flags,
3660 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3661 		}
3662 #endif /* DEBUG */
3663 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3664 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3665 	}
3666 
3667 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3668 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3669 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3670 		if (rval == DDI_PROP_END_OF_DATA)
3671 			data = 1;
3672 		else
3673 			data = defvalue;
3674 	}
3675 	return (data);
3676 }
3677 
3678 /*
3679  * Get a single 64 bit integer or boolean property and return it.
3680  * If the property does not exists, or cannot be decoded,
3681  * then return the defvalue passed in.
3682  *
3683  * This routine always succeeds.
3684  */
3685 int64_t
3686 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
3687     char *name, int64_t defvalue)
3688 {
3689 	int64_t	data;
3690 	uint_t	nelements;
3691 	int	rval;
3692 
3693 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3694 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3695 #ifdef DEBUG
3696 		if (dip != NULL) {
3697 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
3698 			    " 0x%x (prop = %s, node = %s%d)", flags,
3699 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3700 		}
3701 #endif /* DEBUG */
3702 		return (DDI_PROP_INVAL_ARG);
3703 	}
3704 
3705 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3706 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3707 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
3708 	    != DDI_PROP_SUCCESS) {
3709 		if (rval == DDI_PROP_END_OF_DATA)
3710 			data = 1;
3711 		else
3712 			data = defvalue;
3713 	}
3714 	return (data);
3715 }
3716 
3717 /*
3718  * Get an array of integer property
3719  */
3720 int
3721 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3722     char *name, int **data, uint_t *nelements)
3723 {
3724 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3725 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3726 #ifdef DEBUG
3727 		if (dip != NULL) {
3728 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3729 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3730 			    flags, name, ddi_driver_name(dip),
3731 			    ddi_get_instance(dip));
3732 		}
3733 #endif /* DEBUG */
3734 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3735 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3736 	}
3737 
3738 	return (ddi_prop_lookup_common(match_dev, dip,
3739 	    (flags | DDI_PROP_TYPE_INT), name, data,
3740 	    nelements, ddi_prop_fm_decode_ints));
3741 }
3742 
3743 /*
3744  * Get an array of 64 bit integer properties
3745  */
3746 int
3747 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3748     char *name, int64_t **data, uint_t *nelements)
3749 {
3750 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3751 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3752 #ifdef DEBUG
3753 		if (dip != NULL) {
3754 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3755 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3756 			    flags, name, ddi_driver_name(dip),
3757 			    ddi_get_instance(dip));
3758 		}
3759 #endif /* DEBUG */
3760 		return (DDI_PROP_INVAL_ARG);
3761 	}
3762 
3763 	return (ddi_prop_lookup_common(match_dev, dip,
3764 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3765 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
3766 }
3767 
3768 /*
3769  * Update a single integer property.  If the property exists on the drivers
3770  * property list it updates, else it creates it.
3771  */
3772 int
3773 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3774     char *name, int data)
3775 {
3776 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3777 	    name, &data, 1, ddi_prop_fm_encode_ints));
3778 }
3779 
3780 /*
3781  * Update a single 64 bit integer property.
3782  * Update the driver property list if it exists, else create it.
3783  */
3784 int
3785 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3786     char *name, int64_t data)
3787 {
3788 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3789 	    name, &data, 1, ddi_prop_fm_encode_int64));
3790 }
3791 
3792 int
3793 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3794     char *name, int data)
3795 {
3796 	return (ddi_prop_update_common(match_dev, dip,
3797 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3798 	    name, &data, 1, ddi_prop_fm_encode_ints));
3799 }
3800 
3801 int
3802 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3803     char *name, int64_t data)
3804 {
3805 	return (ddi_prop_update_common(match_dev, dip,
3806 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3807 	    name, &data, 1, ddi_prop_fm_encode_int64));
3808 }
3809 
3810 /*
3811  * Update an array of integer property.  If the property exists on the drivers
3812  * property list it updates, else it creates it.
3813  */
3814 int
3815 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3816     char *name, int *data, uint_t nelements)
3817 {
3818 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3819 	    name, data, nelements, ddi_prop_fm_encode_ints));
3820 }
3821 
3822 /*
3823  * Update an array of 64 bit integer properties.
3824  * Update the driver property list if it exists, else create it.
3825  */
3826 int
3827 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3828     char *name, int64_t *data, uint_t nelements)
3829 {
3830 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3831 	    name, data, nelements, ddi_prop_fm_encode_int64));
3832 }
3833 
3834 int
3835 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3836     char *name, int64_t *data, uint_t nelements)
3837 {
3838 	return (ddi_prop_update_common(match_dev, dip,
3839 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3840 	    name, data, nelements, ddi_prop_fm_encode_int64));
3841 }
3842 
3843 int
3844 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3845     char *name, int *data, uint_t nelements)
3846 {
3847 	return (ddi_prop_update_common(match_dev, dip,
3848 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3849 	    name, data, nelements, ddi_prop_fm_encode_ints));
3850 }
3851 
3852 /*
3853  * Get a single string property.
3854  */
3855 int
3856 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3857     char *name, char **data)
3858 {
3859 	uint_t x;
3860 
3861 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3862 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3863 #ifdef DEBUG
3864 		if (dip != NULL) {
3865 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3866 			    "(prop = %s, node = %s%d); invalid bits ignored",
3867 			    "ddi_prop_lookup_string", flags, name,
3868 			    ddi_driver_name(dip), ddi_get_instance(dip));
3869 		}
3870 #endif /* DEBUG */
3871 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3872 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3873 	}
3874 
3875 	return (ddi_prop_lookup_common(match_dev, dip,
3876 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3877 	    &x, ddi_prop_fm_decode_string));
3878 }
3879 
3880 /*
3881  * Get an array of strings property.
3882  */
3883 int
3884 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3885     char *name, char ***data, uint_t *nelements)
3886 {
3887 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3888 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3889 #ifdef DEBUG
3890 		if (dip != NULL) {
3891 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3892 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3893 			    flags, name, ddi_driver_name(dip),
3894 			    ddi_get_instance(dip));
3895 		}
3896 #endif /* DEBUG */
3897 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3898 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3899 	}
3900 
3901 	return (ddi_prop_lookup_common(match_dev, dip,
3902 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3903 	    nelements, ddi_prop_fm_decode_strings));
3904 }
3905 
3906 /*
3907  * Update a single string property.
3908  */
3909 int
3910 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3911     char *name, char *data)
3912 {
3913 	return (ddi_prop_update_common(match_dev, dip,
3914 	    DDI_PROP_TYPE_STRING, name, &data, 1,
3915 	    ddi_prop_fm_encode_string));
3916 }
3917 
3918 int
3919 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3920     char *name, char *data)
3921 {
3922 	return (ddi_prop_update_common(match_dev, dip,
3923 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3924 	    name, &data, 1, ddi_prop_fm_encode_string));
3925 }
3926 
3927 
3928 /*
3929  * Update an array of strings property.
3930  */
3931 int
3932 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3933     char *name, char **data, uint_t nelements)
3934 {
3935 	return (ddi_prop_update_common(match_dev, dip,
3936 	    DDI_PROP_TYPE_STRING, name, data, nelements,
3937 	    ddi_prop_fm_encode_strings));
3938 }
3939 
3940 int
3941 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3942     char *name, char **data, uint_t nelements)
3943 {
3944 	return (ddi_prop_update_common(match_dev, dip,
3945 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3946 	    name, data, nelements,
3947 	    ddi_prop_fm_encode_strings));
3948 }
3949 
3950 
3951 /*
3952  * Get an array of bytes property.
3953  */
3954 int
3955 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3956     char *name, uchar_t **data, uint_t *nelements)
3957 {
3958 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3959 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3960 #ifdef DEBUG
3961 		if (dip != NULL) {
3962 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
3963 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
3964 			    flags, name, ddi_driver_name(dip),
3965 			    ddi_get_instance(dip));
3966 		}
3967 #endif /* DEBUG */
3968 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3969 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3970 	}
3971 
3972 	return (ddi_prop_lookup_common(match_dev, dip,
3973 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
3974 	    nelements, ddi_prop_fm_decode_bytes));
3975 }
3976 
3977 /*
3978  * Update an array of bytes property.
3979  */
3980 int
3981 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3982     char *name, uchar_t *data, uint_t nelements)
3983 {
3984 	if (nelements == 0)
3985 		return (DDI_PROP_INVAL_ARG);
3986 
3987 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
3988 	    name, data, nelements, ddi_prop_fm_encode_bytes));
3989 }
3990 
3991 
3992 int
3993 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3994     char *name, uchar_t *data, uint_t nelements)
3995 {
3996 	if (nelements == 0)
3997 		return (DDI_PROP_INVAL_ARG);
3998 
3999 	return (ddi_prop_update_common(match_dev, dip,
4000 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4001 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4002 }
4003 
4004 
4005 /*
4006  * ddi_prop_remove_common:	Undefine a managed property:
4007  *			Input dev_t must match dev_t when defined.
4008  *			Returns DDI_PROP_NOT_FOUND, possibly.
4009  *			DDI_PROP_INVAL_ARG is also possible if dev is
4010  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4011  */
4012 int
4013 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4014 {
4015 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4016 	ddi_prop_t	*propp;
4017 	ddi_prop_t	*lastpropp = NULL;
4018 
4019 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4020 	    (strlen(name) == 0)) {
4021 		return (DDI_PROP_INVAL_ARG);
4022 	}
4023 
4024 	if (flag & DDI_PROP_SYSTEM_DEF)
4025 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4026 	else if (flag & DDI_PROP_HW_DEF)
4027 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4028 
4029 	mutex_enter(&(DEVI(dip)->devi_lock));
4030 
4031 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4032 		if (DDI_STRSAME(propp->prop_name, name) &&
4033 		    (dev == propp->prop_dev)) {
4034 			/*
4035 			 * Unlink this propp allowing for it to
4036 			 * be first in the list:
4037 			 */
4038 
4039 			if (lastpropp == NULL)
4040 				*list_head = propp->prop_next;
4041 			else
4042 				lastpropp->prop_next = propp->prop_next;
4043 
4044 			mutex_exit(&(DEVI(dip)->devi_lock));
4045 
4046 			/*
4047 			 * Free memory and return...
4048 			 */
4049 			kmem_free(propp->prop_name,
4050 			    strlen(propp->prop_name) + 1);
4051 			if (propp->prop_len != 0)
4052 				kmem_free(propp->prop_val, propp->prop_len);
4053 			kmem_free(propp, sizeof (ddi_prop_t));
4054 			return (DDI_PROP_SUCCESS);
4055 		}
4056 		lastpropp = propp;
4057 	}
4058 	mutex_exit(&(DEVI(dip)->devi_lock));
4059 	return (DDI_PROP_NOT_FOUND);
4060 }
4061 
4062 int
4063 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4064 {
4065 	return (ddi_prop_remove_common(dev, dip, name, 0));
4066 }
4067 
4068 int
4069 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4070 {
4071 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4072 }
4073 
4074 /*
4075  * e_ddi_prop_list_delete: remove a list of properties
4076  *	Note that the caller needs to provide the required protection
4077  *	(eg. devi_lock if these properties are still attached to a devi)
4078  */
4079 void
4080 e_ddi_prop_list_delete(ddi_prop_t *props)
4081 {
4082 	i_ddi_prop_list_delete(props);
4083 }
4084 
4085 /*
4086  * ddi_prop_remove_all_common:
4087  *	Used before unloading a driver to remove
4088  *	all properties. (undefines all dev_t's props.)
4089  *	Also removes `explicitly undefined' props.
4090  *	No errors possible.
4091  */
4092 void
4093 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4094 {
4095 	ddi_prop_t	**list_head;
4096 
4097 	mutex_enter(&(DEVI(dip)->devi_lock));
4098 	if (flag & DDI_PROP_SYSTEM_DEF) {
4099 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4100 	} else if (flag & DDI_PROP_HW_DEF) {
4101 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4102 	} else {
4103 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4104 	}
4105 	i_ddi_prop_list_delete(*list_head);
4106 	*list_head = NULL;
4107 	mutex_exit(&(DEVI(dip)->devi_lock));
4108 }
4109 
4110 
4111 /*
4112  * ddi_prop_remove_all:		Remove all driver prop definitions.
4113  */
4114 
4115 void
4116 ddi_prop_remove_all(dev_info_t *dip)
4117 {
4118 	i_ddi_prop_dyn_driver_set(dip, NULL);
4119 	ddi_prop_remove_all_common(dip, 0);
4120 }
4121 
4122 /*
4123  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4124  */
4125 
4126 void
4127 e_ddi_prop_remove_all(dev_info_t *dip)
4128 {
4129 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4130 }
4131 
4132 
4133 /*
4134  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4135  *			searches which match this property return
4136  *			the error code DDI_PROP_UNDEFINED.
4137  *
4138  *			Use ddi_prop_remove to negate effect of
4139  *			ddi_prop_undefine
4140  *
4141  *			See above for error returns.
4142  */
4143 
4144 int
4145 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4146 {
4147 	if (!(flag & DDI_PROP_CANSLEEP))
4148 		flag |= DDI_PROP_DONTSLEEP;
4149 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4150 	return (ddi_prop_update_common(dev, dip, flag,
4151 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4152 }
4153 
4154 int
4155 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4156 {
4157 	if (!(flag & DDI_PROP_CANSLEEP))
4158 		flag |= DDI_PROP_DONTSLEEP;
4159 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4160 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4161 	return (ddi_prop_update_common(dev, dip, flag,
4162 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4163 }
4164 
4165 /*
4166  * Support for gathering dynamic properties in devinfo snapshot.
4167  */
4168 void
4169 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4170 {
4171 	DEVI(dip)->devi_prop_dyn_driver = dp;
4172 }
4173 
4174 i_ddi_prop_dyn_t *
4175 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4176 {
4177 	return (DEVI(dip)->devi_prop_dyn_driver);
4178 }
4179 
4180 void
4181 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4182 {
4183 	DEVI(dip)->devi_prop_dyn_parent = dp;
4184 }
4185 
4186 i_ddi_prop_dyn_t *
4187 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4188 {
4189 	return (DEVI(dip)->devi_prop_dyn_parent);
4190 }
4191 
4192 void
4193 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4194 {
4195 	/* for now we invalidate the entire cached snapshot */
4196 	if (dip && dp)
4197 		i_ddi_di_cache_invalidate();
4198 }
4199 
4200 /* ARGSUSED */
4201 void
4202 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4203 {
4204 	/* for now we invalidate the entire cached snapshot */
4205 	i_ddi_di_cache_invalidate();
4206 }
4207 
4208 
4209 /*
4210  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4211  *
4212  * if input dip != child_dip, then call is on behalf of child
4213  * to search PROM, do it via ddi_prop_search_common() and ascend only
4214  * if allowed.
4215  *
4216  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4217  * to search for PROM defined props only.
4218  *
4219  * Note that the PROM search is done only if the requested dev
4220  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4221  * have no associated dev, thus are automatically associated with
4222  * DDI_DEV_T_NONE.
4223  *
4224  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4225  *
4226  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4227  * that the property resides in the prom.
4228  */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer = NULL;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		/* -1 from the PROM means the property does not exist. */
		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:
			/* Caller frees; *valuep receives the new buffer. */
			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:
			/* Caller-supplied buffer; report needed size. */
			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
		    name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4315 
4316 /*
4317  * The ddi_bus_prop_op default bus nexus prop op function.
4318  *
4319  * Code to search hardware layer (PROM), if it exists,
4320  * on behalf of child, then, if appropriate, ascend and check
4321  * my own software defined properties...
4322  */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	error;

	/* First try the child's PROM node, if it has one. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
	    name, valuep, lengthp);

	/* BUF_TOO_SMALL is definitive: the property exists in the PROM. */
	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node())	{
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}
4375 
4376 /*
4377  * External property functions used by other parts of the kernel...
4378  */
4379 
4380 /*
4381  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4382  */
4383 
4384 int
4385 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4386     caddr_t valuep, int *lengthp)
4387 {
4388 	_NOTE(ARGUNUSED(type))
4389 	dev_info_t *devi;
4390 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4391 	int error;
4392 
4393 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4394 		return (DDI_PROP_NOT_FOUND);
4395 
4396 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4397 	ddi_release_devi(devi);
4398 	return (error);
4399 }
4400 
4401 /*
4402  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4403  */
4404 
4405 int
4406 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4407     caddr_t valuep, int *lengthp)
4408 {
4409 	_NOTE(ARGUNUSED(type))
4410 	dev_info_t *devi;
4411 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4412 	int error;
4413 
4414 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4415 		return (DDI_PROP_NOT_FOUND);
4416 
4417 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4418 	ddi_release_devi(devi);
4419 	return (error);
4420 }
4421 
4422 /*
4423  * e_ddi_getprop:	See comments for ddi_getprop.
4424  */
4425 int
4426 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4427 {
4428 	_NOTE(ARGUNUSED(type))
4429 	dev_info_t *devi;
4430 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4431 	int	propvalue = defvalue;
4432 	int	proplength = sizeof (int);
4433 	int	error;
4434 
4435 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4436 		return (defvalue);
4437 
4438 	error = cdev_prop_op(dev, devi, prop_op,
4439 	    flags, name, (caddr_t)&propvalue, &proplength);
4440 	ddi_release_devi(devi);
4441 
4442 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4443 		propvalue = 1;
4444 
4445 	return (propvalue);
4446 }
4447 
4448 /*
4449  * e_ddi_getprop_int64:
4450  *
4451  * This is a typed interfaces, but predates typed properties. With the
4452  * introduction of typed properties the framework tries to ensure
4453  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4454  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4455  * typed interface invokes legacy (non-typed) interfaces:
4456  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4457  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4458  * this type of lookup as a single operation we invoke the legacy
4459  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4460  * framework ddi_prop_op(9F) implementation is expected to check for
4461  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4462  * (currently TYPE_INT64).
4463  */
4464 int64_t
4465 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4466     int flags, int64_t defvalue)
4467 {
4468 	_NOTE(ARGUNUSED(type))
4469 	dev_info_t	*devi;
4470 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4471 	int64_t		propvalue = defvalue;
4472 	int		proplength = sizeof (propvalue);
4473 	int		error;
4474 
4475 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4476 		return (defvalue);
4477 
4478 	error = cdev_prop_op(dev, devi, prop_op, flags |
4479 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4480 	ddi_release_devi(devi);
4481 
4482 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4483 		propvalue = 1;
4484 
4485 	return (propvalue);
4486 }
4487 
4488 /*
4489  * e_ddi_getproplen:	See comments for ddi_getproplen.
4490  */
4491 int
4492 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4493 {
4494 	_NOTE(ARGUNUSED(type))
4495 	dev_info_t *devi;
4496 	ddi_prop_op_t prop_op = PROP_LEN;
4497 	int error;
4498 
4499 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4500 		return (DDI_PROP_NOT_FOUND);
4501 
4502 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4503 	ddi_release_devi(devi);
4504 	return (error);
4505 }
4506 
4507 /*
4508  * Routines to get at elements of the dev_info structure
4509  */
4510 
4511 /*
4512  * ddi_binding_name: Return the driver binding name of the devinfo node
4513  *		This is the name the OS used to bind the node to a driver.
4514  */
4515 char *
4516 ddi_binding_name(dev_info_t *dip)
4517 {
4518 	return (DEVI(dip)->devi_binding_name);
4519 }
4520 
4521 /*
4522  * ddi_driver_major: Return the major number of the driver that
4523  *	the supplied devinfo is bound to.  If not yet bound,
4524  *	DDI_MAJOR_T_NONE.
4525  *
4526  * When used by the driver bound to 'devi', this
4527  * function will reliably return the driver major number.
4528  * Other ways of determining the driver major number, such as
4529  *	major = ddi_name_to_major(ddi_get_name(devi));
4530  *	major = ddi_name_to_major(ddi_binding_name(devi));
4531  * can return a different result as the driver/alias binding
4532  * can change dynamically, and thus should be avoided.
4533  */
4534 major_t
4535 ddi_driver_major(dev_info_t *devi)
4536 {
4537 	return (DEVI(devi)->devi_major);
4538 }
4539 
4540 /*
4541  * ddi_driver_name: Return the normalized driver name. this is the
4542  *		actual driver name
4543  */
4544 const char *
4545 ddi_driver_name(dev_info_t *devi)
4546 {
4547 	major_t major;
4548 
4549 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4550 		return (ddi_major_to_name(major));
4551 
4552 	return (ddi_node_name(devi));
4553 }
4554 
4555 /*
4556  * i_ddi_set_binding_name:	Set binding name.
4557  *
4558  *	Set the binding name to the given name.
4559  *	This routine is for use by the ddi implementation, not by drivers.
4560  */
4561 void
4562 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4563 {
4564 	DEVI(dip)->devi_binding_name = name;
4565 
4566 }
4567 
4568 /*
4569  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4570  * the implementation has used to bind the node to a driver.
4571  */
4572 char *
4573 ddi_get_name(dev_info_t *dip)
4574 {
4575 	return (DEVI(dip)->devi_binding_name);
4576 }
4577 
4578 /*
4579  * ddi_node_name: Return the name property of the devinfo node
4580  *		This may differ from ddi_binding_name if the node name
4581  *		does not define a binding to a driver (i.e. generic names).
4582  */
4583 char *
4584 ddi_node_name(dev_info_t *dip)
4585 {
4586 	return (DEVI(dip)->devi_node_name);
4587 }
4588 
4589 
4590 /*
4591  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4592  */
4593 int
4594 ddi_get_nodeid(dev_info_t *dip)
4595 {
4596 	return (DEVI(dip)->devi_nodeid);
4597 }
4598 
4599 int
4600 ddi_get_instance(dev_info_t *dip)
4601 {
4602 	return (DEVI(dip)->devi_instance);
4603 }
4604 
4605 struct dev_ops *
4606 ddi_get_driver(dev_info_t *dip)
4607 {
4608 	return (DEVI(dip)->devi_ops);
4609 }
4610 
4611 void
4612 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4613 {
4614 	DEVI(dip)->devi_ops = devo;
4615 }
4616 
4617 /*
4618  * ddi_set_driver_private/ddi_get_driver_private:
4619  * Get/set device driver private data in devinfo.
4620  */
4621 void
4622 ddi_set_driver_private(dev_info_t *dip, void *data)
4623 {
4624 	DEVI(dip)->devi_driver_data = data;
4625 }
4626 
4627 void *
4628 ddi_get_driver_private(dev_info_t *dip)
4629 {
4630 	return (DEVI(dip)->devi_driver_data);
4631 }
4632 
4633 /*
4634  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4635  */
4636 
4637 dev_info_t *
4638 ddi_get_parent(dev_info_t *dip)
4639 {
4640 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4641 }
4642 
4643 dev_info_t *
4644 ddi_get_child(dev_info_t *dip)
4645 {
4646 	return ((dev_info_t *)DEVI(dip)->devi_child);
4647 }
4648 
4649 dev_info_t *
4650 ddi_get_next_sibling(dev_info_t *dip)
4651 {
4652 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4653 }
4654 
4655 dev_info_t *
4656 ddi_get_next(dev_info_t *dip)
4657 {
4658 	return ((dev_info_t *)DEVI(dip)->devi_next);
4659 }
4660 
4661 void
4662 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4663 {
4664 	DEVI(dip)->devi_next = DEVI(nextdip);
4665 }
4666 
4667 /*
4668  * ddi_root_node:		Return root node of devinfo tree
4669  */
4670 
dev_info_t *
ddi_root_node(void)
{
	/* Set up early in boot; the root of the devinfo tree. */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4678 
4679 /*
4680  * Miscellaneous functions:
4681  */
4682 
4683 /*
4684  * Implementation specific hooks
4685  */
4686 
4687 void
4688 ddi_report_dev(dev_info_t *d)
4689 {
4690 	char *b;
4691 
4692 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4693 
4694 	/*
4695 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4696 	 * userland, so we print its full name together with the instance
4697 	 * number 'abbreviation' that the driver may use internally.
4698 	 */
4699 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4700 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4701 		cmn_err(CE_CONT, "?%s%d is %s\n",
4702 		    ddi_driver_name(d), ddi_get_instance(d),
4703 		    ddi_pathname(d, b));
4704 		kmem_free(b, MAXPATHLEN);
4705 	}
4706 }
4707 
4708 /*
4709  * ddi_ctlops() is described in the assembler not to buy a new register
4710  * window when it's called and can reduce cost in climbing the device tree
4711  * without using the tail call optimization.
4712  */
4713 int
4714 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4715 {
4716 	int ret;
4717 
4718 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4719 	    (void *)&rnumber, (void *)result);
4720 
4721 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4722 }
4723 
/*
 * Ask via ctlops how many register sets the device has; the count is
 * returned through *result.
 */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
4729 
/*
 * Issue DDI_CTLOPS_SIDDEV for the device (self-identifying device
 * query); the ctlops result is returned unchanged.
 */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
4735 
/*
 * Issue DDI_CTLOPS_SLAVEONLY for the device; the ctlops result is
 * returned unchanged.
 */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
4741 
/*
 * Issue DDI_CTLOPS_AFFINITY to ask whether devices 'a' and 'b' have an
 * affinity relationship; the ctlops result is returned unchanged.
 */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
4747 
4748 int
4749 ddi_streams_driver(dev_info_t *dip)
4750 {
4751 	if (i_ddi_devi_attached(dip) &&
4752 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4753 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4754 		return (DDI_SUCCESS);
4755 	return (DDI_FAILURE);
4756 }
4757 
4758 /*
4759  * callback free list
4760  */
4761 
static int ncallbacks;				/* size of the static L2 pool */
static int nc_low = 170;			/* pool size: < 48MB of memory */
static int nc_med = 512;			/* pool size: < 128MB of memory */
static int nc_high = 2048;			/* pool size: >= 128MB of memory */
static struct ddi_callback *callbackq;		/* the static L2 pool itself */
static struct ddi_callback *callbackqfree;	/* free-list head in the pool */
4768 
4769 /*
4770  * set/run callback lists
4771  */
struct	cbstats	{
	kstat_named_t	cb_asked;	/* ddi_set_callback() requests */
	kstat_named_t	cb_new;		/* new list entries created */
	kstat_named_t	cb_run;		/* callbacks that ran successfully */
	kstat_named_t	cb_delete;	/* list entries removed */
	kstat_named_t	cb_maxreq;	/* high-water mark of outstanding */
	kstat_named_t	cb_maxlist;	/* high-water mark of allocated */
	kstat_named_t	cb_alloc;	/* entries currently allocated */
	kstat_named_t	cb_runouts;	/* callbacks that had to be requeued */
	kstat_named_t	cb_L2;		/* allocations from the static pool */
	kstat_named_t	cb_grow;	/* tryhard fallback allocations */
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* Shorthand for the raw uint32 counters behind the kstats above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects callbackqfree, cbstats, and the per-listid callback lists. */
static kmutex_t ddi_callback_mutex;
4808 
4809 /*
4810  * callbacks are handled using a L1/L2 cache. The L1 cache
4811  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4812  * we can't get callbacks from the L1 cache [because pageout is doing
4813  * I/O at the time freemem is 0], we allocate callbacks out of the
4814  * L2 cache. The L2 cache is static and depends on the memory size.
4815  * [We might also count the number of devices at probe time and
4816  * allocate one structure per device and adjust for deferred attach]
4817  */
4818 void
4819 impl_ddi_callback_init(void)
4820 {
4821 	int	i;
4822 	uint_t	physmegs;
4823 	kstat_t	*ksp;
4824 
4825 	physmegs = physmem >> (20 - PAGESHIFT);
4826 	if (physmegs < 48) {
4827 		ncallbacks = nc_low;
4828 	} else if (physmegs < 128) {
4829 		ncallbacks = nc_med;
4830 	} else {
4831 		ncallbacks = nc_high;
4832 	}
4833 
4834 	/*
4835 	 * init free list
4836 	 */
4837 	callbackq = kmem_zalloc(
4838 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4839 	for (i = 0; i < ncallbacks-1; i++)
4840 		callbackq[i].c_nfree = &callbackq[i+1];
4841 	callbackqfree = callbackq;
4842 
4843 	/* init kstats */
4844 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4845 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4846 		ksp->ks_data = (void *) &cbstats;
4847 		kstat_install(ksp);
4848 	}
4849 
4850 }
4851 
/*
 * Insert (or merge) a callback request on the list headed at *listid.
 * Caller must hold ddi_callback_mutex (see ddi_set_callback() and
 * real_callback_run()).  If an entry for the same function/argument
 * pair already exists, its count is simply bumped.  Otherwise a new
 * entry is allocated: first from kmem_alloc, falling back to the
 * static L2 pool, and finally to kmem_alloc_tryhard with KM_PANIC.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Look for an existing entry to merge with; remember the tail. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* kmem failed; try the static pool, then tryhard. */
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append at the tail (or become the new head). */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4895 
4896 void
4897 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
4898 {
4899 	mutex_enter(&ddi_callback_mutex);
4900 	cbstats.nc_asked++;
4901 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
4902 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
4903 	(void) callback_insert(funcp, arg, listid, 1);
4904 	mutex_exit(&ddi_callback_mutex);
4905 }
4906 
/*
 * Drain a callback list (softcall target for ddi_run_callback()).
 *
 * Queue points at the listid word holding the list head.  Each entry
 * is unlinked under ddi_callback_mutex, its memory released (back to
 * the static L2 pool if it came from there, else kmem_free), and its
 * function invoked with the mutex dropped.  A callback returning 0 is
 * re-inserted for a later attempt; the outer loop runs until all
 * requests counted on the first pass have been accounted for.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* First pass only: total the outstanding request counts. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Unlink the head entry and release its memory. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* Entry came from the static pool; put it back. */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/*
		 * Invoke the callback up to 'count' times with the
		 * mutex dropped.  A zero return means the resource is
		 * still unavailable: re-queue the remaining count and
		 * stop retrying this entry for now.
		 */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
4967 
/*
 * Schedule the callback list headed at *listid to be drained in
 * softcall context (see real_callback_run() above).
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
4973 
4974 /*
4975  * ddi_periodic_t
4976  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
4977  *     int level)
4978  *
4979  * INTERFACE LEVEL
4980  *      Solaris DDI specific (Solaris DDI)
4981  *
4982  * PARAMETERS
4983  *      func: the callback function
4984  *
4985  *            The callback function will be invoked. The function is invoked
4986  *            in kernel context if the argument level passed is the zero.
4987  *            Otherwise it's invoked in interrupt context at the specified
4988  *            level.
4989  *
4990  *       arg: the argument passed to the callback function
4991  *
4992  *  interval: interval time
4993  *
4994  *    level : callback interrupt level
4995  *
4996  *            If the value is the zero, the callback function is invoked
4997  *            in kernel context. If the value is more than the zero, but
4998  *            less than or equal to ten, the callback function is invoked in
4999  *            interrupt context at the specified interrupt level, which may
5000  *            be used for real time applications.
5001  *
5002  *            This value must be in range of 0-10, which can be a numeric
5003  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5004  *
5005  * DESCRIPTION
5006  *      ddi_periodic_add(9F) schedules the specified function to be
5007  *      periodically invoked in the interval time.
5008  *
5009  *      As well as timeout(9F), the exact time interval over which the function
5010  *      takes effect cannot be guaranteed, but the value given is a close
5011  *      approximation.
5012  *
5013  *      Drivers waiting on behalf of processes with real-time constraints must
5014  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5015  *
5016  * RETURN VALUES
5017  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5018  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5019  *
5020  * CONTEXT
5021  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5022  *      it cannot be called in interrupt context, which is different from
5023  *      timeout(9F).
5024  */
5025 ddi_periodic_t
5026 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5027 {
5028 	/*
5029 	 * Sanity check of the argument level.
5030 	 */
5031 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5032 		cmn_err(CE_PANIC,
5033 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5034 
5035 	/*
5036 	 * Sanity check of the context. ddi_periodic_add() cannot be
5037 	 * called in either interrupt context or high interrupt context.
5038 	 */
5039 	if (servicing_interrupt())
5040 		cmn_err(CE_PANIC,
5041 		    "ddi_periodic_add: called in (high) interrupt context.");
5042 
5043 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5044 }
5045 
5046 /*
5047  * void
5048  * ddi_periodic_delete(ddi_periodic_t req)
5049  *
5050  * INTERFACE LEVEL
5051  *     Solaris DDI specific (Solaris DDI)
5052  *
5053  * PARAMETERS
5054  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5055  *     previously.
5056  *
5057  * DESCRIPTION
5058  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5059  *     previously requested.
5060  *
5061  *     ddi_periodic_delete(9F) will not return until the pending request
5062  *     is canceled or executed.
5063  *
5064  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5065  *     timeout which is either running on another CPU, or has already
5066  *     completed causes no problems. However, unlike untimeout(9F), there is
5067  *     no restrictions on the lock which might be held across the call to
5068  *     ddi_periodic_delete(9F).
5069  *
5070  *     Drivers should be structured with the understanding that the arrival of
5071  *     both an interrupt and a timeout for that interrupt can occasionally
5072  *     occur, in either order.
5073  *
5074  * CONTEXT
5075  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5076  *     it cannot be called in interrupt context, which is different from
5077  *     untimeout(9F).
5078  */
5079 void
5080 ddi_periodic_delete(ddi_periodic_t req)
5081 {
5082 	/*
5083 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5084 	 * called in either interrupt context or high interrupt context.
5085 	 */
5086 	if (servicing_interrupt())
5087 		cmn_err(CE_PANIC,
5088 		    "ddi_periodic_delete: called in (high) interrupt context.");
5089 
5090 	i_untimeout((timeout_t)req);
5091 }
5092 
5093 dev_info_t *
5094 nodevinfo(dev_t dev, int otyp)
5095 {
5096 	_NOTE(ARGUNUSED(dev, otyp))
5097 	return ((dev_info_t *)0);
5098 }
5099 
5100 /*
5101  * A driver should support its own getinfo(9E) entry point. This function
5102  * is provided as a convenience for ON drivers that don't expect their
5103  * getinfo(9E) entry point to be called. A driver that uses this must not
5104  * call ddi_create_minor_node.
5105  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* Stub getinfo(9E): unconditionally fails. */
	return (DDI_FAILURE);
}
5112 
5113 /*
5114  * A driver should support its own getinfo(9E) entry point. This function
5115  * is provided as a convenience for ON drivers that where the minor number
5116  * is the instance. Drivers that do not have 1:1 mapping must implement
5117  * their own getinfo(9E) function.
5118  */
5119 int
5120 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5121     void *arg, void **result)
5122 {
5123 	_NOTE(ARGUNUSED(dip))
5124 	int	instance;
5125 
5126 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5127 		return (DDI_FAILURE);
5128 
5129 	instance = getminor((dev_t)(uintptr_t)arg);
5130 	*result = (void *)(uintptr_t)instance;
5131 	return (DDI_SUCCESS);
5132 }
5133 
/*
 * Stub attach(9E)-style entry point: unconditionally fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5140 
/*
 * Stub bus_dma_map(9E) op: always DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5148 
/*
 * Stub DMA handle-allocation op: always DDI_DMA_BADATTR.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5156 
/*
 * Stub DMA handle-free op: always DDI_FAILURE.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5164 
/*
 * Stub DMA bind-handle op: always DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5173 
/*
 * Stub DMA unbind-handle op: always DDI_FAILURE.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5181 
/*
 * Stub DMA flush op: always DDI_FAILURE.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5190 
/*
 * Stub DMA window op: always DDI_FAILURE.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5199 
/*
 * Stub DMA mctl op: always DDI_FAILURE.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5208 
/*
 * Stub entry point that does nothing.
 */
void
ddivoid(void)
{}
5212 
/*
 * Stub chpoll(9E): always fails with ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5220 
/*
 * Return the credentials of the calling context (CRED()).
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5226 
/*
 * Return lbolt_hybrid() narrowed to clock_t; see ddi_get_lbolt64()
 * for the full 64-bit value.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
5232 
/*
 * 64-bit variant of ddi_get_lbolt(): returns lbolt_hybrid() untruncated.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5238 
5239 time_t
5240 ddi_get_time(void)
5241 {
5242 	time_t	now;
5243 
5244 	if ((now = gethrestime_sec()) == 0) {
5245 		timestruc_t ts;
5246 		mutex_enter(&tod_lock);
5247 		ts = tod_get();
5248 		mutex_exit(&tod_lock);
5249 		return (ts.tv_sec);
5250 	} else {
5251 		return (now);
5252 	}
5253 }
5254 
/*
 * Return the process ID of the calling thread's process.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5260 
/*
 * Return the calling kernel thread's unique thread ID (t_did).
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5266 
5267 /*
5268  * This function returns B_TRUE if the caller can reasonably expect that a call
5269  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5270  * by user-level signal.  If it returns B_FALSE, then the caller should use
5271  * other means to make certain that the wait will not hang "forever."
5272  *
5273  * It does not check the signal mask, nor for reception of any particular
5274  * signal.
5275  *
5276  * Currently, a thread can receive a signal if it's not a kernel thread and it
5277  * is not in the middle of exit(2) tear-down.  Threads that are in that
5278  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5279  * cv_timedwait, and qwait_sig to qwait.
5280  */
5281 boolean_t
5282 ddi_can_receive_sig(void)
5283 {
5284 	proc_t *pp;
5285 
5286 	if (curthread->t_proc_flag & TP_LWPEXIT)
5287 		return (B_FALSE);
5288 	if ((pp = ttoproc(curthread)) == NULL)
5289 		return (B_FALSE);
5290 	return (pp->p_as != &kas);
5291 }
5292 
5293 /*
5294  * Swap bytes in 16-bit [half-]words
5295  */
5296 void
5297 swab(void *src, void *dst, size_t nbytes)
5298 {
5299 	uchar_t *pf = (uchar_t *)src;
5300 	uchar_t *pt = (uchar_t *)dst;
5301 	uchar_t tmp;
5302 	int nshorts;
5303 
5304 	nshorts = nbytes >> 1;
5305 
5306 	while (--nshorts >= 0) {
5307 		tmp = *pf++;
5308 		*pt++ = *pf++;
5309 		*pt++ = tmp;
5310 	}
5311 }
5312 
/*
 * Append a minor data node to the tail of ddip's minor list, holding
 * the node's devinfo lock (ndi_devi_enter) across the list walk.
 */
static void
ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
{
	int			circ;
	struct ddi_minor_data	*dp;

	ndi_devi_enter(ddip, &circ);
	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
		/* Empty list: new entry becomes the head. */
		DEVI(ddip)->devi_minor = dmdp;
	} else {
		/* Walk to the tail and link the new entry there. */
		while (dp->next != (struct ddi_minor_data *)NULL)
			dp = dp->next;
		dp->next = dmdp;
	}
	ndi_devi_exit(ddip, circ);
}
5329 
5330 static int
5331 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5332 {
5333 	int se_flag;
5334 	int kmem_flag;
5335 	int se_err;
5336 	char *pathname, *class_name;
5337 	sysevent_t *ev = NULL;
5338 	sysevent_id_t eid;
5339 	sysevent_value_t se_val;
5340 	sysevent_attr_list_t *ev_attr_list = NULL;
5341 
5342 	/* determine interrupt context */
5343 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5344 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5345 
5346 	i_ddi_di_cache_invalidate();
5347 
5348 #ifdef DEBUG
5349 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5350 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5351 		    "interrupt level by driver %s",
5352 		    ddi_driver_name(dip));
5353 	}
5354 #endif /* DEBUG */
5355 
5356 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5357 	if (ev == NULL) {
5358 		goto fail;
5359 	}
5360 
5361 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5362 	if (pathname == NULL) {
5363 		sysevent_free(ev);
5364 		goto fail;
5365 	}
5366 
5367 	(void) ddi_pathname(dip, pathname);
5368 	ASSERT(strlen(pathname));
5369 	se_val.value_type = SE_DATA_TYPE_STRING;
5370 	se_val.value.sv_string = pathname;
5371 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5372 	    &se_val, se_flag) != 0) {
5373 		kmem_free(pathname, MAXPATHLEN);
5374 		sysevent_free(ev);
5375 		goto fail;
5376 	}
5377 	kmem_free(pathname, MAXPATHLEN);
5378 
5379 	/* add the device class attribute */
5380 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5381 		se_val.value_type = SE_DATA_TYPE_STRING;
5382 		se_val.value.sv_string = class_name;
5383 		if (sysevent_add_attr(&ev_attr_list,
5384 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5385 			sysevent_free_attr(ev_attr_list);
5386 			goto fail;
5387 		}
5388 	}
5389 
5390 	/*
5391 	 * allow for NULL minor names
5392 	 */
5393 	if (minor_name != NULL) {
5394 		se_val.value.sv_string = minor_name;
5395 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5396 		    &se_val, se_flag) != 0) {
5397 			sysevent_free_attr(ev_attr_list);
5398 			sysevent_free(ev);
5399 			goto fail;
5400 		}
5401 	}
5402 
5403 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5404 		sysevent_free_attr(ev_attr_list);
5405 		sysevent_free(ev);
5406 		goto fail;
5407 	}
5408 
5409 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5410 		if (se_err == SE_NO_TRANSPORT) {
5411 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5412 			    "for driver %s (%s). Run devfsadm -i %s",
5413 			    ddi_driver_name(dip), "syseventd not responding",
5414 			    ddi_driver_name(dip));
5415 		} else {
5416 			sysevent_free(ev);
5417 			goto fail;
5418 		}
5419 	}
5420 
5421 	sysevent_free(ev);
5422 	return (DDI_SUCCESS);
5423 fail:
5424 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5425 	    "for driver %s. Run devfsadm -i %s",
5426 	    ddi_driver_name(dip), ddi_driver_name(dip));
5427 	return (DDI_SUCCESS);
5428 }
5429 
/*
 * Log an ESC_DEVFS_MINOR_REMOVE sysevent for removal of a minor node.
 * Failing to remove a minor node is not of interest, therefore we do
 * not generate an error message: every exit path, including the error
 * paths, returns DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	/* the minor-node namespace is changing; invalidate the di cache */
	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	/*
	 * NOTE(review): a KM_SLEEP allocation cannot return NULL, so the
	 * check below appears to be dead code kept for symmetry -- confirm
	 * before removing.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* DEVFS_PATHNAME attribute: /devices path of the affected node */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* the attribute holds its own copy; the local buffer can go now */
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		/* value_type is still SE_DATA_TYPE_STRING from above */
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/*
	 * On success the attribute list is owned by (and freed with) the
	 * event; on failure it must be freed explicitly before the event.
	 */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5529 
5530 /*
5531  * Derive the device class of the node.
5532  * Device class names aren't defined yet. Until this is done we use
5533  * devfs event subclass names as device class names.
5534  */
5535 static int
5536 derive_devi_class(dev_info_t *dip, const char *node_type, int flag)
5537 {
5538 	int rv = DDI_SUCCESS;
5539 
5540 	if (i_ddi_devi_class(dip) == NULL) {
5541 		if (strncmp(node_type, DDI_NT_BLOCK,
5542 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5543 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5544 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5545 		    strcmp(node_type, DDI_NT_FD) != 0) {
5546 
5547 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5548 
5549 		} else if (strncmp(node_type, DDI_NT_NET,
5550 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5551 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5552 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5553 
5554 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5555 
5556 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5557 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5558 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5559 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5560 
5561 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5562 
5563 		} else if (strncmp(node_type, DDI_PSEUDO,
5564 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5565 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5566 		    sizeof (ESC_LOFI) -1) == 0)) {
5567 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5568 		}
5569 	}
5570 
5571 	return (rv);
5572 }
5573 
5574 /*
5575  * Check compliance with PSARC 2003/375:
5576  *
5577  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5578  * exceed IFNAMSIZ (16) characters in length.
5579  */
5580 static boolean_t
5581 verify_name(const char *name)
5582 {
5583 	size_t len = strlen(name);
5584 	const char *cp;
5585 
5586 	if (len == 0 || len > IFNAMSIZ)
5587 		return (B_FALSE);
5588 
5589 	for (cp = name; *cp != '\0'; cp++) {
5590 		if (!isalnum(*cp) && *cp != '_')
5591 			return (B_FALSE);
5592 	}
5593 
5594 	return (B_TRUE);
5595 }
5596 
5597 /*
5598  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5599  *				attach it to the given devinfo node.
5600  */
5601 
static int
ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only block and character special nodes can be created */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* record a device class for externally visible minor nodes */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	/*
	 * NOTE(review): node_type is stored by reference, not copied --
	 * callers are assumed to pass strings with static lifetime (the
	 * DDI_NT_* macros); confirm for any caller passing dynamic storage.
	 */
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone minors are aliased through the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, dmdp->ddm_name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5724 
int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
	/* standard exported minor node (DDM_MINOR), no device privileges */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5732 
int
ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	/* like ddi_create_minor_node(), with device privileges attached */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5741 
int
ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
	/* same as ddi_create_minor_node(), but of type DDM_DEFAULT */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5749 
5750 /*
5751  * Internal (non-ddi) routine for drivers to export names known
5752  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5753  * but not exported externally to /dev
5754  */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	/* DDM_INTERNAL_PATH nodes are never exported to /dev (see above) */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
5762 
5763 void
5764 ddi_remove_minor_node(dev_info_t *dip, const char *name)
5765 {
5766 	int			circ;
5767 	struct ddi_minor_data	*dmdp, *dmdp1;
5768 	struct ddi_minor_data	**dmdp_prev;
5769 
5770 	ndi_devi_enter(dip, &circ);
5771 	dmdp_prev = &DEVI(dip)->devi_minor;
5772 	dmdp = DEVI(dip)->devi_minor;
5773 	while (dmdp != NULL) {
5774 		dmdp1 = dmdp->next;
5775 		if ((name == NULL || (dmdp->ddm_name != NULL &&
5776 		    strcmp(name, dmdp->ddm_name) == 0))) {
5777 			if (dmdp->ddm_name != NULL) {
5778 				if (dmdp->type != DDM_INTERNAL_PATH)
5779 					(void) i_log_devfs_minor_remove(dip,
5780 					    dmdp->ddm_name);
5781 				kmem_free(dmdp->ddm_name,
5782 				    strlen(dmdp->ddm_name) + 1);
5783 			}
5784 			/*
5785 			 * Release device privilege, if any.
5786 			 * Release dacf client data associated with this minor
5787 			 * node by storing NULL.
5788 			 */
5789 			if (dmdp->ddm_node_priv)
5790 				dpfree(dmdp->ddm_node_priv);
5791 			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
5792 			kmem_free(dmdp, sizeof (struct ddi_minor_data));
5793 			*dmdp_prev = dmdp1;
5794 			/*
5795 			 * OK, we found it, so get out now -- if we drive on,
5796 			 * we will strcmp against garbage.  See 1139209.
5797 			 */
5798 			if (name != NULL)
5799 				break;
5800 		} else {
5801 			dmdp_prev = &dmdp->next;
5802 		}
5803 		dmdp = dmdp1;
5804 	}
5805 	ndi_devi_exit(dip, circ);
5806 }
5807 
5808 
/*
 * Return non-zero if the system is currently panicking.
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
5814 
5815 
5816 /*
5817  * Find first bit set in a mask (returned counting from 1 up)
5818  */
5819 
int
ddi_ffs(long mask)
{
	/* ffs() counts bit positions from 1; a zero mask yields 0 */
	return (ffs(mask));
}
5825 
5826 /*
5827  * Find last bit set. Take mask and clear
5828  * all but the most significant bit, and
5829  * then let ffs do the rest of the work.
5830  *
5831  * Algorithm courtesy of Steve Chessin.
5832  */
5833 
int
ddi_fls(long mask)
{
	long lsb_cleared;

	/*
	 * Repeatedly clear the least significant set bit until only the
	 * most significant one remains, then let ffs() report its
	 * 1-based position.  ffs(0) is 0, so a zero mask returns 0.
	 */
	while ((lsb_cleared = (mask & (mask - 1))) != 0)
		mask = lsb_cleared;

	return (ffs(mask));
}
5846 
5847 /*
5848  * The ddi_soft_state_* routines comprise generic storage management utilities
5849  * for driver soft state structures (in "the old days," this was done with
 * a statically sized array - big systems and dynamic loading and unloading
5851  * make heap allocation more attractive).
5852  */
5853 
5854 /*
5855  * Allocate a set of pointers to 'n_items' objects of size 'size'
5856  * bytes.  Each pointer is initialized to nil.
5857  *
5858  * The 'size' and 'n_items' values are stashed in the opaque
5859  * handle returned to the caller.
5860  *
5861  * This implementation interprets 'set of pointers' to mean 'array
5862  * of pointers' but note that nothing in the interface definition
5863  * precludes an implementation that uses, for example, a linked list.
5864  * However there should be a small efficiency gain from using an array
5865  * at lookup time.
5866  *
5867  * NOTE	As an optimization, we make our growable array allocations in
5868  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5869  *	gives us anyway.  It should save us some free/realloc's ..
5870  *
5871  *	As a further optimization, we make the growable array start out
5872  *	with MIN_N_ITEMS in it.
5873  */
5874 
5875 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
5876 
5877 int
5878 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
5879 {
5880 	i_ddi_soft_state	*ss;
5881 
5882 	if (state_p == NULL || size == 0)
5883 		return (EINVAL);
5884 
5885 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
5886 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
5887 	ss->size = size;
5888 
5889 	if (n_items < MIN_N_ITEMS)
5890 		ss->n_items = MIN_N_ITEMS;
5891 	else {
5892 		int bitlog;
5893 
5894 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
5895 			bitlog--;
5896 		ss->n_items = 1 << bitlog;
5897 	}
5898 
5899 	ASSERT(ss->n_items >= n_items);
5900 
5901 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
5902 
5903 	*state_p = ss;
5904 	return (0);
5905 }
5906 
5907 /*
5908  * Allocate a state structure of size 'size' to be associated
5909  * with item 'item'.
5910  *
5911  * In this implementation, the array is extended to
5912  * allow the requested offset, if needed.
5913  */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 means the handle was never initialized (or was finied) */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 * NOTE(review): this KM_SLEEP allocation (and the ones below) can
	 * block while ss->lock is held -- assumed safe since callers are
	 * sleepable contexts; confirm before calling from interrupt level.
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		/*
		 * Publish the new array before the new size, so the
		 * lock-free reader never indexes an old (smaller) array
		 * with the larger n_items.  NOTE(review): this relies on
		 * these plain stores being observed in program order; no
		 * explicit memory barrier is issued here.
		 */
		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6006 
6007 /*
6008  * Fetch a pointer to the allocated soft state structure.
6009  *
6010  * This is designed to be cheap.
6011  *
6012  * There's an argument that there should be more checking for
6013  * nil pointers and out of bounds on the array.. but we do a lot
6014  * of that in the alloc/free routines.
6015  *
6016  * An array has the convenience that we don't need to lock read-access
6017  * to it c.f. a linked list.  However our "expanding array" strategy
6018  * means that we should hold a readers lock on the i_ddi_soft_state
6019  * structure.
6020  *
6021  * However, from a performance viewpoint, we need to do it without
6022  * any locks at all -- this also makes it a leaf routine.  The algorithm
6023  * is 'lock-free' because we only discard the pointer arrays at
6024  * ddi_soft_state_fini() time.
6025  */
void *
ddi_get_soft_state(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;

	ASSERT((ss != NULL) && (item >= 0));

	/*
	 * Lock-free read: n_items is tested before array is loaded and
	 * dereferenced.  Arrays outgrown by ddi_soft_state_zalloc() stay
	 * on the dirty list until ddi_soft_state_fini(), so a racing
	 * reader never touches freed memory (see the block comment above).
	 */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}
6037 
6038 /*
6039  * Free the state structure corresponding to 'item.'   Freeing an
6040  * element that has either gone or was never allocated is not
6041  * considered an error.  Note that we free the state structure, but
6042  * we don't shrink our pointer array, or discard 'dirty' arrays,
6043  * since even a few pointers don't really waste too much memory.
6044  *
6045  * Passing an item number that is out of bounds, or a null pointer will
6046  * provoke an error message.
6047  */
6048 void
6049 ddi_soft_state_free(void *state, int item)
6050 {
6051 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6052 	void			**array;
6053 	void			*element;
6054 	static char		msg[] = "ddi_soft_state_free:";
6055 
6056 	if (ss == NULL) {
6057 		cmn_err(CE_WARN, "%s null handle: %s",
6058 		    msg, mod_containing_pc(caller()));
6059 		return;
6060 	}
6061 
6062 	element = NULL;
6063 
6064 	mutex_enter(&ss->lock);
6065 
6066 	if ((array = ss->array) == NULL || ss->size == 0) {
6067 		cmn_err(CE_WARN, "%s bad handle: %s",
6068 		    msg, mod_containing_pc(caller()));
6069 	} else if (item < 0 || item >= ss->n_items) {
6070 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6071 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6072 	} else if (array[item] != NULL) {
6073 		element = array[item];
6074 		array[item] = NULL;
6075 	}
6076 
6077 	mutex_exit(&ss->lock);
6078 
6079 	if (element)
6080 		kmem_free(element, ss->size);
6081 }
6082 
6083 /*
6084  * Free the entire set of pointers, and any
6085  * soft state structures contained therein.
6086  *
6087  * Note that we don't grab the ss->lock mutex, even though
6088  * we're inspecting the various fields of the data structure.
6089  *
6090  * There is an implicit assumption that this routine will
6091  * never run concurrently with any of the above on this
6092  * particular state structure i.e. by the time the driver
6093  * calls this routine, there should be no other threads
6094  * running in the driver.
6095  */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state	*ss, *dirty;
	int			item;
	static char		msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 means the handle was never initialized */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6136 
6137 #define	SS_N_ITEMS_PER_HASH	16
6138 #define	SS_MIN_HASH_SZ		16
6139 #define	SS_MAX_HASH_SZ		4096
6140 
6141 int
6142 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6143     int n_items)
6144 {
6145 	i_ddi_soft_state_bystr	*sss;
6146 	int			hash_sz;
6147 
6148 	ASSERT(state_p && size && n_items);
6149 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6150 		return (EINVAL);
6151 
6152 	/* current implementation is based on hash, convert n_items to hash */
6153 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6154 	if (hash_sz < SS_MIN_HASH_SZ)
6155 		hash_sz = SS_MIN_HASH_SZ;
6156 	else if (hash_sz > SS_MAX_HASH_SZ)
6157 		hash_sz = SS_MAX_HASH_SZ;
6158 
6159 	/* allocate soft_state pool */
6160 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6161 	sss->ss_size = size;
6162 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6163 	    hash_sz, mod_hash_null_valdtor);
6164 	*state_p = (ddi_soft_state_bystr *)sss;
6165 	return (0);
6166 }
6167 
int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
{
	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
	void			*sso;
	char			*dup_str;

	ASSERT(sss && str && sss->ss_mod_hash);
	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
		return (DDI_FAILURE);
	/* on success, the hash owns both the key copy and the object */
	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
	if (mod_hash_insert(sss->ss_mod_hash,
	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
		return (DDI_SUCCESS);

	/*
	 * The only error from an strhash insert is caused by a duplicate key.
	 * We refuse to tread on an existing element, so free and fail.
	 */
	kmem_free(dup_str, strlen(dup_str) + 1);
	kmem_free(sso, sss->ss_size);
	return (DDI_FAILURE);
}
6192 
6193 void *
6194 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6195 {
6196 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6197 	void			*sso;
6198 
6199 	ASSERT(sss && str && sss->ss_mod_hash);
6200 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6201 		return (NULL);
6202 
6203 	if (mod_hash_find(sss->ss_mod_hash,
6204 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6205 		return (sso);
6206 	return (NULL);
6207 }
6208 
6209 void
6210 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6211 {
6212 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6213 	void			*sso;
6214 
6215 	ASSERT(sss && str && sss->ss_mod_hash);
6216 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6217 		return;
6218 
6219 	(void) mod_hash_remove(sss->ss_mod_hash,
6220 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6221 	kmem_free(sso, sss->ss_size);
6222 }
6223 
6224 void
6225 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6226 {
6227 	i_ddi_soft_state_bystr	*sss;
6228 
6229 	ASSERT(state_p);
6230 	if (state_p == NULL)
6231 		return;
6232 
6233 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6234 	if (sss == NULL)
6235 		return;
6236 
6237 	ASSERT(sss->ss_mod_hash);
6238 	if (sss->ss_mod_hash) {
6239 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6240 		sss->ss_mod_hash = NULL;
6241 	}
6242 
6243 	kmem_free(sss, sizeof (*sss));
6244 	*state_p = NULL;
6245 }
6246 
6247 /*
6248  * The ddi_strid_* routines provide string-to-index management utilities.
6249  */
6250 /* allocate and initialize an strid set */
6251 int
6252 ddi_strid_init(ddi_strid **strid_p, int n_items)
6253 {
6254 	i_ddi_strid	*ss;
6255 	int		hash_sz;
6256 
6257 	if (strid_p == NULL)
6258 		return (DDI_FAILURE);
6259 
6260 	/* current implementation is based on hash, convert n_items to hash */
6261 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6262 	if (hash_sz < SS_MIN_HASH_SZ)
6263 		hash_sz = SS_MIN_HASH_SZ;
6264 	else if (hash_sz > SS_MAX_HASH_SZ)
6265 		hash_sz = SS_MAX_HASH_SZ;
6266 
6267 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6268 	ss->strid_chunksz = n_items;
6269 	ss->strid_spacesz = n_items;
6270 	ss->strid_space = id_space_create("strid", 1, n_items);
6271 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6272 	    mod_hash_null_valdtor);
6273 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6274 	    mod_hash_null_valdtor);
6275 	*strid_p = (ddi_strid *)ss;
6276 	return (DDI_SUCCESS);
6277 }
6278 
6279 /* allocate an id mapping within the specified set for str, return id */
static id_t
i_ddi_strid_alloc(ddi_strid *strid, char *str)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	id_t		id;
	char		*s;

	ASSERT(ss && str);
	if ((ss == NULL) || (str == NULL))
		return (0);

	/*
	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
	 * range as compressed as possible.  This is important to minimize
	 * the amount of space used when the id is used as a ddi_soft_state
	 * index by the caller.
	 *
	 * If the id list is exhausted, increase the size of the list
	 * by the chunk size specified in ddi_strid_init and reattempt
	 * the allocation.
	 */
	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
		id_space_extend(ss->strid_space, ss->strid_spacesz,
		    ss->strid_spacesz + ss->strid_chunksz);
		ss->strid_spacesz += ss->strid_chunksz;
		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
			return (0);
	}

	/*
	 * NOTE: since we create and destroy in unison we can save space by
	 * using bystr key as the byid value.  This means destroy must occur
	 * in (byid, bystr) order.
	 */
	s = i_ddi_strdup(str, KM_SLEEP);
	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
	    (mod_hash_val_t)(intptr_t)id) != 0) {
		/*
		 * NOTE(review): on this failure path (duplicate key) the
		 * strdup'ed 's' appears to be leaked -- mod_hash_insert()
		 * does not take ownership on failure.  Presumed unreachable
		 * for correctly-used strid sets; confirm before relying
		 * on it.
		 */
		ddi_strid_free(strid, id);
		return (0);
	}
	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
	    (mod_hash_val_t)s) != 0) {
		ddi_strid_free(strid, id);
		return (0);
	}

	/* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
	return (id);
}
6329 
6330 /* allocate an id mapping within the specified set for str, return id */
id_t
ddi_strid_alloc(ddi_strid *strid, char *str)
{
	/* public wrapper; 0 is the failure/invalid-id value */
	return (i_ddi_strid_alloc(strid, str));
}
6336 
6337 /* return the id within the specified strid given the str */
6338 id_t
6339 ddi_strid_str2id(ddi_strid *strid, char *str)
6340 {
6341 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6342 	id_t		id = 0;
6343 	mod_hash_val_t	hv;
6344 
6345 	ASSERT(ss && str);
6346 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6347 	    (mod_hash_key_t)str, &hv) == 0))
6348 		id = (int)(intptr_t)hv;
6349 	return (id);
6350 }
6351 
6352 /* return str within the specified strid given the id */
6353 char *
6354 ddi_strid_id2str(ddi_strid *strid, id_t id)
6355 {
6356 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6357 	char		*str = NULL;
6358 	mod_hash_val_t	hv;
6359 
6360 	ASSERT(ss && id > 0);
6361 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6362 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6363 		str = (char *)hv;
6364 	return (str);
6365 }
6366 
6367 /* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	char		*str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/* str is NULL when the id was never mapped (or was already freed) */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6386 
6387 /* destroy the strid set */
6388 void
6389 ddi_strid_fini(ddi_strid **strid_p)
6390 {
6391 	i_ddi_strid	*ss;
6392 
6393 	ASSERT(strid_p);
6394 	if (strid_p == NULL)
6395 		return;
6396 
6397 	ss = (i_ddi_strid *)(*strid_p);
6398 	if (ss == NULL)
6399 		return;
6400 
6401 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6402 	if (ss->strid_byid)
6403 		mod_hash_destroy_hash(ss->strid_byid);
6404 	if (ss->strid_byid)
6405 		mod_hash_destroy_hash(ss->strid_bystr);
6406 	if (ss->strid_space)
6407 		id_space_destroy(ss->strid_space);
6408 	kmem_free(ss, sizeof (*ss));
6409 	*strid_p = NULL;
6410 }
6411 
6412 /*
6413  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6414  * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6416  * The double buffer is not freed until dev_info structure destruction
6417  * (by i_ddi_free_node).
6418  */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char	*buf = DEVI(dip)->devi_addr_buf;
	char	*newaddr;

	/* lazily allocate the two MAXNAMELEN halves of the double buffer */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/*
		 * Copy into whichever half is NOT currently published as
		 * devi_addr, so a reader holding the old pointer sees
		 * stable contents while the new address is written.
		 */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}
6440 
6441 char *
6442 ddi_get_name_addr(dev_info_t *dip)
6443 {
6444 	return (DEVI(dip)->devi_addr);
6445 }
6446 
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	/* Attach parent-private data to the node; ownership stays with caller. */
	DEVI(dip)->devi_parent_data = pd;
}
6452 
6453 void *
6454 ddi_get_parent_data(dev_info_t *dip)
6455 {
6456 	return (DEVI(dip)->devi_parent_data);
6457 }
6458 
6459 /*
6460  * ddi_name_to_major: returns the major number of a named module,
6461  * derived from the current driver alias binding.
6462  *
6463  * Caveat: drivers should avoid the use of this function, in particular
6464  * together with ddi_get_name/ddi_binding name, as per
6465  *	major = ddi_name_to_major(ddi_get_name(devi));
6466  * ddi_name_to_major() relies on the state of the device/alias binding,
6467  * which can and does change dynamically as aliases are administered
6468  * over time.  An attached device instance cannot rely on the major
6469  * number returned by ddi_name_to_major() to match its own major number.
6470  *
6471  * For driver use, ddi_driver_major() reliably returns the major number
6472  * for the module to which the device was bound at attach time over
6473  * the life of the instance.
6474  *	major = ddi_driver_major(dev_info_t *)
6475  */
6476 major_t
6477 ddi_name_to_major(char *name)
6478 {
6479 	return (mod_name_to_major(name));
6480 }
6481 
6482 /*
6483  * ddi_major_to_name: Returns the module name bound to a major number.
6484  */
6485 char *
6486 ddi_major_to_name(major_t major)
6487 {
6488 	return (mod_major_to_name(major));
6489 }
6490 
6491 /*
6492  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6493  * pointed at by 'name.'  A devinfo node is named as a result of calling
6494  * ddi_initchild().
6495  *
6496  * Note: the driver must be held before calling this function!
6497  */
6498 char *
6499 ddi_deviname(dev_info_t *dip, char *name)
6500 {
6501 	char *addrname;
6502 	char none = '\0';
6503 
6504 	if (dip == ddi_root_node()) {
6505 		*name = '\0';
6506 		return (name);
6507 	}
6508 
6509 	if (i_ddi_node_state(dip) < DS_BOUND) {
6510 		addrname = &none;
6511 	} else {
6512 		/*
6513 		 * Use ddi_get_name_addr() without checking state so we get
6514 		 * a unit-address if we are called after ddi_set_name_addr()
6515 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6516 		 * node promotion to DS_INITIALIZED.  We currently have
6517 		 * two situations where we are called in this state:
6518 		 *   o  For framework processing of a path-oriented alias.
6519 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6520 		 *	from it's tran_tgt_init(9E) implementation.
6521 		 */
6522 		addrname = ddi_get_name_addr(dip);
6523 		if (addrname == NULL)
6524 			addrname = &none;
6525 	}
6526 
6527 	if (*addrname == '\0') {
6528 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6529 	} else {
6530 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6531 	}
6532 
6533 	return (name);
6534 }
6535 
6536 /*
6537  * Spits out the name of device node, typically name@addr, for a given node,
6538  * using the driver name, not the nodename.
6539  *
6540  * Used by match_parent. Not to be used elsewhere.
6541  */
6542 char *
6543 i_ddi_parname(dev_info_t *dip, char *name)
6544 {
6545 	char *addrname;
6546 
6547 	if (dip == ddi_root_node()) {
6548 		*name = '\0';
6549 		return (name);
6550 	}
6551 
6552 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6553 
6554 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6555 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6556 	else
6557 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6558 	return (name);
6559 }
6560 
6561 static char *
6562 pathname_work(dev_info_t *dip, char *path)
6563 {
6564 	char *bp;
6565 
6566 	if (dip == ddi_root_node()) {
6567 		*path = '\0';
6568 		return (path);
6569 	}
6570 	(void) pathname_work(ddi_get_parent(dip), path);
6571 	bp = path + strlen(path);
6572 	(void) ddi_deviname(dip, bp);
6573 	return (path);
6574 }
6575 
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	/* Recursive worker is split out to aid debugging. */
	return (pathname_work(dip, path));
}
6581 
6582 char *
6583 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6584 {
6585 	if (dmdp->dip == NULL)
6586 		*path = '\0';
6587 	else {
6588 		(void) ddi_pathname(dmdp->dip, path);
6589 		if (dmdp->ddm_name) {
6590 			(void) strcat(path, ":");
6591 			(void) strcat(path, dmdp->ddm_name);
6592 		}
6593 	}
6594 	return (path);
6595 }
6596 
6597 static char *
6598 pathname_work_obp(dev_info_t *dip, char *path)
6599 {
6600 	char *bp;
6601 	char *obp_path;
6602 
6603 	/*
6604 	 * look up the "obp-path" property, return the path if it exists
6605 	 */
6606 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6607 	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6608 		(void) strcpy(path, obp_path);
6609 		ddi_prop_free(obp_path);
6610 		return (path);
6611 	}
6612 
6613 	/*
6614 	 * stop at root, no obp path
6615 	 */
6616 	if (dip == ddi_root_node()) {
6617 		return (NULL);
6618 	}
6619 
6620 	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6621 	if (obp_path == NULL)
6622 		return (NULL);
6623 
6624 	/*
6625 	 * append our component to parent's obp path
6626 	 */
6627 	bp = path + strlen(path);
6628 	if (*(bp - 1) != '/')
6629 		(void) strcat(bp++, "/");
6630 	(void) ddi_deviname(dip, bp);
6631 	return (path);
6632 }
6633 
6634 /*
6635  * return the 'obp-path' based path for the given node, or NULL if the node
6636  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6637  * function can't be called from interrupt context (since we need to
6638  * lookup a string property).
6639  */
6640 char *
6641 ddi_pathname_obp(dev_info_t *dip, char *path)
6642 {
6643 	ASSERT(!servicing_interrupt());
6644 	if (dip == NULL || path == NULL)
6645 		return (NULL);
6646 
6647 	/* split work into a separate function to aid debugging */
6648 	return (pathname_work_obp(dip, path));
6649 }
6650 
6651 int
6652 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6653 {
6654 	dev_info_t *pdip;
6655 	char *obp_path = NULL;
6656 	int rc = DDI_FAILURE;
6657 
6658 	if (dip == NULL)
6659 		return (DDI_FAILURE);
6660 
6661 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6662 
6663 	pdip = ddi_get_parent(dip);
6664 
6665 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6666 		(void) ddi_pathname(pdip, obp_path);
6667 	}
6668 
6669 	if (component) {
6670 		(void) strncat(obp_path, "/", MAXPATHLEN);
6671 		(void) strncat(obp_path, component, MAXPATHLEN);
6672 	}
6673 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6674 	    obp_path);
6675 
6676 	if (obp_path)
6677 		kmem_free(obp_path, MAXPATHLEN);
6678 
6679 	return (rc);
6680 }
6681 
6682 /*
6683  * Given a dev_t, return the pathname of the corresponding device in the
6684  * buffer pointed at by "path."  The buffer is assumed to be large enough
6685  * to hold the pathname of the device (MAXPATHLEN).
6686  *
6687  * The pathname of a device is the pathname of the devinfo node to which
6688  * the device "belongs," concatenated with the character ':' and the name
6689  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6690  * just the pathname of the devinfo node is returned without driving attach
6691  * of that node.  For a non-zero spec_type, an attach is performed and a
6692  * search of the minor list occurs.
6693  *
6694  * It is possible that the path associated with the dev_t is not
6695  * currently available in the devinfo tree.  In order to have a
6696  * dev_t, a device must have been discovered before, which means
6697  * that the path is always in the instance tree.  The one exception
6698  * to this is if the dev_t is associated with a pseudo driver, in
6699  * which case the device must exist on the pseudo branch of the
6700  * devinfo tree as a result of parsing .conf files.
6701  */
6702 int
6703 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6704 {
6705 	int		circ;
6706 	major_t		major = getmajor(devt);
6707 	int		instance;
6708 	dev_info_t	*dip;
6709 	char		*minorname;
6710 	char		*drvname;
6711 
6712 	if (major >= devcnt)
6713 		goto fail;
6714 	if (major == clone_major) {
6715 		/* clone has no minor nodes, manufacture the path here */
6716 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6717 			goto fail;
6718 
6719 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6720 		return (DDI_SUCCESS);
6721 	}
6722 
6723 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6724 	if ((instance = dev_to_instance(devt)) == -1)
6725 		goto fail;
6726 
6727 	/* reconstruct the path given the major/instance */
6728 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6729 		goto fail;
6730 
6731 	/* if spec_type given we must drive attach and search minor nodes */
6732 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6733 		/* attach the path so we can search minors */
6734 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6735 			goto fail;
6736 
6737 		/* Add minorname to path. */
6738 		ndi_devi_enter(dip, &circ);
6739 		minorname = i_ddi_devtspectype_to_minorname(dip,
6740 		    devt, spec_type);
6741 		if (minorname) {
6742 			(void) strcat(path, ":");
6743 			(void) strcat(path, minorname);
6744 		}
6745 		ndi_devi_exit(dip, circ);
6746 		ddi_release_devi(dip);
6747 		if (minorname == NULL)
6748 			goto fail;
6749 	}
6750 	ASSERT(strlen(path) < MAXPATHLEN);
6751 	return (DDI_SUCCESS);
6752 
6753 fail:	*path = 0;
6754 	return (DDI_FAILURE);
6755 }
6756 
6757 /*
6758  * Given a major number and an instance, return the path.
6759  * This interface does NOT drive attach.
6760  */
6761 int
6762 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6763 {
6764 	struct devnames *dnp;
6765 	dev_info_t	*dip;
6766 
6767 	if ((major >= devcnt) || (instance == -1)) {
6768 		*path = 0;
6769 		return (DDI_FAILURE);
6770 	}
6771 
6772 	/* look for the major/instance in the instance tree */
6773 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6774 	    path) == DDI_SUCCESS) {
6775 		ASSERT(strlen(path) < MAXPATHLEN);
6776 		return (DDI_SUCCESS);
6777 	}
6778 
6779 	/*
6780 	 * Not in instance tree, find the instance on the per driver list and
6781 	 * construct path to instance via ddi_pathname(). This is how paths
6782 	 * down the 'pseudo' branch are constructed.
6783 	 */
6784 	dnp = &(devnamesp[major]);
6785 	LOCK_DEV_OPS(&(dnp->dn_lock));
6786 	for (dip = dnp->dn_head; dip;
6787 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6788 		/* Skip if instance does not match. */
6789 		if (DEVI(dip)->devi_instance != instance)
6790 			continue;
6791 
6792 		/*
6793 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6794 		 * node demotion, so it is not an effective way of ensuring
6795 		 * that the ddi_pathname result has a unit-address.  Instead,
6796 		 * we reverify the node state after calling ddi_pathname().
6797 		 */
6798 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6799 			(void) ddi_pathname(dip, path);
6800 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6801 				continue;
6802 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6803 			ASSERT(strlen(path) < MAXPATHLEN);
6804 			return (DDI_SUCCESS);
6805 		}
6806 	}
6807 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6808 
6809 	/* can't reconstruct the path */
6810 	*path = 0;
6811 	return (DDI_FAILURE);
6812 }
6813 
/* Property name the GLD framework uses to record a v0 driver's ppa. */
#define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"

/*
 * Given the dip for a network interface return the ppa for that interface.
 *
 * In all cases except GLD v0 drivers, the ppa == instance.
 * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called "gld_driver_ppa"
 * that can be queried here.
 *
 * The only time this function is used is when a system is booting over nfs.
 * In this case the system has to resolve the pathname of the boot device
 * to it's ppa.
 */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/* The instance number is the default when no property exists. */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
6836 
6837 /*
6838  * i_ddi_devi_set_ppa() should only be called from gld_register()
6839  * and only for GLD v0 drivers
6840  */
6841 void
6842 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6843 {
6844 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6845 }
6846 
6847 
6848 /*
6849  * Private DDI Console bell functions.
6850  */
6851 void
6852 ddi_ring_console_bell(clock_t duration)
6853 {
6854 	if (ddi_console_bell_func != NULL)
6855 		(*ddi_console_bell_func)(duration);
6856 }
6857 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* Register (or clear, with NULL) the console bell handler. */
	ddi_console_bell_func = bellfunc;
}
6863 
6864 int
6865 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6866     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6867 {
6868 	int (*funcp)() = ddi_dma_allochdl;
6869 	ddi_dma_attr_t dma_attr;
6870 	struct bus_ops *bop;
6871 
6872 	if (attr == (ddi_dma_attr_t *)0)
6873 		return (DDI_DMA_BADATTR);
6874 
6875 	dma_attr = *attr;
6876 
6877 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6878 	if (bop && bop->bus_dma_allochdl)
6879 		funcp = bop->bus_dma_allochdl;
6880 
6881 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6882 }
6883 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/* Release a handle obtained from ddi_dma_alloc_handle(). */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6890 
/* Callback list id for waiters on DMA memory (see ddi_set_callback use). */
static uintptr_t dma_mem_list_id = 0;
6892 
6893 
6894 int
6895 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6896     ddi_device_acc_attr_t *accattrp, uint_t flags,
6897     int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6898     size_t *real_length, ddi_acc_handle_t *handlep)
6899 {
6900 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6901 	dev_info_t *dip = hp->dmai_rdip;
6902 	ddi_acc_hdl_t *ap;
6903 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6904 	uint_t sleepflag, xfermodes;
6905 	int (*fp)(caddr_t);
6906 	int rval;
6907 
6908 	if (waitfp == DDI_DMA_SLEEP)
6909 		fp = (int (*)())KM_SLEEP;
6910 	else if (waitfp == DDI_DMA_DONTWAIT)
6911 		fp = (int (*)())KM_NOSLEEP;
6912 	else
6913 		fp = waitfp;
6914 	*handlep = impl_acc_hdl_alloc(fp, arg);
6915 	if (*handlep == NULL)
6916 		return (DDI_FAILURE);
6917 
6918 	/* check if the cache attributes are supported */
6919 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6920 		return (DDI_FAILURE);
6921 
6922 	/*
6923 	 * Transfer the meaningful bits to xfermodes.
6924 	 * Double-check if the 3rd party driver correctly sets the bits.
6925 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6926 	 */
6927 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6928 	if (xfermodes == 0) {
6929 		xfermodes = DDI_DMA_STREAMING;
6930 	}
6931 
6932 	/*
6933 	 * initialize the common elements of data access handle
6934 	 */
6935 	ap = impl_acc_hdl_get(*handlep);
6936 	ap->ah_vers = VERS_ACCHDL;
6937 	ap->ah_dip = dip;
6938 	ap->ah_offset = 0;
6939 	ap->ah_len = 0;
6940 	ap->ah_xfermodes = flags;
6941 	ap->ah_acc = *accattrp;
6942 
6943 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6944 	if (xfermodes == DDI_DMA_CONSISTENT) {
6945 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6946 		    flags, accattrp, kaddrp, NULL, ap);
6947 		*real_length = length;
6948 	} else {
6949 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6950 		    flags, accattrp, kaddrp, real_length, ap);
6951 	}
6952 	if (rval == DDI_SUCCESS) {
6953 		ap->ah_len = (off_t)(*real_length);
6954 		ap->ah_addr = *kaddrp;
6955 	} else {
6956 		impl_acc_hdl_free(*handlep);
6957 		*handlep = (ddi_acc_handle_t)NULL;
6958 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6959 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6960 		}
6961 		rval = DDI_FAILURE;
6962 	}
6963 	return (rval);
6964 }
6965 
/*
 * Free memory and the access handle obtained from ddi_dma_mem_alloc().
 * Also re-runs the DMA-memory callback list, since freed memory may
 * satisfy a blocked allocator.
 */
void
ddi_dma_mem_free(ddi_acc_handle_t *handlep)
{
	ddi_acc_hdl_t *ap;

	ap = impl_acc_hdl_get(*handlep);
	ASSERT(ap);

	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);

	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;

	if (dma_mem_list_id != 0) {
		ddi_run_callback(&dma_mem_list_id);
	}
}
6986 
/*
 * Bind the memory described by 'bp' (a buf(9S)) to the DMA handle.
 * The buf's flags determine how the underlying memory object is
 * described: page list (B_PAGEIO), shadow page list (B_SHADOW), or
 * a plain virtual address range.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	/* Callers may pass NULL for outputs they do not care about. */
	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: describe the object by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* Shadow page list accompanies the vaddr. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* Dispatch to the nexus bindhdl entry point recorded at attach. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7050 
/*
 * Bind a virtual address range [addr, addr + len) in address space 'as'
 * to the DMA handle.  A zero length cannot be mapped.
 */
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
    caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
    caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}

	/* Callers may pass NULL for outputs they do not care about. */
	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* Dispatch to the nexus bindhdl entry point recorded at attach. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7088 
/*
 * Copy the next DMA cookie out of the handle into *cookiep and advance
 * the handle's cookie cursor.  Must be called at most dmai_ncookies
 * times per bind.
 */
void
ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	ddi_dma_cookie_t *cp;

	/* Walking past the last cookie is a caller bug: fail loudly. */
	if (hp->dmai_curcookie >= hp->dmai_ncookies) {
		panic("ddi_dma_nextcookie() called too many times on handle %p",
		    hp);
	}

	cp = hp->dmai_cookie;
	ASSERT(cp);

	/* Copy the current cookie field by field, then advance the cursor. */
	cookiep->dmac_notused = cp->dmac_notused;
	cookiep->dmac_type = cp->dmac_type;
	cookiep->dmac_address = cp->dmac_address;
	cookiep->dmac_size = cp->dmac_size;
	hp->dmai_cookie++;
	hp->dmai_curcookie++;
}
7110 
7111 int
7112 ddi_dma_ncookies(ddi_dma_handle_t handle)
7113 {
7114 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7115 
7116 	return (hp->dmai_ncookies);
7117 }
7118 
/*
 * Iterate over the handle's cookie array: pass NULL to get the first
 * cookie, then pass the previous return value to get the next.  Returns
 * NULL when exhausted, when the handle has no cookies, or when 'iter'
 * does not point into this handle's cookie array.
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base, *end;

	if (hp->dmai_ncookies == 0) {
		return (NULL);
	}

	/* dmai_cookie points at the next-unread cookie; back up to start. */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	end = base + hp->dmai_ncookies;
	if (iter == NULL) {
		return (base);
	}

	/* Reject iterators that do not point into [base, end). */
	if ((uintptr_t)iter < (uintptr_t)base ||
	    (uintptr_t)iter >= (uintptr_t)end) {
		return (NULL);
	}

	iter++;
	if (iter == end) {
		return (NULL);
	}

	return (iter);
}
7147 
7148 const ddi_dma_cookie_t *
7149 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index)
7150 {
7151 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7152 	const ddi_dma_cookie_t *base;
7153 
7154 	if (index >= hp->dmai_ncookies) {
7155 		return (NULL);
7156 	}
7157 
7158 	base = hp->dmai_cookie - hp->dmai_curcookie;
7159 	return (base + index);
7160 }
7161 
7162 const ddi_dma_cookie_t *
7163 ddi_dma_cookie_one(ddi_dma_handle_t handle)
7164 {
7165 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7166 	const ddi_dma_cookie_t *base;
7167 
7168 	if (hp->dmai_ncookies != 1) {
7169 		panic("ddi_dma_cookie_one() called with improper handle %p",
7170 		    hp);
7171 	}
7172 	ASSERT3P(hp->dmai_cookie, !=, NULL);
7173 
7174 	base = hp->dmai_cookie - hp->dmai_curcookie;
7175 	return (base);
7176 }
7177 
7178 int
7179 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7180 {
7181 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7182 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7183 		return (DDI_FAILURE);
7184 	} else {
7185 		*nwinp = hp->dmai_nwin;
7186 		return (DDI_SUCCESS);
7187 	}
7188 }
7189 
7190 int
7191 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7192     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7193 {
7194 	int (*funcp)() = ddi_dma_win;
7195 	struct bus_ops *bop;
7196 	ddi_dma_cookie_t cookie;
7197 	uint_t count;
7198 
7199 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7200 	if (bop && bop->bus_dma_win)
7201 		funcp = bop->bus_dma_win;
7202 
7203 	if (cookiep == NULL)
7204 		cookiep = &cookie;
7205 
7206 	if (ccountp == NULL)
7207 		ccountp = &count;
7208 
7209 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7210 }
7211 
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/* Request 64-bit SBus transfers with the given burst sizes. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7218 
/* Default fault check: report the handle's sticky fault flag. */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7224 
7225 int
7226 ddi_check_dma_handle(ddi_dma_handle_t handle)
7227 {
7228 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7229 	int (*check)(ddi_dma_impl_t *);
7230 
7231 	if ((check = hp->dmai_fault_check) == NULL)
7232 		check = i_ddi_dma_fault_check;
7233 
7234 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7235 }
7236 
7237 void
7238 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7239 {
7240 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7241 	void (*notify)(ddi_dma_impl_t *);
7242 
7243 	if (!hp->dmai_fault) {
7244 		hp->dmai_fault = 1;
7245 		if ((notify = hp->dmai_fault_notify) != NULL)
7246 			(*notify)(hp);
7247 	}
7248 }
7249 
7250 void
7251 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7252 {
7253 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7254 	void (*notify)(ddi_dma_impl_t *);
7255 
7256 	if (hp->dmai_fault) {
7257 		hp->dmai_fault = 0;
7258 		if ((notify = hp->dmai_fault_notify) != NULL)
7259 			(*notify)(hp);
7260 	}
7261 }
7262 
7263 /*
7264  * register mapping routines.
7265  */
7266 int
7267 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7268     offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7269     ddi_acc_handle_t *handle)
7270 {
7271 	ddi_map_req_t mr;
7272 	ddi_acc_hdl_t *hp;
7273 	int result;
7274 
7275 	/*
7276 	 * Allocate and initialize the common elements of data access handle.
7277 	 */
7278 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7279 	hp = impl_acc_hdl_get(*handle);
7280 	hp->ah_vers = VERS_ACCHDL;
7281 	hp->ah_dip = dip;
7282 	hp->ah_rnumber = rnumber;
7283 	hp->ah_offset = offset;
7284 	hp->ah_len = len;
7285 	hp->ah_acc = *accattrp;
7286 
7287 	/*
7288 	 * Set up the mapping request and call to parent.
7289 	 */
7290 	mr.map_op = DDI_MO_MAP_LOCKED;
7291 	mr.map_type = DDI_MT_RNUMBER;
7292 	mr.map_obj.rnumber = rnumber;
7293 	mr.map_prot = PROT_READ | PROT_WRITE;
7294 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7295 	mr.map_handlep = hp;
7296 	mr.map_vers = DDI_MAP_VERSION;
7297 	result = ddi_map(dip, &mr, offset, len, addrp);
7298 
7299 	/*
7300 	 * check for end result
7301 	 */
7302 	if (result != DDI_SUCCESS) {
7303 		impl_acc_hdl_free(*handle);
7304 		*handle = (ddi_acc_handle_t)NULL;
7305 	} else {
7306 		hp->ah_addr = *addrp;
7307 	}
7308 
7309 	return (result);
7310 }
7311 
/*
 * Undo a ddi_regs_map_setup(): unmap the registers via the parent nexus
 * and free the access handle.  *handlep is NULLed.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* Rebuild the original mapping request, this time to unmap. */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7340 
7341 int
7342 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7343     ssize_t dev_advcnt, uint_t dev_datasz)
7344 {
7345 	uint8_t *b;
7346 	uint16_t *w;
7347 	uint32_t *l;
7348 	uint64_t *ll;
7349 
7350 	/* check for total byte count is multiple of data transfer size */
7351 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7352 		return (DDI_FAILURE);
7353 
7354 	switch (dev_datasz) {
7355 	case DDI_DATA_SZ01_ACC:
7356 		for (b = (uint8_t *)dev_addr;
7357 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7358 			ddi_put8(handle, b, 0);
7359 		break;
7360 	case DDI_DATA_SZ02_ACC:
7361 		for (w = (uint16_t *)dev_addr;
7362 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7363 			ddi_put16(handle, w, 0);
7364 		break;
7365 	case DDI_DATA_SZ04_ACC:
7366 		for (l = (uint32_t *)dev_addr;
7367 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7368 			ddi_put32(handle, l, 0);
7369 		break;
7370 	case DDI_DATA_SZ08_ACC:
7371 		for (ll = (uint64_t *)dev_addr;
7372 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7373 			ddi_put64(handle, ll, 0x0ll);
7374 		break;
7375 	default:
7376 		return (DDI_FAILURE);
7377 	}
7378 	return (DDI_SUCCESS);
7379 }
7380 
7381 int
7382 ddi_device_copy(
7383 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7384 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7385 	size_t bytecount, uint_t dev_datasz)
7386 {
7387 	uint8_t *b_src, *b_dst;
7388 	uint16_t *w_src, *w_dst;
7389 	uint32_t *l_src, *l_dst;
7390 	uint64_t *ll_src, *ll_dst;
7391 
7392 	/* check for total byte count is multiple of data transfer size */
7393 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7394 		return (DDI_FAILURE);
7395 
7396 	switch (dev_datasz) {
7397 	case DDI_DATA_SZ01_ACC:
7398 		b_src = (uint8_t *)src_addr;
7399 		b_dst = (uint8_t *)dest_addr;
7400 
7401 		for (; bytecount != 0; bytecount -= 1) {
7402 			ddi_put8(dest_handle, b_dst,
7403 			    ddi_get8(src_handle, b_src));
7404 			b_dst += dest_advcnt;
7405 			b_src += src_advcnt;
7406 		}
7407 		break;
7408 	case DDI_DATA_SZ02_ACC:
7409 		w_src = (uint16_t *)src_addr;
7410 		w_dst = (uint16_t *)dest_addr;
7411 
7412 		for (; bytecount != 0; bytecount -= 2) {
7413 			ddi_put16(dest_handle, w_dst,
7414 			    ddi_get16(src_handle, w_src));
7415 			w_dst += dest_advcnt;
7416 			w_src += src_advcnt;
7417 		}
7418 		break;
7419 	case DDI_DATA_SZ04_ACC:
7420 		l_src = (uint32_t *)src_addr;
7421 		l_dst = (uint32_t *)dest_addr;
7422 
7423 		for (; bytecount != 0; bytecount -= 4) {
7424 			ddi_put32(dest_handle, l_dst,
7425 			    ddi_get32(src_handle, l_src));
7426 			l_dst += dest_advcnt;
7427 			l_src += src_advcnt;
7428 		}
7429 		break;
7430 	case DDI_DATA_SZ08_ACC:
7431 		ll_src = (uint64_t *)src_addr;
7432 		ll_dst = (uint64_t *)dest_addr;
7433 
7434 		for (; bytecount != 0; bytecount -= 8) {
7435 			ddi_put64(dest_handle, ll_dst,
7436 			    ddi_get64(src_handle, ll_src));
7437 			ll_dst += dest_advcnt;
7438 			ll_src += src_advcnt;
7439 		}
7440 		break;
7441 	default:
7442 		return (DDI_FAILURE);
7443 	}
7444 	return (DDI_SUCCESS);
7445 }
7446 
/* Byte-order reversal helper macros for 16-, 32- and 64-bit values. */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7458 
7459 uint16_t
7460 ddi_swap16(uint16_t value)
7461 {
7462 	return (swap16(value));
7463 }
7464 
7465 uint32_t
7466 ddi_swap32(uint32_t value)
7467 {
7468 	return (swap32(value));
7469 }
7470 
7471 uint64_t
7472 ddi_swap64(uint64_t value)
7473 {
7474 	return (swap64(value));
7475 }
7476 
7477 /*
7478  * Convert a binding name to a driver name.
7479  * A binding name is the name used to determine the driver for a
7480  * device - it may be either an alias for the driver or the name
7481  * of the driver itself.
7482  */
7483 char *
7484 i_binding_to_drv_name(char *bname)
7485 {
7486 	major_t major_no;
7487 
7488 	ASSERT(bname != NULL);
7489 
7490 	if ((major_no = ddi_name_to_major(bname)) == -1)
7491 		return (NULL);
7492 	return (ddi_major_to_name(major_no));
7493 }
7494 
7495 /*
7496  * Search for minor name that has specified dev_t and spec_type.
7497  * If spec_type is zero then any dev_t match works.  Since we
7498  * are returning a pointer to the minor name string, we require the
7499  * caller to do the locking.
7500  */
7501 char *
7502 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7503 {
7504 	struct ddi_minor_data	*dmdp;
7505 
7506 	/*
7507 	 * The did layered driver currently intentionally returns a
7508 	 * devinfo ptr for an underlying sd instance based on a did
7509 	 * dev_t. In this case it is not an error.
7510 	 *
7511 	 * The did layered driver is associated with Sun Cluster.
7512 	 */
7513 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7514 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7515 
7516 	ASSERT(DEVI_BUSY_OWNED(dip));
7517 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7518 		if (((dmdp->type == DDM_MINOR) ||
7519 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7520 		    (dmdp->type == DDM_DEFAULT)) &&
7521 		    (dmdp->ddm_dev == dev) &&
7522 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7523 		    (dmdp->ddm_spec_type == spec_type)))
7524 			return (dmdp->ddm_name);
7525 	}
7526 
7527 	return (NULL);
7528 }
7529 
7530 /*
7531  * Find the devt and spectype of the specified minor_name.
7532  * Return DDI_FAILURE if minor_name not found. Since we are
7533  * returning everything via arguments we can do the locking.
7534  */
7535 int
7536 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7537     dev_t *devtp, int *spectypep)
7538 {
7539 	int			circ;
7540 	struct ddi_minor_data	*dmdp;
7541 
7542 	/* deal with clone minor nodes */
7543 	if (dip == clone_dip) {
7544 		major_t	major;
7545 		/*
7546 		 * Make sure minor_name is a STREAMS driver.
7547 		 * We load the driver but don't attach to any instances.
7548 		 */
7549 
7550 		major = ddi_name_to_major(minor_name);
7551 		if (major == DDI_MAJOR_T_NONE)
7552 			return (DDI_FAILURE);
7553 
7554 		if (ddi_hold_driver(major) == NULL)
7555 			return (DDI_FAILURE);
7556 
7557 		if (STREAMSTAB(major) == NULL) {
7558 			ddi_rele_driver(major);
7559 			return (DDI_FAILURE);
7560 		}
7561 		ddi_rele_driver(major);
7562 
7563 		if (devtp)
7564 			*devtp = makedevice(clone_major, (minor_t)major);
7565 
7566 		if (spectypep)
7567 			*spectypep = S_IFCHR;
7568 
7569 		return (DDI_SUCCESS);
7570 	}
7571 
7572 	ndi_devi_enter(dip, &circ);
7573 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7574 		if (((dmdp->type != DDM_MINOR) &&
7575 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7576 		    (dmdp->type != DDM_DEFAULT)) ||
7577 		    strcmp(minor_name, dmdp->ddm_name))
7578 			continue;
7579 
7580 		if (devtp)
7581 			*devtp = dmdp->ddm_dev;
7582 
7583 		if (spectypep)
7584 			*spectypep = dmdp->ddm_spec_type;
7585 
7586 		ndi_devi_exit(dip, circ);
7587 		return (DDI_SUCCESS);
7588 	}
7589 	ndi_devi_exit(dip, circ);
7590 
7591 	return (DDI_FAILURE);
7592 }
7593 
/* Generation-number source for DEVID_FAB devids; see ddi_devid_init(). */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;

#ifdef DEBUG

/* Tunables to deliberately corrupt devids for testing; see */
/* i_ddi_devid_register(). */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* Set non-zero to trace devid-layer dev_t lists. */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* Debug tracing compiles away entirely in non-DEBUG kernels. */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7615 
7616 
7617 #ifdef	DEBUG
7618 
7619 static void
7620 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7621 {
7622 	int i;
7623 
7624 	cmn_err(CE_CONT, "%s:\n", msg);
7625 	for (i = 0; i < ndevs; i++) {
7626 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7627 	}
7628 }
7629 
7630 static void
7631 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7632 {
7633 	int i;
7634 
7635 	cmn_err(CE_CONT, "%s:\n", msg);
7636 	for (i = 0; i < npaths; i++) {
7637 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7638 	}
7639 }
7640 
7641 static void
7642 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7643 {
7644 	int i;
7645 
7646 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7647 	for (i = 0; i < ndevs; i++) {
7648 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7649 	}
7650 }
7651 
7652 #endif	/* DEBUG */
7653 
7654 /*
7655  * Register device id into DDI framework.
7656  * Must be called when the driver is bound.
7657  */
7658 static int
7659 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7660 {
7661 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7662 	size_t		driver_len;
7663 	const char	*driver_name;
7664 	char		*devid_str;
7665 	major_t		major;
7666 
7667 	if ((dip == NULL) ||
7668 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7669 		return (DDI_FAILURE);
7670 
7671 	/* verify that the devid is valid */
7672 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7673 		return (DDI_FAILURE);
7674 
7675 	/* Updating driver name hint in devid */
7676 	driver_name = ddi_driver_name(dip);
7677 	driver_len = strlen(driver_name);
7678 	if (driver_len > DEVID_HINT_SIZE) {
7679 		/* Pick up last four characters of driver name */
7680 		driver_name += driver_len - DEVID_HINT_SIZE;
7681 		driver_len = DEVID_HINT_SIZE;
7682 	}
7683 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7684 	bcopy(driver_name, i_devid->did_driver, driver_len);
7685 
7686 #ifdef DEBUG
7687 	/* Corrupt the devid for testing. */
7688 	if (devid_register_corrupt)
7689 		i_devid->did_id[0] += devid_register_corrupt;
7690 	if (devid_register_corrupt_major &&
7691 	    (major == devid_register_corrupt_major))
7692 		i_devid->did_id[0] += 1;
7693 	if (devid_register_corrupt_hint)
7694 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7695 	if (devid_register_corrupt_hint_major &&
7696 	    (major == devid_register_corrupt_hint_major))
7697 		i_devid->did_driver[0] += 1;
7698 #endif /* DEBUG */
7699 
7700 	/* encode the devid as a string */
7701 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7702 		return (DDI_FAILURE);
7703 
7704 	/* add string as a string property */
7705 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7706 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7707 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7708 		    ddi_driver_name(dip), ddi_get_instance(dip));
7709 		ddi_devid_str_free(devid_str);
7710 		return (DDI_FAILURE);
7711 	}
7712 
7713 	/* keep pointer to devid string for interrupt context fma code */
7714 	if (DEVI(dip)->devi_devid_str)
7715 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7716 	DEVI(dip)->devi_devid_str = devid_str;
7717 	return (DDI_SUCCESS);
7718 }
7719 
7720 int
7721 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7722 {
7723 	int rval;
7724 
7725 	rval = i_ddi_devid_register(dip, devid);
7726 	if (rval == DDI_SUCCESS) {
7727 		/*
7728 		 * Register devid in devid-to-path cache
7729 		 */
7730 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7731 			mutex_enter(&DEVI(dip)->devi_lock);
7732 			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7733 			mutex_exit(&DEVI(dip)->devi_lock);
7734 		} else if (ddi_get_name_addr(dip)) {
7735 			/*
7736 			 * We only expect cache_register DDI_FAILURE when we
7737 			 * can't form the full path because of NULL devi_addr.
7738 			 */
7739 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7740 			    ddi_driver_name(dip), ddi_get_instance(dip));
7741 		}
7742 	} else {
7743 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7744 		    ddi_driver_name(dip), ddi_get_instance(dip));
7745 	}
7746 	return (rval);
7747 }
7748 
7749 /*
7750  * Remove (unregister) device id from DDI framework.
7751  * Must be called when device is detached.
7752  */
7753 static void
7754 i_ddi_devid_unregister(dev_info_t *dip)
7755 {
7756 	if (DEVI(dip)->devi_devid_str) {
7757 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7758 		DEVI(dip)->devi_devid_str = NULL;
7759 	}
7760 
7761 	/* remove the devid property */
7762 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7763 }
7764 
/*
 * Public devid unregistration: drop the devid from the devid-to-path
 * cache and then from the DDI framework (property and cached string).
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* Clear the cached-devid flag before removing the cache entry. */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7774 
7775 /*
7776  * Allocate and initialize a device id.
7777  */
7778 int
7779 ddi_devid_init(
7780 	dev_info_t	*dip,
7781 	ushort_t	devid_type,
7782 	ushort_t	nbytes,
7783 	void		*id,
7784 	ddi_devid_t	*ret_devid)
7785 {
7786 	impl_devid_t	*i_devid;
7787 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7788 	int		driver_len;
7789 	const char	*driver_name;
7790 
7791 	switch (devid_type) {
7792 	case DEVID_SCSI3_WWN:
7793 		/*FALLTHRU*/
7794 	case DEVID_SCSI_SERIAL:
7795 		/*FALLTHRU*/
7796 	case DEVID_ATA_SERIAL:
7797 		/*FALLTHRU*/
7798 	case DEVID_NVME_NSID:
7799 		/*FALLTHRU*/
7800 	case DEVID_NVME_EUI64:
7801 		/*FALLTHRU*/
7802 	case DEVID_NVME_NGUID:
7803 		/*FALLTHRU*/
7804 	case DEVID_ENCAP:
7805 		if (nbytes == 0)
7806 			return (DDI_FAILURE);
7807 		if (id == NULL)
7808 			return (DDI_FAILURE);
7809 		break;
7810 	case DEVID_FAB:
7811 		if (nbytes != 0)
7812 			return (DDI_FAILURE);
7813 		if (id != NULL)
7814 			return (DDI_FAILURE);
7815 		nbytes = sizeof (int) +
7816 		    sizeof (struct timeval32) + sizeof (short);
7817 		sz += nbytes;
7818 		break;
7819 	default:
7820 		return (DDI_FAILURE);
7821 	}
7822 
7823 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7824 		return (DDI_FAILURE);
7825 
7826 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7827 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7828 	i_devid->did_rev_hi = DEVID_REV_MSB;
7829 	i_devid->did_rev_lo = DEVID_REV_LSB;
7830 	DEVID_FORMTYPE(i_devid, devid_type);
7831 	DEVID_FORMLEN(i_devid, nbytes);
7832 
7833 	/* Fill in driver name hint */
7834 	driver_name = ddi_driver_name(dip);
7835 	driver_len = strlen(driver_name);
7836 	if (driver_len > DEVID_HINT_SIZE) {
7837 		/* Pick up last four characters of driver name */
7838 		driver_name += driver_len - DEVID_HINT_SIZE;
7839 		driver_len = DEVID_HINT_SIZE;
7840 	}
7841 
7842 	bcopy(driver_name, i_devid->did_driver, driver_len);
7843 
7844 	/* Fill in id field */
7845 	if (devid_type == DEVID_FAB) {
7846 		char		*cp;
7847 		uint32_t	hostid;
7848 		struct timeval32 timestamp32;
7849 		int		i;
7850 		int		*ip;
7851 		short		gen;
7852 
7853 		/* increase the generation number */
7854 		mutex_enter(&devid_gen_mutex);
7855 		gen = devid_gen_number++;
7856 		mutex_exit(&devid_gen_mutex);
7857 
7858 		cp = i_devid->did_id;
7859 
7860 		/* Fill in host id (big-endian byte ordering) */
7861 		hostid = zone_get_hostid(NULL);
7862 		*cp++ = hibyte(hiword(hostid));
7863 		*cp++ = lobyte(hiword(hostid));
7864 		*cp++ = hibyte(loword(hostid));
7865 		*cp++ = lobyte(loword(hostid));
7866 
7867 		/*
7868 		 * Fill in timestamp (big-endian byte ordering)
7869 		 *
7870 		 * (Note that the format may have to be changed
7871 		 * before 2038 comes around, though it's arguably
7872 		 * unique enough as it is..)
7873 		 */
7874 		uniqtime32(&timestamp32);
7875 		ip = (int *)&timestamp32;
7876 		for (i = 0;
7877 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7878 			int	val;
7879 			val = *ip;
7880 			*cp++ = hibyte(hiword(val));
7881 			*cp++ = lobyte(hiword(val));
7882 			*cp++ = hibyte(loword(val));
7883 			*cp++ = lobyte(loword(val));
7884 		}
7885 
7886 		/* fill in the generation number */
7887 		*cp++ = hibyte(gen);
7888 		*cp++ = lobyte(gen);
7889 	} else
7890 		bcopy(id, i_devid->did_id, nbytes);
7891 
7892 	/* return device id */
7893 	*ret_devid = (ddi_devid_t)i_devid;
7894 	return (DDI_SUCCESS);
7895 }
7896 
/*
 * Return a copy of the device id for dip, independent of any particular
 * dev_t (DDI_DEV_T_ANY lookup).
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7902 
7903 int
7904 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7905 {
7906 	char		*devidstr;
7907 
7908 	ASSERT(dev != DDI_DEV_T_NONE);
7909 
7910 	/* look up the property, devt specific first */
7911 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7912 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7913 		if ((dev == DDI_DEV_T_ANY) ||
7914 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7915 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7916 		    DDI_PROP_SUCCESS)) {
7917 			return (DDI_FAILURE);
7918 		}
7919 	}
7920 
7921 	/* convert to binary form */
7922 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7923 		ddi_prop_free(devidstr);
7924 		return (DDI_FAILURE);
7925 	}
7926 	ddi_prop_free(devidstr);
7927 	return (DDI_SUCCESS);
7928 }
7929 
7930 /*
7931  * Return a copy of the device id for dev_t
7932  */
7933 int
7934 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7935 {
7936 	dev_info_t	*dip;
7937 	int		rval;
7938 
7939 	/* get the dip */
7940 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7941 		return (DDI_FAILURE);
7942 
7943 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7944 
7945 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7946 	return (rval);
7947 }
7948 
7949 /*
7950  * Return a copy of the minor name for dev_t and spec_type
7951  */
7952 int
7953 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7954 {
7955 	char		*buf;
7956 	int		circ;
7957 	dev_info_t	*dip;
7958 	char		*nm;
7959 	int		rval;
7960 
7961 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7962 		*minor_name = NULL;
7963 		return (DDI_FAILURE);
7964 	}
7965 
7966 	/* Find the minor name and copy into max size buf */
7967 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7968 	ndi_devi_enter(dip, &circ);
7969 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7970 	if (nm)
7971 		(void) strcpy(buf, nm);
7972 	ndi_devi_exit(dip, circ);
7973 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7974 
7975 	if (nm) {
7976 		/* duplicate into min size buf for return result */
7977 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7978 		rval = DDI_SUCCESS;
7979 	} else {
7980 		*minor_name = NULL;
7981 		rval = DDI_FAILURE;
7982 	}
7983 
7984 	/* free max size buf and return */
7985 	kmem_free(buf, MAXNAMELEN);
7986 	return (rval);
7987 }
7988 
7989 int
7990 ddi_lyr_devid_to_devlist(
7991 	ddi_devid_t	devid,
7992 	char		*minor_name,
7993 	int		*retndevs,
7994 	dev_t		**retdevs)
7995 {
7996 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7997 
7998 	if (e_devid_cache_to_devt_list(devid, minor_name,
7999 	    retndevs, retdevs) == DDI_SUCCESS) {
8000 		ASSERT(*retndevs > 0);
8001 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8002 		    *retndevs, *retdevs);
8003 		return (DDI_SUCCESS);
8004 	}
8005 
8006 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8007 		return (DDI_FAILURE);
8008 	}
8009 
8010 	if (e_devid_cache_to_devt_list(devid, minor_name,
8011 	    retndevs, retdevs) == DDI_SUCCESS) {
8012 		ASSERT(*retndevs > 0);
8013 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8014 		    *retndevs, *retdevs);
8015 		return (DDI_SUCCESS);
8016 	}
8017 
8018 	return (DDI_FAILURE);
8019 }
8020 
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8026 
8027 /*
8028  * Note: This will need to be fixed if we ever allow processes to
8029  * have more than one data model per exec.
8030  */
8031 model_t
8032 ddi_mmap_get_model(void)
8033 {
8034 	return (get_udatamodel());
8035 }
8036 
model_t
ddi_model_convert_from(model_t model)
{
	/* Mask to the model bits and strip the DDI_MODEL_NATIVE flag. */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8042 
8043 /*
8044  * ddi interfaces managing storage and retrieval of eventcookies.
8045  */
8046 
8047 /*
8048  * Invoke bus nexus driver's implementation of the
8049  * (*bus_remove_eventcall)() interface to remove a registered
8050  * callback handler for "event".
8051  */
8052 int
8053 ddi_remove_event_handler(ddi_callback_id_t id)
8054 {
8055 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8056 	dev_info_t *ddip;
8057 
8058 	ASSERT(cb);
8059 	if (!cb) {
8060 		return (DDI_FAILURE);
8061 	}
8062 
8063 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8064 	return (ndi_busop_remove_eventcall(ddip, id));
8065 }
8066 
8067 /*
8068  * Invoke bus nexus driver's implementation of the
8069  * (*bus_add_eventcall)() interface to register a callback handler
8070  * for "event".
8071  */
8072 int
8073 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8074     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8075     void *arg, ddi_callback_id_t *id)
8076 {
8077 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8078 }
8079 
8080 
8081 /*
8082  * Return a handle for event "name" by calling up the device tree
8083  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8084  * by a bus nexus or top of dev_info tree is reached.
8085  */
8086 int
8087 ddi_get_eventcookie(dev_info_t *dip, char *name,
8088     ddi_eventcookie_t *event_cookiep)
8089 {
8090 	return (ndi_busop_get_eventcookie(dip, dip,
8091 	    name, event_cookiep));
8092 }
8093 
8094 /*
8095  * This procedure is provided as the general callback function when
8096  * umem_lockmemory calls as_add_callback for long term memory locking.
8097  * When as_unmap, as_setprot, or as_free encounter segments which have
8098  * locked memory, this callback will be invoked.
8099  */
8100 void
8101 umem_lock_undo(struct as *as, void *arg, uint_t event)
8102 {
8103 	_NOTE(ARGUNUSED(as, event))
8104 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8105 
8106 	/*
8107 	 * Call the cleanup function.  Decrement the cookie reference
8108 	 * count, if it goes to zero, return the memory for the cookie.
8109 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8110 	 * called already.  It is the responsibility of the caller of
8111 	 * umem_lockmemory to handle the case of the cleanup routine
8112 	 * being called after a ddi_umem_unlock for the cookie
8113 	 * was called.
8114 	 */
8115 
8116 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8117 
8118 	/* remove the cookie if reference goes to zero */
8119 	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
8120 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8121 	}
8122 }
8123 
8124 /*
8125  * The following two Consolidation Private routines provide generic
8126  * interfaces to increase/decrease the amount of device-locked memory.
8127  *
8128  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8129  * must be called every time i_ddi_incr_locked_memory() is called.
8130  */
8131 int
8132 /* ARGSUSED */
8133 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8134 {
8135 	ASSERT(procp != NULL);
8136 	mutex_enter(&procp->p_lock);
8137 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8138 		mutex_exit(&procp->p_lock);
8139 		return (ENOMEM);
8140 	}
8141 	mutex_exit(&procp->p_lock);
8142 	return (0);
8143 }
8144 
8145 /*
8146  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8147  * must be called every time i_ddi_decr_locked_memory() is called.
8148  */
8149 /* ARGSUSED */
8150 void
8151 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8152 {
8153 	ASSERT(procp != NULL);
8154 	mutex_enter(&procp->p_lock);
8155 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8156 	mutex_exit(&procp->p_lock);
8157 }
8158 
8159 /*
8160  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8161  * charge device locked memory to the max-locked-memory rctl.  Tracking
8162  * device locked memory causes the rctl locks to get hot under high-speed
8163  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8164  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8165  * flag tells us if the rctl value should be updated when unlocking the memory,
8166  * in case the rctl gets changed after the memory was locked.  Any device
8167  * locked memory in that rare case will not be counted toward the rctl limit.
8168  *
8169  * When tracking the locked memory, the kproject_t parameter is always NULL
8170  * in the code paths:
8171  *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8172  *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8173  * Thus, we always use the tk_proj member to check the projp setting.
8174  */
8175 static void
8176 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8177 {
8178 	proc_t		*p;
8179 	kproject_t	*projp;
8180 	zone_t		*zonep;
8181 
8182 	ASSERT(cookie);
8183 	p = cookie->procp;
8184 	ASSERT(p);
8185 
8186 	zonep = p->p_zone;
8187 	projp = p->p_task->tk_proj;
8188 
8189 	ASSERT(zonep);
8190 	ASSERT(projp);
8191 
8192 	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8193 	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8194 		cookie->upd_max_lock_rctl = 0;
8195 	else
8196 		cookie->upd_max_lock_rctl = 1;
8197 }
8198 
8199 /*
8200  * This routine checks if the max-locked-memory resource ctl is
8201  * exceeded, if not increments it, grabs a hold on the project.
8202  * Returns 0 if successful otherwise returns error code
8203  */
8204 static int
8205 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8206 {
8207 	proc_t		*procp;
8208 	int		ret;
8209 
8210 	ASSERT(cookie);
8211 	if (cookie->upd_max_lock_rctl == 0)
8212 		return (0);
8213 
8214 	procp = cookie->procp;
8215 	ASSERT(procp);
8216 
8217 	if ((ret = i_ddi_incr_locked_memory(procp,
8218 	    cookie->size)) != 0) {
8219 		return (ret);
8220 	}
8221 	return (0);
8222 }
8223 
8224 /*
8225  * Decrements the max-locked-memory resource ctl and releases
8226  * the hold on the project that was acquired during umem_incr_devlockmem
8227  */
8228 static void
8229 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8230 {
8231 	proc_t		*proc;
8232 
8233 	if (cookie->upd_max_lock_rctl == 0)
8234 		return;
8235 
8236 	proc = (proc_t *)cookie->procp;
8237 	if (!proc)
8238 		return;
8239 
8240 	i_ddi_decr_locked_memory(proc, cookie->size);
8241 }
8242 
8243 /*
8244  * A consolidation private function which is essentially equivalent to
8245  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8246  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8247  * the ops_vector is valid.
8248  *
8249  * Lock the virtual address range in the current process and create a
8250  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8251  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8252  * to user space.
8253  *
8254  * Note: The resource control accounting currently uses a full charge model
8255  * in other words attempts to lock the same/overlapping areas of memory
8256  * will deduct the full size of the buffer from the projects running
8257  * counter for the device locked memory.
8258  *
8259  * addr, size should be PAGESIZE aligned
8260  *
8261  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8262  *	identifies whether the locked memory will be read or written or both
8263  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8264  * be maintained for an indefinitely long period (essentially permanent),
8265  * rather than for what would be required for a typical I/O completion.
8266  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8267  * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
8270  *
8271  * Returns 0 on success
8272  *	EINVAL - for invalid parameters
8273  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8274  *	ENOMEM - is returned if the current request to lock memory exceeds
8275  *		*.max-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared
 *		and DDI_UMEMLOCK_LONGTERM flag is set
8278  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8279  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8453 
8454 /*
8455  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8456  * the cookie.  Called from i_ddi_umem_unlock_thread.
8457  */
8458 
8459 static void
8460 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8461 {
8462 	uint_t	rc;
8463 
8464 	/*
8465 	 * There is no way to determine whether a callback to
8466 	 * umem_lock_undo was registered via as_add_callback.
8467 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8468 	 * a valid callback function structure.)  as_delete_callback
8469 	 * is called to delete a possible registered callback.  If the
8470 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8471 	 * indicates that there was a callback registered, and that is was
8472 	 * successfully deleted.  Thus, the cookie reference count
8473 	 * will never be decremented by umem_lock_undo.  Just return the
8474 	 * memory for the cookie, since both users of the cookie are done.
8475 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8476 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8477 	 * indicates that callback processing is taking place and, and
8478 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8479 	 * the cookie reference count when it is complete.
8480 	 *
8481 	 * This needs to be done before as_pageunlock so that the
8482 	 * persistence of as is guaranteed because of the locked pages.
8483 	 *
8484 	 */
8485 	rc = as_delete_callback(p->asp, p);
8486 
8487 
8488 	/*
8489 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8490 	 * after relvm is called so use p->asp.
8491 	 */
8492 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8493 
8494 	/*
8495 	 * Now that we have unlocked the memory decrement the
8496 	 * *.max-locked-memory rctl
8497 	 */
8498 	umem_decr_devlockmem(p);
8499 
8500 	if (rc == AS_CALLBACK_DELETED) {
8501 		/* umem_lock_undo will not happen, return the cookie memory */
8502 		ASSERT(p->cook_refcnt == 2);
8503 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8504 	} else {
8505 		/*
8506 		 * umem_undo_lock may happen if as_delete_callback returned
8507 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8508 		 * reference count, atomically, and return the cookie
8509 		 * memory if the reference count goes to zero.  The only
8510 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8511 		 * case, just return the cookie memory.
8512 		 */
8513 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8514 		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
8515 		    == 0)) {
8516 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8517 		}
8518 	}
8519 }
8520 
8521 /*
8522  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8523  *
8524  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8525  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8526  * via calls to ddi_umem_unlock.
8527  */
8528 
8529 static void
8530 i_ddi_umem_unlock_thread(void)
8531 {
8532 	struct ddi_umem_cookie	*ret_cookie;
8533 	callb_cpr_t	cprinfo;
8534 
8535 	/* process the ddi_umem_unlock list */
8536 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8537 	    callb_generic_cpr, "unlock_thread");
8538 	for (;;) {
8539 		mutex_enter(&ddi_umem_unlock_mutex);
8540 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8541 			ret_cookie = ddi_umem_unlock_head;
8542 			/* take if off the list */
8543 			if ((ddi_umem_unlock_head =
8544 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8545 				ddi_umem_unlock_tail = NULL;
8546 			}
8547 			mutex_exit(&ddi_umem_unlock_mutex);
8548 			/* unlock the pages in this cookie */
8549 			(void) i_ddi_umem_unlock(ret_cookie);
8550 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8551 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8552 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8553 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8554 			mutex_exit(&ddi_umem_unlock_mutex);
8555 		}
8556 	}
8557 	/* ddi_umem_unlock_thread does not exit */
8558 	/* NOTREACHED */
8559 }
8560 
8561 /*
8562  * Start the thread that will process the ddi_umem_unlock list if it is
8563  * not already started (i_ddi_umem_unlock_thread).
8564  */
8565 static void
8566 i_ddi_umem_unlock_thread_start(void)
8567 {
8568 	mutex_enter(&ddi_umem_unlock_mutex);
8569 	if (ddi_umem_unlock_thread == NULL) {
8570 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8571 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8572 		    TS_RUN, minclsyspri);
8573 	}
8574 	mutex_exit(&ddi_umem_unlock_mutex);
8575 }
8576 
8577 /*
8578  * Lock the virtual address range in the current process and create a
8579  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8580  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8581  * to user space.
8582  *
8583  * Note: The resource control accounting currently uses a full charge model
8584  * in other words attempts to lock the same/overlapping areas of memory
8585  * will deduct the full size of the buffer from the projects running
8586  * counter for the device locked memory. This applies to umem_lockmemory too.
8587  *
8588  * addr, size should be PAGESIZE aligned
8589  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8590  *	identifies whether the locked memory will be read or written or both
8591  *
8592  * Returns 0 on success
8593  *	EINVAL - for invalid parameters
8594  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8595  *	ENOMEM - is returned if the current request to lock memory exceeds
8596  *		*.max-locked-memory resource control value.
8597  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8598  */
8599 int
8600 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8601 {
8602 	int	error;
8603 	struct ddi_umem_cookie *p;
8604 
8605 	*cookie = NULL;		/* in case of any error return */
8606 
8607 	/* These are the only two valid flags */
8608 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8609 		return (EINVAL);
8610 	}
8611 
8612 	/* At least one of the two flags (or both) must be set */
8613 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8614 		return (EINVAL);
8615 	}
8616 
8617 	/* addr and len must be page-aligned */
8618 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8619 		return (EINVAL);
8620 	}
8621 
8622 	if ((len & PAGEOFFSET) != 0) {
8623 		return (EINVAL);
8624 	}
8625 
8626 	/*
8627 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8628 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8629 	 */
8630 	if (ddi_umem_unlock_thread == NULL)
8631 		i_ddi_umem_unlock_thread_start();
8632 
8633 	/* Allocate memory for the cookie */
8634 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8635 
8636 	/* Convert the flags to seg_rw type */
8637 	if (flags & DDI_UMEMLOCK_WRITE) {
8638 		p->s_flags = S_WRITE;
8639 	} else {
8640 		p->s_flags = S_READ;
8641 	}
8642 
8643 	/* Store curproc in cookie for later iosetup/unlock */
8644 	p->procp = (void *)curproc;
8645 
8646 	/*
8647 	 * Store the struct as pointer in cookie for later use by
8648 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8649 	 * is called after relvm is called.
8650 	 */
8651 	p->asp = curproc->p_as;
8652 	/*
8653 	 * The size field is needed for lockmem accounting.
8654 	 */
8655 	p->size = len;
8656 	init_lockedmem_rctl_flag(p);
8657 
8658 	if (umem_incr_devlockmem(p) != 0) {
8659 		/*
8660 		 * The requested memory cannot be locked
8661 		 */
8662 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8663 		*cookie = (ddi_umem_cookie_t)NULL;
8664 		return (ENOMEM);
8665 	}
8666 
8667 	/* Lock the pages corresponding to addr, len in memory */
8668 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8669 	    addr, len, p->s_flags);
8670 	if (error != 0) {
8671 		umem_decr_devlockmem(p);
8672 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8673 		*cookie = (ddi_umem_cookie_t)NULL;
8674 		return (error);
8675 	}
8676 
8677 	/* Initialize the fields in the ddi_umem_cookie */
8678 	p->cvaddr = addr;
8679 	p->type = UMEM_LOCKED;
8680 	p->cook_refcnt = 1;
8681 
8682 	*cookie = (ddi_umem_cookie_t)p;
8683 	return (error);
8684 }
8685 
8686 /*
8687  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8688  * unlocked by i_ddi_umem_unlock_thread.
8689  */
8690 
8691 void
8692 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8693 {
8694 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8695 
8696 	ASSERT(p->type == UMEM_LOCKED);
8697 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8698 	ASSERT(ddi_umem_unlock_thread != NULL);
8699 
8700 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8701 	/*
8702 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8703 	 * if it's called in the interrupt context. Otherwise, unlock pages
8704 	 * immediately.
8705 	 */
8706 	if (servicing_interrupt()) {
8707 		/* queue the unlock request and notify the thread */
8708 		mutex_enter(&ddi_umem_unlock_mutex);
8709 		if (ddi_umem_unlock_head == NULL) {
8710 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8711 			cv_broadcast(&ddi_umem_unlock_cv);
8712 		} else {
8713 			ddi_umem_unlock_tail->unl_forw = p;
8714 			ddi_umem_unlock_tail = p;
8715 		}
8716 		mutex_exit(&ddi_umem_unlock_mutex);
8717 	} else {
8718 		/* unlock the pages right away */
8719 		(void) i_ddi_umem_unlock(p);
8720 	}
8721 }
8722 
8723 /*
8724  * Create a buf structure from a ddi_umem_cookie
 * cookie - is a ddi_umem_cookie from ddi_umem_lock and ddi_umem_alloc
8726  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8727  * off, len - identifies the portion of the memory represented by the cookie
8728  *		that the buf points to.
8729  *	NOTE: off, len need to follow the alignment/size restrictions of the
8730  *		device (dev) that this buf will be passed to. Some devices
8731  *		will accept unrestricted alignment/size, whereas others (such as
8732  *		st) require some block-size alignment/size. It is the caller's
8733  *		responsibility to ensure that the alignment/size restrictions
8734  *		are met (we cannot assert as we do not know the restrictions)
8735  *
8736  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8737  *		the flags used in ddi_umem_lock
8738  *
8739  * The following three arguments are used to initialize fields in the
8740  * buf structure and are uninterpreted by this routine.
8741  *
8742  * dev
8743  * blkno
8744  * iodone
8745  *
8746  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8747  *
8748  * Returns a buf structure pointer on success (to be freed by freerbuf)
8749  *	NULL on any parameter error or memory alloc failure
8750  *
8751  */
8752 struct buf *
8753 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8754     int direction, dev_t dev, daddr_t blkno,
8755     int (*iodone)(struct buf *), int sleepflag)
8756 {
8757 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8758 	struct buf *bp;
8759 
8760 	/*
8761 	 * check for valid cookie offset, len
8762 	 */
8763 	if ((off + len) > p->size) {
8764 		return (NULL);
8765 	}
8766 
8767 	if (len > p->size) {
8768 		return (NULL);
8769 	}
8770 
8771 	/* direction has to be one of B_READ or B_WRITE */
8772 	if ((direction != B_READ) && (direction != B_WRITE)) {
8773 		return (NULL);
8774 	}
8775 
8776 	/* These are the only two valid sleepflags */
8777 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8778 		return (NULL);
8779 	}
8780 
8781 	/*
8782 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8783 	 */
8784 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8785 		return (NULL);
8786 	}
8787 
8788 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8789 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8790 	    (p->procp == NULL) : (p->procp != NULL));
8791 
8792 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8793 	if (bp == NULL) {
8794 		return (NULL);
8795 	}
8796 	bioinit(bp);
8797 
8798 	bp->b_flags = B_BUSY | B_PHYS | direction;
8799 	bp->b_edev = dev;
8800 	bp->b_lblkno = blkno;
8801 	bp->b_iodone = iodone;
8802 	bp->b_bcount = len;
8803 	bp->b_proc = (proc_t *)p->procp;
8804 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8805 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8806 	if (p->pparray != NULL) {
8807 		bp->b_flags |= B_SHADOW;
8808 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8809 		bp->b_shadow = p->pparray + btop(off);
8810 	}
8811 	return (bp);
8812 }
8813 
8814 /*
8815  * Fault-handling and related routines
8816  */
8817 
8818 ddi_devstate_t
8819 ddi_get_devstate(dev_info_t *dip)
8820 {
8821 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8822 		return (DDI_DEVSTATE_OFFLINE);
8823 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8824 		return (DDI_DEVSTATE_DOWN);
8825 	else if (DEVI_IS_BUS_QUIESCED(dip))
8826 		return (DDI_DEVSTATE_QUIESCED);
8827 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8828 		return (DDI_DEVSTATE_DEGRADED);
8829 	else
8830 		return (DDI_DEVSTATE_UP);
8831 }
8832 
8833 void
8834 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8835     ddi_fault_location_t location, const char *message)
8836 {
8837 	struct ddi_fault_event_data fd;
8838 	ddi_eventcookie_t ec;
8839 
8840 	/*
8841 	 * Assemble all the information into a fault-event-data structure
8842 	 */
8843 	fd.f_dip = dip;
8844 	fd.f_impact = impact;
8845 	fd.f_location = location;
8846 	fd.f_message = message;
8847 	fd.f_oldstate = ddi_get_devstate(dip);
8848 
8849 	/*
8850 	 * Get eventcookie from defining parent.
8851 	 */
8852 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8853 	    DDI_SUCCESS)
8854 		return;
8855 
8856 	(void) ndi_post_event(dip, dip, ec, &fd);
8857 }
8858 
char *
i_ddi_devi_class(dev_info_t *dip)
{
	/* Return the device-class string cached on the devinfo node */
	return (DEVI(dip)->devi_device_class);
}
8864 
8865 int
8866 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag)
8867 {
8868 	struct dev_info *devi = DEVI(dip);
8869 
8870 	mutex_enter(&devi->devi_lock);
8871 
8872 	if (devi->devi_device_class)
8873 		kmem_free(devi->devi_device_class,
8874 		    strlen(devi->devi_device_class) + 1);
8875 
8876 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8877 	    != NULL) {
8878 		mutex_exit(&devi->devi_lock);
8879 		return (DDI_SUCCESS);
8880 	}
8881 
8882 	mutex_exit(&devi->devi_lock);
8883 
8884 	return (DDI_FAILURE);
8885 }
8886 
8887 
8888 /*
8889  * Task Queues DDI interfaces.
8890  */
8891 
8892 /* ARGSUSED */
8893 ddi_taskq_t *
8894 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8895     pri_t pri, uint_t cflags)
8896 {
8897 	char full_name[TASKQ_NAMELEN];
8898 	const char *tq_name;
8899 	int nodeid = 0;
8900 
8901 	if (dip == NULL)
8902 		tq_name = name;
8903 	else {
8904 		nodeid = ddi_get_instance(dip);
8905 
8906 		if (name == NULL)
8907 			name = "tq";
8908 
8909 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8910 		    ddi_driver_name(dip), name);
8911 
8912 		tq_name = full_name;
8913 	}
8914 
8915 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8916 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8917 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8918 }
8919 
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	/* DDI wrapper: tear down the underlying taskq */
	taskq_destroy((taskq_t *)tq);
}
8925 
8926 int
8927 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8928     void *arg, uint_t dflags)
8929 {
8930 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8931 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8932 
8933 	return (id != TASKQID_INVALID ? DDI_SUCCESS : DDI_FAILURE);
8934 }
8935 
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	/* DDI wrapper: wait for the underlying taskq to drain */
	taskq_wait((taskq_t *)tq);
}
8941 
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	/* DDI wrapper: suspend the underlying taskq */
	taskq_suspend((taskq_t *)tq);
}
8947 
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	/* DDI wrapper: report whether the underlying taskq is suspended */
	return (taskq_suspended((taskq_t *)tq));
}
8953 
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	/* DDI wrapper: resume a previously suspended taskq */
	taskq_resume((taskq_t *)tq);
}
8959 
/*
 * Split a "<driver><instance>" style name into driver name and
 * instance (PPA) number; see ddi_parse_dlen() for the full semantics.
 */
int
ddi_parse(const char *ifname, char *alnum, uint_t *nump)
{
	/*
	 * Cap "alnum" size at LIFNAMSIZ, as callers use that in most/all
	 * cases.
	 */
	return (ddi_parse_dlen(ifname, alnum, LIFNAMSIZ, nump));
}
8969 
/*
 * Split a name of the form "<driver><instance>" (e.g. "e1000g10") into
 * its driver-name prefix and trailing decimal PPA number.  The prefix
 * is '\0'-terminated into "alnum" (which is "alnumsize" bytes long)
 * and the number is stored via *nump.
 *
 * Returns DDI_SUCCESS on success; DDI_FAILURE if the name has no
 * trailing digits, has no prefix before the digits, does not fit in
 * "alnum", or if the digit string fails to parse.
 */
int
ddi_parse_dlen(const char *ifname, char *alnum, size_t alnumsize, uint_t *nump)
{
	const char	*p;
	int		copy_len;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;	/* no trailing digits seen yet */
	char		c;

	/* Scan backwards over the trailing digits (the PPA number) */
	copy_len = strlen(ifname);
	for (p = ifname + copy_len; p != ifname; copy_len--) {
		c = *--p;
		if (!isdigit(c)) {
			/*
			 * At this point, copy_len is the length of ifname
			 * WITHOUT the PPA number. For "e1000g10" copy_len is 6.
			 *
			 * We must first make sure we HAVE a PPA, and we
			 * aren't exceeding alnumsize with copy_len and a '\0'
			 * terminator...
			 */
			int copy_len_nul = copy_len + 1;

			if (nonum || alnumsize < copy_len_nul)
				return (DDI_FAILURE);

			/*
			 * ... then we abuse strlcpy() to copy over the
			 * driver name portion AND '\0'-terminate it.
			 */
			(void) strlcpy(alnum, ifname, copy_len_nul);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}

	/* An empty or all-digit name has no driver-name prefix */
	if (copy_len == 0)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
9014 
9015 /*
9016  * Default initialization function for drivers that don't need to quiesce.
9017  */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* Stub quiesce(9E) entry point: nothing to do, report success */
	return (DDI_SUCCESS);
}
9024 
9025 /*
9026  * Initialization function for drivers that should implement quiesce()
9027  * but haven't yet.
9028  */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Placeholder quiesce(9E) entry point: always reports failure */
	return (DDI_FAILURE);
}
9035 
9036 char *
9037 ddi_strdup(const char *str, int flag)
9038 {
9039 	int	n;
9040 	char	*ptr;
9041 
9042 	ASSERT(str != NULL);
9043 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9044 
9045 	n = strlen(str);
9046 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9047 		return (NULL);
9048 	bcopy(str, ptr, n + 1);
9049 	return (ptr);
9050 }
9051 
char *
strdup(const char *str)
{
	/* Kernel strdup: duplicate str using a KM_SLEEP allocation */
	return (ddi_strdup(str, KM_SLEEP));
}
9057 
void
strfree(char *str)
{
	/* Free a string allocated by strdup()/ddi_strdup() */
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9064 
9065 /*
9066  * Generic DDI callback interfaces.
9067  */
9068 
9069 int
9070 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9071     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9072 {
9073 	ddi_cb_t	*cbp;
9074 
9075 	ASSERT(dip != NULL);
9076 	ASSERT(DDI_CB_FLAG_VALID(flags));
9077 	ASSERT(cbfunc != NULL);
9078 	ASSERT(ret_hdlp != NULL);
9079 
9080 	/* Sanity check the context */
9081 	ASSERT(!servicing_interrupt());
9082 	if (servicing_interrupt())
9083 		return (DDI_FAILURE);
9084 
9085 	/* Validate parameters */
9086 	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9087 	    (cbfunc == NULL) || (ret_hdlp == NULL))
9088 		return (DDI_EINVAL);
9089 
9090 	/* Check for previous registration */
9091 	if (DEVI(dip)->devi_cb_p != NULL)
9092 		return (DDI_EALREADY);
9093 
9094 	/* Allocate and initialize callback */
9095 	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9096 	cbp->cb_dip = dip;
9097 	cbp->cb_func = cbfunc;
9098 	cbp->cb_arg1 = arg1;
9099 	cbp->cb_arg2 = arg2;
9100 	cbp->cb_flags = flags;
9101 	DEVI(dip)->devi_cb_p = cbp;
9102 
9103 	/* If adding an IRM callback, notify IRM */
9104 	if (flags & DDI_CB_FLAG_INTR)
9105 		i_ddi_irm_set_cb(dip, B_TRUE);
9106 
9107 	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9108 	return (DDI_SUCCESS);
9109 }
9110 
9111 int
9112 ddi_cb_unregister(ddi_cb_handle_t hdl)
9113 {
9114 	ddi_cb_t	*cbp;
9115 	dev_info_t	*dip;
9116 
9117 	ASSERT(hdl != NULL);
9118 
9119 	/* Sanity check the context */
9120 	ASSERT(!servicing_interrupt());
9121 	if (servicing_interrupt())
9122 		return (DDI_FAILURE);
9123 
9124 	/* Validate parameters */
9125 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9126 	    ((dip = cbp->cb_dip) == NULL))
9127 		return (DDI_EINVAL);
9128 
9129 	/* If removing an IRM callback, notify IRM */
9130 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9131 		i_ddi_irm_set_cb(dip, B_FALSE);
9132 
9133 	/* Destroy the callback */
9134 	kmem_free(cbp, sizeof (ddi_cb_t));
9135 	DEVI(dip)->devi_cb_p = NULL;
9136 
9137 	return (DDI_SUCCESS);
9138 }
9139 
9140 /*
9141  * Platform independent DR routines
9142  */
9143 
9144 static int
9145 ndi2errno(int n)
9146 {
9147 	int err = 0;
9148 
9149 	switch (n) {
9150 		case NDI_NOMEM:
9151 			err = ENOMEM;
9152 			break;
9153 		case NDI_BUSY:
9154 			err = EBUSY;
9155 			break;
9156 		case NDI_FAULT:
9157 			err = EFAULT;
9158 			break;
9159 		case NDI_FAILURE:
9160 			err = EIO;
9161 			break;
9162 		case NDI_SUCCESS:
9163 			break;
9164 		case NDI_BADHANDLE:
9165 		default:
9166 			err = EINVAL;
9167 			break;
9168 	}
9169 	return (err);
9170 }
9171 
9172 /*
9173  * Prom tree node list
9174  */
struct ptnode {
	pnode_t		nodeid;		/* PROM node id for this entry */
	struct ptnode	*next;		/* next entry in the list */
};
9179 
9180 /*
9181  * Prom tree walk arg
9182  */
struct pta {
	dev_info_t	*pdip;		/* parent dip of the branch(es) */
	devi_branch_t	*bp;		/* caller's branch descriptor */
	uint_t		flags;		/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;		/* dip handed back by configure */
	struct ptnode	*head;		/* selected PROM nodes (see visit_node) */
};
9190 
9191 static void
9192 visit_node(pnode_t nodeid, struct pta *ap)
9193 {
9194 	struct ptnode	**nextp;
9195 	int		(*select)(pnode_t, void *, uint_t);
9196 
9197 	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9198 
9199 	select = ap->bp->create.prom_branch_select;
9200 
9201 	ASSERT(select);
9202 
9203 	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9204 
9205 		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9206 			;
9207 
9208 		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9209 
9210 		(*nextp)->nodeid = nodeid;
9211 	}
9212 
9213 	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9214 		return;
9215 
9216 	nodeid = prom_childnode(nodeid);
9217 	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9218 		visit_node(nodeid, ap);
9219 		nodeid = prom_nextnode(nodeid);
9220 	}
9221 }
9222 
9223 /*
9224  * NOTE: The caller of this function must check for device contracts
9225  * or LDI callbacks against this dip before setting the dip offline.
9226  */
9227 static int
9228 set_infant_dip_offline(dev_info_t *dip, void *arg)
9229 {
9230 	char	*path = (char *)arg;
9231 
9232 	ASSERT(dip);
9233 	ASSERT(arg);
9234 
9235 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9236 		(void) ddi_pathname(dip, path);
9237 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9238 		    "node: %s", path);
9239 		return (DDI_FAILURE);
9240 	}
9241 
9242 	mutex_enter(&(DEVI(dip)->devi_lock));
9243 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
9244 		DEVI_SET_DEVICE_OFFLINE(dip);
9245 	mutex_exit(&(DEVI(dip)->devi_lock));
9246 
9247 	return (DDI_SUCCESS);
9248 }
9249 
typedef struct result {
	char	*path;		/* scratch pathname buffer */
	int	result;		/* overall DDI_SUCCESS/DDI_FAILURE status */
} result_t;
9254 
9255 static int
9256 dip_set_offline(dev_info_t *dip, void *arg)
9257 {
9258 	int end;
9259 	result_t *resp = (result_t *)arg;
9260 
9261 	ASSERT(dip);
9262 	ASSERT(resp);
9263 
9264 	/*
9265 	 * We stop the walk if e_ddi_offline_notify() returns
9266 	 * failure, because this implies that one or more consumers
9267 	 * (either LDI or contract based) has blocked the offline.
9268 	 * So there is no point in conitnuing the walk
9269 	 */
9270 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9271 		resp->result = DDI_FAILURE;
9272 		return (DDI_WALK_TERMINATE);
9273 	}
9274 
9275 	/*
9276 	 * If set_infant_dip_offline() returns failure, it implies
9277 	 * that we failed to set a particular dip offline. This
9278 	 * does not imply that the offline as a whole should fail.
9279 	 * We want to do the best we can, so we continue the walk.
9280 	 */
9281 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9282 		end = DDI_SUCCESS;
9283 	else
9284 		end = DDI_FAILURE;
9285 
9286 	e_ddi_offline_finalize(dip, end);
9287 
9288 	return (DDI_WALK_CONTINUE);
9289 }
9290 
9291 /*
9292  * The call to e_ddi_offline_notify() exists for the
9293  * unlikely error case that a branch we are trying to
9294  * create already exists and has device contracts or LDI
9295  * event callbacks against it.
9296  *
9297  * We allow create to succeed for such branches only if
9298  * no constraints block the offline.
9299  */
9300 static int
9301 branch_set_offline(dev_info_t *dip, char *path)
9302 {
9303 	int		circ;
9304 	int		end;
9305 	result_t	res;
9306 
9307 
9308 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9309 		return (DDI_FAILURE);
9310 	}
9311 
9312 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9313 		end = DDI_SUCCESS;
9314 	else
9315 		end = DDI_FAILURE;
9316 
9317 	e_ddi_offline_finalize(dip, end);
9318 
9319 	if (end == DDI_FAILURE)
9320 		return (DDI_FAILURE);
9321 
9322 	res.result = DDI_SUCCESS;
9323 	res.path = path;
9324 
9325 	ndi_devi_enter(dip, &circ);
9326 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9327 	ndi_devi_exit(dip, circ);
9328 
9329 	return (res.result);
9330 }
9331 
/*
 * prom_tree_access() callback: create (or find) devinfo branches for
 * every PROM child of ap->pdip that the caller's selector accepts.
 * Each branch is held, set offline, optionally configured
 * (DEVI_BRANCH_CONFIGURE), and reported through devi_branch_callback()
 * if it was newly created.  Returns 0 on success or the first
 * errno-style error encountered while processing the list.
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Build the list of selected PROM nodes under the parent */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	/* Process (and free) each selected node in turn */
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9439 
/*
 * Create one self-identifying (SID) node under pdip, let the caller's
 * sid_branch_create() callback decorate it, and then recursively
 * create its children according to the DDI_WALK_* code the callback
 * returns.  On success the new node is named, bound, set offline and
 * (when rdipp is non-NULL) returned via *rdipp; the return value is a
 * DDI_WALK_* code directing the parent's sibling walk.
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int			rv, circ, len;
	int			i, flags, ret;
	dev_info_t		*dip;
	char			*nbuf;
	char			*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a placeholder node and let the callback fill it in */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p",  (void *)dip);
		goto fail;
	}

	/* The callback must have set a "name" property on the node */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Recurse: create children until one asks us to stop */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9565 
9566 static int
9567 create_sid_branch(
9568 	dev_info_t	*pdip,
9569 	devi_branch_t	*bp,
9570 	dev_info_t	**dipp,
9571 	uint_t		flags)
9572 {
9573 	int		rv = 0, state = DDI_WALK_CONTINUE;
9574 	dev_info_t	*rdip;
9575 
9576 	while (state == DDI_WALK_CONTINUE) {
9577 		int	circ;
9578 
9579 		ndi_devi_enter(pdip, &circ);
9580 
9581 		state = sid_node_create(pdip, bp, &rdip);
9582 		if (rdip == NULL) {
9583 			ndi_devi_exit(pdip, circ);
9584 			ASSERT(state == DDI_WALK_ERROR);
9585 			break;
9586 		}
9587 
9588 		e_ddi_branch_hold(rdip);
9589 
9590 		ndi_devi_exit(pdip, circ);
9591 
9592 		if (flags & DEVI_BRANCH_CONFIGURE) {
9593 			int error = e_ddi_branch_configure(rdip, dipp, 0);
9594 			if (error && rv == 0)
9595 				rv = error;
9596 		}
9597 
9598 		/*
9599 		 * devi_branch_callback() is optional
9600 		 */
9601 		if (bp->devi_branch_callback)
9602 			bp->devi_branch_callback(rdip, bp->arg, 0);
9603 	}
9604 
9605 	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
9606 
9607 	return (state == DDI_WALK_ERROR ? EIO : rv);
9608 }
9609 
9610 int
9611 e_ddi_branch_create(
9612 	dev_info_t	*pdip,
9613 	devi_branch_t	*bp,
9614 	dev_info_t	**dipp,
9615 	uint_t		flags)
9616 {
9617 	int prom_devi, sid_devi, error;
9618 
9619 	if (pdip == NULL || bp == NULL || bp->type == 0)
9620 		return (EINVAL);
9621 
9622 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9623 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9624 
9625 	if (prom_devi && bp->create.prom_branch_select == NULL)
9626 		return (EINVAL);
9627 	else if (sid_devi && bp->create.sid_branch_create == NULL)
9628 		return (EINVAL);
9629 	else if (!prom_devi && !sid_devi)
9630 		return (EINVAL);
9631 
9632 	if (flags & DEVI_BRANCH_EVENT)
9633 		return (EINVAL);
9634 
9635 	if (prom_devi) {
9636 		struct pta pta = {0};
9637 
9638 		pta.pdip = pdip;
9639 		pta.bp = bp;
9640 		pta.flags = flags;
9641 
9642 		error = prom_tree_access(create_prom_branch, &pta, NULL);
9643 
9644 		if (dipp)
9645 			*dipp = pta.fdip;
9646 		else if (pta.fdip)
9647 			ndi_rele_devi(pta.fdip);
9648 	} else {
9649 		error = create_sid_branch(pdip, bp, dipp, flags);
9650 	}
9651 
9652 	return (error);
9653 }
9654 
/*
 * Configure (online and attach) a previously created branch rooted at
 * rdip.  The branch must already be held via e_ddi_branch_hold().  On
 * failure, when dipp is non-NULL, it receives a held rdip for the
 * caller to inspect/release.  Returns 0 on success or an errno mapped
 * from the NDI result via ndi2errno().
 */
int
e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
{
	int		rv;
	char		*devnm;
	dev_info_t	*pdip;

	if (dipp)
		*dipp = NULL;

	/*
	 * NOTE(review): "flags != 0" already rejects every non-zero
	 * flags value, so the "(flags & DEVI_BRANCH_EVENT)" term is
	 * dead code; no flags are supported by this interface.
	 */
	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	/* Hold the parent across the configure operation */
	ndi_hold_devi(pdip);

	if (!e_ddi_branch_held(rdip)) {
		ndi_rele_devi(pdip);
		cmn_err(CE_WARN, "e_ddi_branch_configure: "
		    "dip(%p) not held", (void *)rdip);
		return (EINVAL);
	}

	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
		/*
		 * First attempt to bind a driver. If we fail, return
		 * success (On some platforms, dips for some device
		 * types (CPUs) may not have a driver)
		 */
		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
			ndi_rele_devi(pdip);
			return (0);
		}

		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
			rv = NDI_FAILURE;
			goto out;
		}
	}

	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	/* devnm carries a leading '/'; devnm+1 skips it below */
	(void) ddi_deviname(rdip, devnm);

	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
		/* release hold from ndi_devi_config_one() */
		ndi_rele_devi(rdip);
	}

	kmem_free(devnm, MAXNAMELEN + 1);
out:
	/* On failure hand a held rdip back to the caller via dipp */
	if (rv != NDI_SUCCESS && dipp && rdip) {
		ndi_hold_devi(rdip);
		*dipp = rdip;
	}
	ndi_rele_devi(pdip);
	return (ndi2errno(rv));
}
9717 
9718 void
9719 e_ddi_branch_hold(dev_info_t *rdip)
9720 {
9721 	if (e_ddi_branch_held(rdip)) {
9722 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9723 		return;
9724 	}
9725 
9726 	mutex_enter(&DEVI(rdip)->devi_lock);
9727 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9728 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9729 		DEVI(rdip)->devi_ref++;
9730 	}
9731 	ASSERT(DEVI(rdip)->devi_ref > 0);
9732 	mutex_exit(&DEVI(rdip)->devi_lock);
9733 }
9734 
9735 int
9736 e_ddi_branch_held(dev_info_t *rdip)
9737 {
9738 	int rv = 0;
9739 
9740 	mutex_enter(&DEVI(rdip)->devi_lock);
9741 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9742 	    DEVI(rdip)->devi_ref > 0) {
9743 		rv = 1;
9744 	}
9745 	mutex_exit(&DEVI(rdip)->devi_lock);
9746 
9747 	return (rv);
9748 }
9749 
9750 void
9751 e_ddi_branch_rele(dev_info_t *rdip)
9752 {
9753 	mutex_enter(&DEVI(rdip)->devi_lock);
9754 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9755 	DEVI(rdip)->devi_ref--;
9756 	mutex_exit(&DEVI(rdip)->devi_lock);
9757 }
9758 
9759 int
9760 e_ddi_branch_unconfigure(
9761 	dev_info_t *rdip,
9762 	dev_info_t **dipp,
9763 	uint_t flags)
9764 {
9765 	int	circ, rv;
9766 	int	destroy;
9767 	char	*devnm;
9768 	uint_t	nflags;
9769 	dev_info_t *pdip;
9770 
9771 	if (dipp)
9772 		*dipp = NULL;
9773 
9774 	if (rdip == NULL)
9775 		return (EINVAL);
9776 
9777 	pdip = ddi_get_parent(rdip);
9778 
9779 	ASSERT(pdip);
9780 
9781 	/*
9782 	 * Check if caller holds pdip busy - can cause deadlocks during
9783 	 * devfs_clean()
9784 	 */
9785 	if (DEVI_BUSY_OWNED(pdip)) {
9786 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9787 		    " devinfo node(%p) is busy held", (void *)pdip);
9788 		return (EINVAL);
9789 	}
9790 
9791 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9792 
9793 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9794 
9795 	ndi_devi_enter(pdip, &circ);
9796 	(void) ddi_deviname(rdip, devnm);
9797 	ndi_devi_exit(pdip, circ);
9798 
9799 	/*
9800 	 * ddi_deviname() returns a component name with / prepended.
9801 	 */
9802 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9803 
9804 	ndi_devi_enter(pdip, &circ);
9805 
9806 	/*
9807 	 * Recreate device name as it may have changed state (init/uninit)
9808 	 * when parent busy lock was dropped for devfs_clean()
9809 	 */
9810 	(void) ddi_deviname(rdip, devnm);
9811 
9812 	if (!e_ddi_branch_held(rdip)) {
9813 		kmem_free(devnm, MAXNAMELEN + 1);
9814 		ndi_devi_exit(pdip, circ);
9815 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9816 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
9817 		return (EINVAL);
9818 	}
9819 
9820 	/*
9821 	 * Release hold on the branch. This is ok since we are holding the
9822 	 * parent busy. If rdip is not removed, we must do a hold on the
9823 	 * branch before returning.
9824 	 */
9825 	e_ddi_branch_rele(rdip);
9826 
9827 	nflags = NDI_DEVI_OFFLINE;
9828 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9829 		nflags |= NDI_DEVI_REMOVE;
9830 		destroy = 1;
9831 	} else {
9832 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
9833 	}
9834 
9835 	if (flags & DEVI_BRANCH_EVENT)
9836 		nflags |= NDI_POST_EVENT;
9837 
9838 	if (i_ddi_devi_attached(pdip) &&
9839 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9840 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9841 	} else {
9842 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9843 		if (rv == NDI_SUCCESS) {
9844 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9845 			rv = ndi_devi_offline(rdip, nflags);
9846 		}
9847 	}
9848 
9849 	if (!destroy || rv != NDI_SUCCESS) {
9850 		/* The dip still exists, so do a hold */
9851 		e_ddi_branch_hold(rdip);
9852 	}
9853 
9854 	kmem_free(devnm, MAXNAMELEN + 1);
9855 	ndi_devi_exit(pdip, circ);
9856 	return (ndi2errno(rv));
9857 }
9858 
9859 int
9860 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9861 {
9862 	return (e_ddi_branch_unconfigure(rdip, dipp,
9863 	    flag|DEVI_BRANCH_DESTROY));
9864 }
9865 
9866 /*
9867  * Number of chains for hash table
9868  */
9869 #define	NUMCHAINS	17
9870 
9871 /*
9872  * Devinfo busy arg
9873  */
9874 struct devi_busy {
9875 	int dv_total;
9876 	int s_total;
9877 	mod_hash_t *dv_hash;
9878 	mod_hash_t *s_hash;
9879 	int (*callback)(dev_info_t *, void *, uint_t);
9880 	void *arg;
9881 };
9882 
9883 static int
9884 visit_dip(dev_info_t *dip, void *arg)
9885 {
9886 	uintptr_t sbusy, dvbusy, ref;
9887 	struct devi_busy *bsp = arg;
9888 
9889 	ASSERT(bsp->callback);
9890 
9891 	/*
9892 	 * A dip cannot be busy if its reference count is 0
9893 	 */
9894 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9895 		return (bsp->callback(dip, bsp->arg, 0));
9896 	}
9897 
9898 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9899 		dvbusy = 0;
9900 
9901 	/*
9902 	 * To catch device opens currently maintained on specfs common snodes.
9903 	 */
9904 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9905 		sbusy = 0;
9906 
9907 #ifdef	DEBUG
9908 	if (ref < sbusy || ref < dvbusy) {
9909 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9910 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9911 	}
9912 #endif
9913 
9914 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9915 
9916 	return (bsp->callback(dip, bsp->arg, dvbusy));
9917 }
9918 
9919 static int
9920 visit_snode(struct snode *sp, void *arg)
9921 {
9922 	uintptr_t sbusy;
9923 	dev_info_t *dip;
9924 	int count;
9925 	struct devi_busy *bsp = arg;
9926 
9927 	ASSERT(sp);
9928 
9929 	/*
9930 	 * The stable lock is held. This prevents
9931 	 * the snode and its associated dip from
9932 	 * going away.
9933 	 */
9934 	dip = NULL;
9935 	count = spec_devi_open_count(sp, &dip);
9936 
9937 	if (count <= 0)
9938 		return (DDI_WALK_CONTINUE);
9939 
9940 	ASSERT(dip);
9941 
9942 	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9943 		sbusy = count;
9944 	else
9945 		sbusy += count;
9946 
9947 	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9948 		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9949 		    "sbusy = %lu", "e_ddi_branch_referenced",
9950 		    (void *)dip, sbusy);
9951 	}
9952 
9953 	bsp->s_total += count;
9954 
9955 	return (DDI_WALK_CONTINUE);
9956 }
9957 
9958 static void
9959 visit_dvnode(struct dv_node *dv, void *arg)
9960 {
9961 	uintptr_t dvbusy;
9962 	uint_t count;
9963 	struct vnode *vp;
9964 	struct devi_busy *bsp = arg;
9965 
9966 	ASSERT(dv && dv->dv_devi);
9967 
9968 	vp = DVTOV(dv);
9969 
9970 	mutex_enter(&vp->v_lock);
9971 	count = vp->v_count;
9972 	mutex_exit(&vp->v_lock);
9973 
9974 	if (!count)
9975 		return;
9976 
9977 	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9978 	    (mod_hash_val_t *)&dvbusy))
9979 		dvbusy = count;
9980 	else
9981 		dvbusy += count;
9982 
9983 	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9984 	    (mod_hash_val_t)dvbusy)) {
9985 		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9986 		    "dvbusy=%lu", "e_ddi_branch_referenced",
9987 		    (void *)dv->dv_devi, dvbusy);
9988 	}
9989 
9990 	bsp->dv_total += count;
9991 }
9992 
9993 /*
9994  * Returns reference count on success or -1 on failure.
9995  */
9996 int
9997 e_ddi_branch_referenced(
9998 	dev_info_t *rdip,
9999 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
10000 	void *arg)
10001 {
10002 	int circ;
10003 	char *path;
10004 	dev_info_t *pdip;
10005 	struct devi_busy bsa = {0};
10006 
10007 	ASSERT(rdip);
10008 
10009 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
10010 
10011 	ndi_hold_devi(rdip);
10012 
10013 	pdip = ddi_get_parent(rdip);
10014 
10015 	ASSERT(pdip);
10016 
10017 	/*
10018 	 * Check if caller holds pdip busy - can cause deadlocks during
10019 	 * devfs_walk()
10020 	 */
10021 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
10022 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10023 		    "devinfo branch(%p) not held or parent busy held",
10024 		    (void *)rdip);
10025 		ndi_rele_devi(rdip);
10026 		kmem_free(path, MAXPATHLEN);
10027 		return (-1);
10028 	}
10029 
10030 	ndi_devi_enter(pdip, &circ);
10031 	(void) ddi_pathname(rdip, path);
10032 	ndi_devi_exit(pdip, circ);
10033 
10034 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10035 	    mod_hash_null_valdtor, sizeof (struct dev_info));
10036 
10037 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10038 	    mod_hash_null_valdtor, sizeof (struct snode));
10039 
10040 	if (devfs_walk(path, visit_dvnode, &bsa)) {
10041 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10042 		    "devfs walk failed for: %s", path);
10043 		kmem_free(path, MAXPATHLEN);
10044 		bsa.s_total = bsa.dv_total = -1;
10045 		goto out;
10046 	}
10047 
10048 	kmem_free(path, MAXPATHLEN);
10049 
10050 	/*
10051 	 * Walk the snode table to detect device opens, which are currently
10052 	 * maintained on specfs common snodes.
10053 	 */
10054 	spec_snode_walk(visit_snode, &bsa);
10055 
10056 	if (callback == NULL)
10057 		goto out;
10058 
10059 	bsa.callback = callback;
10060 	bsa.arg = arg;
10061 
10062 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10063 		ndi_devi_enter(rdip, &circ);
10064 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10065 		ndi_devi_exit(rdip, circ);
10066 	}
10067 
10068 out:
10069 	ndi_rele_devi(rdip);
10070 	mod_hash_destroy_ptrhash(bsa.s_hash);
10071 	mod_hash_destroy_ptrhash(bsa.dv_hash);
10072 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10073 }
10074