xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 77c0a660417a046bfab6c8ef58d00c181c0264b3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2022 Garrett D'Amore
25  * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
26  */
27 
28 #include <sys/note.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/buf.h>
33 #include <sys/uio.h>
34 #include <sys/cred.h>
35 #include <sys/poll.h>
36 #include <sys/mman.h>
37 #include <sys/kmem.h>
38 #include <sys/model.h>
39 #include <sys/file.h>
40 #include <sys/proc.h>
41 #include <sys/open.h>
42 #include <sys/user.h>
43 #include <sys/t_lock.h>
44 #include <sys/vm.h>
45 #include <sys/stat.h>
46 #include <vm/hat.h>
47 #include <vm/seg.h>
48 #include <vm/seg_vn.h>
49 #include <vm/seg_dev.h>
50 #include <vm/as.h>
51 #include <sys/cmn_err.h>
52 #include <sys/cpuvar.h>
53 #include <sys/debug.h>
54 #include <sys/autoconf.h>
55 #include <sys/sunddi.h>
56 #include <sys/esunddi.h>
57 #include <sys/sunndi.h>
58 #include <sys/kstat.h>
59 #include <sys/conf.h>
60 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
61 #include <sys/ndi_impldefs.h>	/* include prototypes */
62 #include <sys/ddi_periodic.h>
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 #include <sys/rctl.h>
86 #include <sys/zone.h>
87 #include <sys/clock_impl.h>
88 #include <sys/ddi.h>
89 #include <sys/modhash.h>
90 #include <sys/sunldi_impl.h>
91 #include <sys/fs/dv_node.h>
92 #include <sys/fs/snode.h>
93 
94 extern	pri_t	minclsyspri;
95 
96 extern	rctl_hndl_t rc_project_locked_mem;
97 extern	rctl_hndl_t rc_zone_locked_mem;
98 
99 #ifdef DEBUG
100 static int sunddi_debug = 0;
101 #endif /* DEBUG */
102 
103 /* ddi_umem_unlock miscellaneous */
104 
105 static	void	i_ddi_umem_unlock_thread_start(void);
106 
107 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
108 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
109 static	kthread_t	*ddi_umem_unlock_thread;
110 /*
111  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
112  */
113 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
114 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
115 
116 /*
117  * DDI(Sun) Function and flag definitions:
118  */
119 
120 #if defined(__x86)
121 /*
122  * Used to indicate which entries were chosen from a range.
123  */
124 char	*chosen_reg = "chosen-reg";
125 #endif
126 
127 /*
128  * Function used to ring system console bell
129  */
130 void (*ddi_console_bell_func)(clock_t duration);
131 
132 /*
133  * Creating register mappings and handling interrupts:
134  */
135 
136 /*
137  * Generic ddi_map: Call parent to fulfill request...
138  */
139 
140 int
141 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
142     off_t len, caddr_t *addrp)
143 {
144 	dev_info_t *pdip;
145 
146 	ASSERT(dp);
147 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
148 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
149 	    dp, mp, offset, len, addrp));
150 }
151 
152 /*
153  * ddi_apply_range: (Called by nexi only.)
154  * Apply ranges in parent node dp, to child regspec rp...
155  */
156 
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	/* Thin wrapper over the implementation-specific range translation. */
	return (i_ddi_apply_range(dp, rdip, rp));
}
162 
/*
 * ddi_map_regs: Map register set 'rnumber' of 'dip' into kernel virtual
 * address space, starting at 'offset' within the set, for 'len' bytes
 * (len == 0 means the whole set).  On success *kaddrp holds the mapping.
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 * (Published under the 'chosen-reg' name; removed again by
		 * ddi_unmap_regs().)
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked, kernel-mapping request for this register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
226 
227 void
228 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
229     off_t len)
230 {
231 	ddi_map_req_t mr;
232 
233 	mr.map_op = DDI_MO_UNMAP;
234 	mr.map_type = DDI_MT_RNUMBER;
235 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
236 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
237 	mr.map_obj.rnumber = rnumber;
238 	mr.map_handlep = NULL;
239 	mr.map_vers = DDI_MAP_VERSION;
240 
241 	/*
242 	 * Call my parent to unmap my regs.
243 	 */
244 
245 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
246 	*kaddrp = (caddr_t)0;
247 #if defined(__x86)
248 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
249 #endif
250 }
251 
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	/* Default bus_map entry point; defer to the implementation layer. */
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
258 
259 /*
260  * nullbusmap:	The/DDI default bus_map entry point for nexi
261  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
262  *		with no HAT/MMU layer to be programmed at this level.
263  *
264  *		If the call is to map by rnumber, return an error,
265  *		otherwise pass anything else up the tree to my parent.
266  */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	/* Mapping by register number is meaningless without a reg/range. */
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	/* Everything else goes up the tree to our parent. */
	return (ddi_map(dip, mp, offset, len, vaddrp));
}
277 
278 /*
279  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
280  *			   Only for use by nexi using the reg/range paradigm.
281  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Delegate to the implementation-specific regspec lookup. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
287 
288 
289 /*
290  * Note that we allow the dip to be nil because we may be called
291  * prior even to the instantiation of the devinfo tree itself - all
292  * regular leaf and nexus drivers should always use a non-nil dip!
293  *
294  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
295  * simply get a synchronous fault as soon as we touch a missing address.
296  *
297  * Poke is rather more carefully handled because we might poke to a write
298  * buffer, "succeed", then only find some time later that we got an
299  * asynchronous fault that indicated that the address we were writing to
300  * was not really backed by hardware.
301  */
302 
303 static int
304 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
305     void *addr, void *value_p)
306 {
307 	union {
308 		uint64_t	u64;
309 		uint32_t	u32;
310 		uint16_t	u16;
311 		uint8_t		u8;
312 	} peekpoke_value;
313 
314 	peekpoke_ctlops_t peekpoke_args;
315 	uint64_t dummy_result;
316 	int rval;
317 
318 	/* Note: size is assumed to be correct;  it is not checked. */
319 	peekpoke_args.size = size;
320 	peekpoke_args.dev_addr = (uintptr_t)addr;
321 	peekpoke_args.handle = NULL;
322 	peekpoke_args.repcount = 1;
323 	peekpoke_args.flags = 0;
324 
325 	if (cmd == DDI_CTLOPS_POKE) {
326 		switch (size) {
327 		case sizeof (uint8_t):
328 			peekpoke_value.u8 = *(uint8_t *)value_p;
329 			break;
330 		case sizeof (uint16_t):
331 			peekpoke_value.u16 = *(uint16_t *)value_p;
332 			break;
333 		case sizeof (uint32_t):
334 			peekpoke_value.u32 = *(uint32_t *)value_p;
335 			break;
336 		case sizeof (uint64_t):
337 			peekpoke_value.u64 = *(uint64_t *)value_p;
338 			break;
339 		}
340 	}
341 
342 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
343 
344 	if (devi != NULL)
345 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
346 		    &dummy_result);
347 	else
348 		rval = peekpoke_mem(cmd, &peekpoke_args);
349 
350 	/*
351 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
352 	 */
353 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
354 		switch (size) {
355 		case sizeof (uint8_t):
356 			*(uint8_t *)value_p = peekpoke_value.u8;
357 			break;
358 		case sizeof (uint16_t):
359 			*(uint16_t *)value_p = peekpoke_value.u16;
360 			break;
361 		case sizeof (uint32_t):
362 			*(uint32_t *)value_p = peekpoke_value.u32;
363 			break;
364 		case sizeof (uint64_t):
365 			*(uint64_t *)value_p = peekpoke_value.u64;
366 			break;
367 		}
368 	}
369 
370 	return (rval);
371 }
372 
373 /*
374  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
375  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
376  */
377 int
378 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
379 {
380 	switch (size) {
381 	case sizeof (uint8_t):
382 	case sizeof (uint16_t):
383 	case sizeof (uint32_t):
384 	case sizeof (uint64_t):
385 		break;
386 	default:
387 		return (DDI_FAILURE);
388 	}
389 
390 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
391 }
392 
393 int
394 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
395 {
396 	switch (size) {
397 	case sizeof (uint8_t):
398 	case sizeof (uint16_t):
399 	case sizeof (uint32_t):
400 	case sizeof (uint64_t):
401 		break;
402 	default:
403 		return (DDI_FAILURE);
404 	}
405 
406 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
407 }
408 
/* Peek a single 8-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
415 
/* Peek a single 16-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
422 
/* Peek a single 32-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
429 
/* Peek a single 64-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
436 
/* Poke a single 8-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
442 
/* Poke a single 16-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
448 
/* Poke a single 32-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
454 
/* Poke a single 64-bit value; see i_ddi_peekpoke() for the mechanics. */
int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
460 
461 /*
462  * ddi_peekpokeio() is used primarily by the mem drivers for moving
463  * data to and from uio structures via peek and poke.  Note that we
464  * use "internal" routines ddi_peek and ddi_poke to make this go
465  * slightly faster, avoiding the call overhead ..
466  */
/*
 * Move 'len' bytes between the uio and device address 'addr' using
 * probed (peek/poke) accesses.  The access width is negotiated per
 * iteration: byte accesses when length or address is odd, otherwise the
 * widest of 16/32/64 bits that both 'xfersize' and the current
 * length/address alignment permit.  Returns DDI_SUCCESS, or DDI_FAILURE
 * on the first faulting access or uiomove error.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Never transfer wider than the native word. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		if ((len | (uintptr_t)addr) & 1) {
			/* Odd length or address: fall back to bytes. */
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/* Pick the widest access both sides can handle. */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
542 
543 /*
544  * These routines are used by drivers that do layered ioctls
545  * On sparc, they're implemented in assembler to avoid spilling
546  * register windows in the common (copyin) case ..
547  */
548 #if !defined(__sparc)
/*
 * Copy 'size' bytes from 'buf' into kernel buffer 'kernbuf'.  FKIOCTL
 * indicates the ioctl originated in the kernel, so the source is a
 * kernel address and kcopy() is used instead of copyin().
 * Returns 0 on success, -1 on failure (matching copyin semantics).
 */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}
556 
/*
 * Copy 'size' bytes from kernel buffer 'buf' out to 'kernbuf'.  FKIOCTL
 * indicates the ioctl originated in the kernel, so the destination is a
 * kernel address and kcopy() is used instead of copyout().
 * Returns 0 on success, -1 on failure (matching copyout semantics).
 */
int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
564 #endif	/* !__sparc */
565 
566 /*
567  * Conversions in nexus pagesize units.  We don't duplicate the
568  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
569  * routines anyway.
570  */
/* Convert bytes to pages (truncating) in the nexus' pagesize units. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}
579 
/* Convert bytes to pages (rounding up) in the nexus' pagesize units. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}
588 
/* Convert pages to bytes in the nexus' pagesize units. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
597 
/*
 * Raise the priority level to block (nearly) all interrupts; the return
 * value is the previous level, to be handed back to ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}
603 
/* Restore the priority level saved by a prior ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
609 
610 /*
611  * Nexus ctlops punter
612  */
613 
614 #if !defined(__sparc)
615 /*
616  * Request bus_ctl parent to handle a bus_ctl request
617  *
618  * (The sparc version is in sparc_ddi.s)
619  */
620 int
621 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
622 {
623 	int (*fp)();
624 
625 	if (!d || !r)
626 		return (DDI_FAILURE);
627 
628 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
629 		return (DDI_FAILURE);
630 
631 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
632 	return ((*fp)(d, r, op, a, v));
633 }
634 
635 #endif
636 
637 /*
638  * DMA/DVMA setup
639  */
640 
641 #if !defined(__sparc)
642 /*
643  * Request bus_dma_ctl parent to fiddle with a dma request.
644  *
645  * (The sparc version is in sparc_subr.s)
646  */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Route to the ancestor providing bus_dma_ctl (root maps to self). */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
659 #endif
660 
661 /*
662  * For all DMA control functions, call the DMA control
663  * routine and return status.
664  *
665  * Just plain assume that the parent is to be called.
666  * If a nexus driver or a thread outside the framework
667  * of a nexus driver or a leaf driver calls these functions,
668  * it is up to them to deal with the fact that the parent's
669  * bus_dma_ctl function will be the first one called.
670  */
671 
672 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
673 
674 /*
675  * This routine is left in place to satisfy link dependencies
676  * for any 3rd party nexus drivers that rely on it.  It is never
677  * called, though.
678  */
/*ARGSUSED*/
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	/* Deliberately a stub; retained only for link compatibility. */
	return (DDI_FAILURE);
}
686 
687 #if !defined(__sparc)
688 
689 /*
690  * The SPARC versions of these routines are done in assembler to
691  * save register windows, so they're in sparc_subr.s.
692  */
693 
694 int
695 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
696     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
697 {
698 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
699 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
700 
701 	if (dip != ddi_root_node())
702 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
703 
704 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
705 	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
706 }
707 
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): this follows the devi_bus_dma_allochdl parent link
	 * rather than a dedicated "freehdl" one — presumably so free is
	 * handled by the same nexus that allocated the handle; confirm
	 * against the sparc assembler counterpart before changing.
	 */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(dip, rdip, handlep));
}
719 
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	/* Route to the ancestor that implements bus_dma_bindhdl. */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
}
734 
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/* Route to the ancestor that implements bus_dma_unbindhdl. */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(dip, rdip, handle));
}
747 
748 
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	/* Route to the ancestor that implements bus_dma_flush. */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
}
763 
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	/* Route to the ancestor that implements bus_dma_win. */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
779 
/*
 * Synchronize the view of DMA memory for CPU or device ('whom') over
 * [o, o+l) of the mapping described by handle h.  Implemented in terms
 * of the parent nexus' bus_dma_flush entry point.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
803 
/*
 * Unbind the DMA resources of handle h via the cached per-driver unbind
 * function (devi_bus_dma_unbindfunc), resolving the implementing nexus
 * from the handle itself.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
817 
818 #endif	/* !__sparc */
819 
820 /*
821  * DMA burst sizes, and transfer minimums
822  */
823 
824 int
825 ddi_dma_burstsizes(ddi_dma_handle_t handle)
826 {
827 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
828 
829 	if (!dimp)
830 		return (0);
831 	else
832 		return (dimp->dmai_burstsizes);
833 }
834 
835 /*
836  * Given two DMA attribute structures, apply the attributes
837  * of one to the other, following the rules of attributes
838  * and the wishes of the caller.
839  *
840  * The rules of DMA attribute structures are that you cannot
841  * make things *less* restrictive as you apply one set
842  * of attributes to another.
843  *
844  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window can only shrink. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Alignment can only become stricter. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* Only burst sizes supported by both survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
868 
869 /*
870  * mmap/segmap interface:
871  */
872 
873 /*
874  * ddi_segmap:		setup the default segment driver. Calls the drivers
875  *			XXmmap routine to validate the range to be mapped.
876  *			Return ENXIO of the range is not valid.  Create
877  *			a seg_dev segment that contains all of the
878  *			necessary information and will reference the
879  *			default segment driver routines. It returns zero
880  *			on success or non-zero on failure.
881  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Defer to specfs' segmap, which validates and builds the seg_dev. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
892 
893 /*
894  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
895  *			drivers. Allows each successive parent to resolve
896  *			address translations and add its mappings to the
897  *			mapping list supplied in the page structure. It
898  *			returns zero on success	or non-zero on failure.
899  */
900 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Delegate to the implementation-specific fault resolver. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
907 
908 /*
909  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
910  *	Invokes platform specific DDI to determine whether attributes specified
911  *	in attr(9s) are	valid for the region of memory that will be made
912  *	available for direct access to user process via the mmap(2) system call.
913  */
/*
 * Validate the access attributes for mmap'ing register set 'rnumber' of
 * the device identified by 'dev', and report the HAT flags the framework
 * chose.  Returns 0 on success, -1 on any failure.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE checks mappability without creating a mapping.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
976 
977 
978 /*
979  * Property functions:	 See also, ddipropdefs.h.
980  *
981  * These functions are the framework for the property functions,
982  * i.e. they support software defined properties.  All implementation
983  * specific property handling (i.e.: self-identifying devices and
984  * PROM defined properties are handled in the implementation specific
985  * functions (defined in ddi_implfuncs.h).
986  */
987 
988 /*
989  * nopropop:	Shouldn't be called, right?
990  */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	/* Placeholder prop_op: every lookup fails by design. */
	return (DDI_PROP_NOT_FOUND);
}
998 
999 #ifdef	DDI_PROP_DEBUG
1000 int ddi_prop_debug_flag = 0;
1001 
1002 int
1003 ddi_prop_debug(int enable)
1004 {
1005 	int prev = ddi_prop_debug_flag;
1006 
1007 	if ((enable != 0) || (prev != 0))
1008 		printf("ddi_prop_debug: debugging %s\n",
1009 		    enable ? "enabled" : "disabled");
1010 	ddi_prop_debug_flag = enable;
1011 	return (prev);
1012 }
1013 
1014 #endif	/* DDI_PROP_DEBUG */
1015 
1016 /*
1017  * Search a property list for a match, if found return pointer
1018  * to matching prop struct, else return NULL.
1019  */
1020 
1021 ddi_prop_t *
1022 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1023 {
1024 	ddi_prop_t	*propp;
1025 
1026 	/*
1027 	 * find the property in child's devinfo:
1028 	 * Search order defined by this search function is first matching
1029 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1030 	 * dev == propp->prop_dev, name == propp->name, and the correct
1031 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1032 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1033 	 */
1034 	if (dev == DDI_DEV_T_NONE)
1035 		dev = DDI_DEV_T_ANY;
1036 
1037 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1038 
1039 		if (!DDI_STRSAME(propp->prop_name, name))
1040 			continue;
1041 
1042 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1043 			continue;
1044 
1045 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1046 			continue;
1047 
1048 		return (propp);
1049 	}
1050 
1051 	return ((ddi_prop_t *)0);
1052 }
1053 
1054 /*
1055  * Search for property within devnames structures
1056  */
/*
 * Look up 'name' in the per-driver (devnames) global property list for
 * the major number of 'dev'.  Returns the matching ddi_prop_t or NULL.
 * NOTE(review): the returned pointer is into the list and is returned
 * after dn_lock is dropped — callers presumably rely on the list's
 * lifetime guarantees; verify before changing locking here.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* Exact dev match required unless a wildcard flag is set. */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1104 
/* cmn_err(9F) format string used when a property value buffer can't be allocated */
static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1106 
1107 /*
1108  * ddi_prop_search_global:
1109  *	Search the global property list within devnames
1110  *	for the named property.  Return the encoded value.
1111  */
1112 static int
1113 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1114     void *valuep, uint_t *lengthp)
1115 {
1116 	ddi_prop_t	*propp;
1117 	caddr_t		buffer;
1118 
1119 	propp =  i_ddi_search_global_prop(dev, name, flags);
1120 
1121 	/* Property NOT found, bail */
1122 	if (propp == (ddi_prop_t *)0)
1123 		return (DDI_PROP_NOT_FOUND);
1124 
1125 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1126 		return (DDI_PROP_UNDEFINED);
1127 
1128 	if ((buffer = kmem_alloc(propp->prop_len,
1129 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1130 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1131 		return (DDI_PROP_NO_MEMORY);
1132 	}
1133 
1134 	/*
1135 	 * Return the encoded data
1136 	 */
1137 	*(caddr_t *)valuep = buffer;
1138 	*lengthp = propp->prop_len;
1139 	bcopy(propp->prop_val, buffer, propp->prop_len);
1140 
1141 	return (DDI_PROP_SUCCESS);
1142 }
1143 
1144 /*
1145  * ddi_prop_search_common:	Lookup and return the encoded value
1146  */
1147 int
1148 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1149     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1150 {
1151 	ddi_prop_t	*propp;
1152 	int		i;
1153 	caddr_t		buffer = NULL;
1154 	caddr_t		prealloc = NULL;
1155 	int		plength = 0;
1156 	dev_info_t	*pdip;
1157 	int		(*bop)();
1158 
1159 	/*CONSTANTCONDITION*/
1160 	while (1)  {
1161 
1162 		mutex_enter(&(DEVI(dip)->devi_lock));
1163 
1164 
1165 		/*
1166 		 * find the property in child's devinfo:
1167 		 * Search order is:
1168 		 *	1. driver defined properties
1169 		 *	2. system defined properties
1170 		 *	3. driver global properties
1171 		 *	4. boot defined properties
1172 		 */
1173 
1174 		propp = i_ddi_prop_search(dev, name, flags,
1175 		    &(DEVI(dip)->devi_drv_prop_ptr));
1176 		if (propp == NULL)  {
1177 			propp = i_ddi_prop_search(dev, name, flags,
1178 			    &(DEVI(dip)->devi_sys_prop_ptr));
1179 		}
1180 		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1181 			propp = i_ddi_prop_search(dev, name, flags,
1182 			    &DEVI(dip)->devi_global_prop_list->prop_list);
1183 		}
1184 
1185 		if (propp == NULL)  {
1186 			propp = i_ddi_prop_search(dev, name, flags,
1187 			    &(DEVI(dip)->devi_hw_prop_ptr));
1188 		}
1189 
1190 		/*
1191 		 * Software property found?
1192 		 */
1193 		if (propp != (ddi_prop_t *)0)	{
1194 
1195 			/*
1196 			 * If explicit undefine, return now.
1197 			 */
1198 			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1199 				mutex_exit(&(DEVI(dip)->devi_lock));
1200 				if (prealloc)
1201 					kmem_free(prealloc, plength);
1202 				return (DDI_PROP_UNDEFINED);
1203 			}
1204 
1205 			/*
1206 			 * If we only want to know if it exists, return now
1207 			 */
1208 			if (prop_op == PROP_EXISTS) {
1209 				mutex_exit(&(DEVI(dip)->devi_lock));
1210 				ASSERT(prealloc == NULL);
1211 				return (DDI_PROP_SUCCESS);
1212 			}
1213 
1214 			/*
1215 			 * If length only request or prop length == 0,
1216 			 * service request and return now.
1217 			 */
1218 			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1219 				*lengthp = propp->prop_len;
1220 
1221 				/*
1222 				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1223 				 * that means prop_len is 0, so set valuep
1224 				 * also to NULL
1225 				 */
1226 				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1227 					*(caddr_t *)valuep = NULL;
1228 
1229 				mutex_exit(&(DEVI(dip)->devi_lock));
1230 				if (prealloc)
1231 					kmem_free(prealloc, plength);
1232 				return (DDI_PROP_SUCCESS);
1233 			}
1234 
1235 			/*
1236 			 * If LEN_AND_VAL_ALLOC and the request can sleep,
1237 			 * drop the mutex, allocate the buffer, and go
1238 			 * through the loop again.  If we already allocated
1239 			 * the buffer, and the size of the property changed,
1240 			 * keep trying...
1241 			 */
1242 			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1243 			    (flags & DDI_PROP_CANSLEEP))  {
1244 				if (prealloc && (propp->prop_len != plength)) {
1245 					kmem_free(prealloc, plength);
1246 					prealloc = NULL;
1247 				}
1248 				if (prealloc == NULL)  {
1249 					plength = propp->prop_len;
1250 					mutex_exit(&(DEVI(dip)->devi_lock));
1251 					prealloc = kmem_alloc(plength,
1252 					    KM_SLEEP);
1253 					continue;
1254 				}
1255 			}
1256 
1257 			/*
1258 			 * Allocate buffer, if required.  Either way,
1259 			 * set `buffer' variable.
1260 			 */
1261 			i = *lengthp;			/* Get callers length */
1262 			*lengthp = propp->prop_len;	/* Set callers length */
1263 
1264 			switch (prop_op) {
1265 
1266 			case PROP_LEN_AND_VAL_ALLOC:
1267 
1268 				if (prealloc == NULL) {
1269 					buffer = kmem_alloc(propp->prop_len,
1270 					    KM_NOSLEEP);
1271 				} else {
1272 					buffer = prealloc;
1273 				}
1274 
1275 				if (buffer == NULL)  {
1276 					mutex_exit(&(DEVI(dip)->devi_lock));
1277 					cmn_err(CE_CONT, prop_no_mem_msg, name);
1278 					return (DDI_PROP_NO_MEMORY);
1279 				}
1280 				/* Set callers buf ptr */
1281 				*(caddr_t *)valuep = buffer;
1282 				break;
1283 
1284 			case PROP_LEN_AND_VAL_BUF:
1285 
1286 				if (propp->prop_len > (i)) {
1287 					mutex_exit(&(DEVI(dip)->devi_lock));
1288 					return (DDI_PROP_BUF_TOO_SMALL);
1289 				}
1290 
1291 				buffer = valuep;  /* Get callers buf ptr */
1292 				break;
1293 
1294 			default:
1295 				break;
1296 			}
1297 
1298 			/*
1299 			 * Do the copy.
1300 			 */
1301 			if (buffer != NULL)
1302 				bcopy(propp->prop_val, buffer, propp->prop_len);
1303 			mutex_exit(&(DEVI(dip)->devi_lock));
1304 			return (DDI_PROP_SUCCESS);
1305 		}
1306 
1307 		mutex_exit(&(DEVI(dip)->devi_lock));
1308 		if (prealloc)
1309 			kmem_free(prealloc, plength);
1310 		prealloc = NULL;
1311 
1312 		/*
1313 		 * Prop not found, call parent bus_ops to deal with possible
1314 		 * h/w layer (possible PROM defined props, etc.) and to
1315 		 * possibly ascend the hierarchy, if allowed by flags.
1316 		 */
1317 		pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1318 
1319 		/*
1320 		 * One last call for the root driver PROM props?
1321 		 */
1322 		if (dip == ddi_root_node())  {
1323 			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1324 			    flags, name, valuep, (int *)lengthp));
1325 		}
1326 
1327 		/*
1328 		 * We may have been called to check for properties
1329 		 * within a single devinfo node that has no parent -
1330 		 * see make_prop()
1331 		 */
1332 		if (pdip == NULL) {
1333 			ASSERT((flags &
1334 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1335 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1336 			return (DDI_PROP_NOT_FOUND);
1337 		}
1338 
1339 		/*
1340 		 * Instead of recursing, we do iterative calls up the tree.
1341 		 * As a bit of optimization, skip the bus_op level if the
1342 		 * node is a s/w node and if the parent's bus_prop_op function
1343 		 * is `ddi_bus_prop_op', because we know that in this case,
1344 		 * this function does nothing.
1345 		 *
1346 		 * 4225415: If the parent isn't attached, or the child
1347 		 * hasn't been named by the parent yet, use the default
1348 		 * ddi_bus_prop_op as a proxy for the parent.  This
1349 		 * allows property lookups in any child/parent state to
1350 		 * include 'prom' and inherited properties, even when
1351 		 * there are no drivers attached to the child or parent.
1352 		 */
1353 
1354 		bop = ddi_bus_prop_op;
1355 		if (i_ddi_devi_attached(pdip) &&
1356 		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
1357 			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1358 
1359 		i = DDI_PROP_NOT_FOUND;
1360 
1361 		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1362 			i = (*bop)(dev, pdip, dip, prop_op,
1363 			    flags | DDI_PROP_DONTPASS,
1364 			    name, valuep, lengthp);
1365 		}
1366 
1367 		if ((flags & DDI_PROP_DONTPASS) ||
1368 		    (i != DDI_PROP_NOT_FOUND))
1369 			return (i);
1370 
1371 		dip = pdip;
1372 	}
1373 	/*NOTREACHED*/
1374 }
1375 
1376 
1377 /*
1378  * ddi_prop_op: The basic property operator for drivers.
1379  *
1380  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1381  *
1382  *	prop_op			valuep
1383  *	------			------
1384  *
1385  *	PROP_LEN		<unused>
1386  *
1387  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1388  *
1389  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1390  *				address of allocated buffer, if successful)
1391  */
1392 int
1393 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1394     char *name, caddr_t valuep, int *lengthp)
1395 {
1396 	int	i;
1397 
1398 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1399 
1400 	/*
1401 	 * If this was originally an LDI prop lookup then we bail here.
1402 	 * The reason is that the LDI property lookup interfaces first call
1403 	 * a drivers prop_op() entry point to allow it to override
1404 	 * properties.  But if we've made it here, then the driver hasn't
1405 	 * overriden any properties.  We don't want to continue with the
1406 	 * property search here because we don't have any type inforamtion.
1407 	 * When we return failure, the LDI interfaces will then proceed to
1408 	 * call the typed property interfaces to look up the property.
1409 	 */
1410 	if (mod_flags & DDI_PROP_DYNAMIC)
1411 		return (DDI_PROP_NOT_FOUND);
1412 
1413 	/*
1414 	 * check for pre-typed property consumer asking for typed property:
1415 	 * see e_ddi_getprop_int64.
1416 	 */
1417 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1418 		mod_flags |= DDI_PROP_TYPE_INT64;
1419 	mod_flags |= DDI_PROP_TYPE_ANY;
1420 
1421 	i = ddi_prop_search_common(dev, dip, prop_op,
1422 	    mod_flags, name, valuep, (uint_t *)lengthp);
1423 	if (i == DDI_PROP_FOUND_1275)
1424 		return (DDI_PROP_SUCCESS);
1425 	return (i);
1426 }
1427 
1428 /*
1429  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1430  * maintain size in number of blksize blocks.  Provides a dynamic property
1431  * implementation for size oriented properties based on nblocks64 and blksize
1432  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1433  * is too large.  This interface should not be used with a nblocks64 that
1434  * represents the driver's idea of how to represent unknown, if nblocks is
1435  * unknown use ddi_prop_op.
1436  */
1437 int
1438 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1439     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1440     uint64_t nblocks64, uint_t blksize)
1441 {
1442 	uint64_t size64;
1443 	int	blkshift;
1444 
1445 	/* convert block size to shift value */
1446 	ASSERT(BIT_ONLYONESET(blksize));
1447 	blkshift = highbit(blksize) - 1;
1448 
1449 	/*
1450 	 * There is no point in supporting nblocks64 values that don't have
1451 	 * an accurate uint64_t byte count representation.
1452 	 */
1453 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1454 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1455 		    name, valuep, lengthp));
1456 
1457 	size64 = nblocks64 << blkshift;
1458 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1459 	    name, valuep, lengthp, size64, blksize));
1460 }
1461 
1462 /*
1463  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1464  */
1465 int
1466 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1467     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1468 {
1469 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1470 	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1471 }
1472 
1473 /*
1474  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1475  * maintain size in bytes. Provides a of dynamic property implementation for
1476  * size oriented properties based on size64 value and blksize passed in by the
1477  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1478  * should not be used with a size64 that represents the driver's idea of how
1479  * to represent unknown, if size is unknown use ddi_prop_op.
1480  *
1481  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1482  * integers. While the most likely interface to request them ([bc]devi_size)
1483  * is declared int (signed) there is no enforcement of this, which means we
1484  * can't enforce limitations here without risking regression.
1485  */
1486 int
1487 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1488     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1489     uint_t blksize)
1490 {
1491 	uint64_t nblocks64;
1492 	int	callers_length;
1493 	caddr_t	buffer;
1494 	int	blkshift;
1495 
1496 	/*
1497 	 * This is a kludge to support capture of size(9P) pure dynamic
1498 	 * properties in snapshots for non-cmlb code (without exposing
1499 	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1500 	 * should be removed.
1501 	 */
1502 	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1503 		static i_ddi_prop_dyn_t prop_dyn_size[] = {
1504 		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
1505 		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
1506 		    {NULL}
1507 		};
1508 		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1509 	}
1510 
1511 	/* convert block size to shift value */
1512 	ASSERT(BIT_ONLYONESET(blksize));
1513 	blkshift = highbit(blksize) - 1;
1514 
1515 	/* compute DEV_BSIZE nblocks value */
1516 	nblocks64 = size64 >> blkshift;
1517 
1518 	/* get callers length, establish length of our dynamic properties */
1519 	callers_length = *lengthp;
1520 
1521 	if (strcmp(name, "Nblocks") == 0)
1522 		*lengthp = sizeof (uint64_t);
1523 	else if (strcmp(name, "Size") == 0)
1524 		*lengthp = sizeof (uint64_t);
1525 	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1526 		*lengthp = sizeof (uint32_t);
1527 	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1528 		*lengthp = sizeof (uint32_t);
1529 	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1530 		*lengthp = sizeof (uint32_t);
1531 	else {
1532 		/* fallback to ddi_prop_op */
1533 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1534 		    name, valuep, lengthp));
1535 	}
1536 
1537 	/* service request for the length of the property */
1538 	if (prop_op == PROP_LEN)
1539 		return (DDI_PROP_SUCCESS);
1540 
1541 	switch (prop_op) {
1542 	case PROP_LEN_AND_VAL_ALLOC:
1543 		if ((buffer = kmem_alloc(*lengthp,
1544 		    (mod_flags & DDI_PROP_CANSLEEP) ?
1545 		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1546 			return (DDI_PROP_NO_MEMORY);
1547 
1548 		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1549 		break;
1550 
1551 	case PROP_LEN_AND_VAL_BUF:
1552 		/* the length of the property and the request must match */
1553 		if (callers_length != *lengthp)
1554 			return (DDI_PROP_INVAL_ARG);
1555 
1556 		buffer = valuep;		/* get callers buf ptr */
1557 		break;
1558 
1559 	default:
1560 		return (DDI_PROP_INVAL_ARG);
1561 	}
1562 
1563 	/* transfer the value into the buffer */
1564 	if (strcmp(name, "Nblocks") == 0)
1565 		*((uint64_t *)buffer) = nblocks64;
1566 	else if (strcmp(name, "Size") == 0)
1567 		*((uint64_t *)buffer) = size64;
1568 	else if (strcmp(name, "nblocks") == 0)
1569 		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1570 	else if (strcmp(name, "size") == 0)
1571 		*((uint32_t *)buffer) = (uint32_t)size64;
1572 	else if (strcmp(name, "blksize") == 0)
1573 		*((uint32_t *)buffer) = (uint32_t)blksize;
1574 	return (DDI_PROP_SUCCESS);
1575 }
1576 
1577 /*
1578  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1579  */
1580 int
1581 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1582     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1583 {
1584 	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1585 	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1586 }
1587 
1588 /*
1589  * Variable length props...
1590  */
1591 
1592 /*
1593  * ddi_getlongprop:	Get variable length property len+val into a buffer
1594  *		allocated by property provider via kmem_alloc. Requester
1595  *		is responsible for freeing returned property via kmem_free.
1596  *
1597  *	Arguments:
1598  *
1599  *	dev_t:	Input:	dev_t of property.
1600  *	dip:	Input:	dev_info_t pointer of child.
1601  *	flags:	Input:	Possible flag modifiers are:
1602  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1603  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1604  *	name:	Input:	name of property.
1605  *	valuep:	Output:	Addr of callers buffer pointer.
1606  *	lengthp:Output:	*lengthp will contain prop length on exit.
1607  *
1608  *	Possible Returns:
1609  *
1610  *		DDI_PROP_SUCCESS:	Prop found and returned.
1611  *		DDI_PROP_NOT_FOUND:	Prop not found
1612  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1613  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1614  */
1615 
1616 int
1617 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1618     char *name, caddr_t valuep, int *lengthp)
1619 {
1620 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1621 	    flags, name, valuep, lengthp));
1622 }
1623 
1624 /*
1625  *
1626  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1627  *				buffer. (no memory allocation by provider).
1628  *
1629  *	dev_t:	Input:	dev_t of property.
1630  *	dip:	Input:	dev_info_t pointer of child.
1631  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1632  *	name:	Input:	name of property
1633  *	valuep:	Input:	ptr to callers buffer.
1634  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1635  *			actual length of property on exit.
1636  *
1637  *	Possible returns:
1638  *
1639  *		DDI_PROP_SUCCESS	Prop found and returned
1640  *		DDI_PROP_NOT_FOUND	Prop not found
1641  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1642  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1643  *					no value returned, but actual prop
1644  *					length returned in *lengthp
1645  *
1646  */
1647 
1648 int
1649 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1650     char *name, caddr_t valuep, int *lengthp)
1651 {
1652 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1653 	    flags, name, valuep, lengthp));
1654 }
1655 
1656 /*
1657  * Integer/boolean sized props.
1658  *
1659  * Call is value only... returns found boolean or int sized prop value or
1660  * defvalue if prop not found or is wrong length or is explicitly undefined.
1661  * Only flag is DDI_PROP_DONTPASS...
1662  *
1663  * By convention, this interface returns boolean (0) sized properties
1664  * as value (int)1.
1665  *
1666  * This never returns an error, if property not found or specifically
1667  * undefined, the input `defvalue' is returned.
1668  */
1669 
1670 int
1671 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1672 {
1673 	int	propvalue = defvalue;
1674 	int	proplength = sizeof (int);
1675 	int	error;
1676 
1677 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1678 	    flags, name, (caddr_t)&propvalue, &proplength);
1679 
1680 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1681 		propvalue = 1;
1682 
1683 	return (propvalue);
1684 }
1685 
1686 /*
1687  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1688  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1689  */
1690 
1691 int
1692 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1693 {
1694 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1695 }
1696 
1697 /*
1698  * Allocate a struct prop_driver_data, along with 'size' bytes
1699  * for decoded property data.  This structure is freed by
1700  * calling ddi_prop_free(9F).
1701  */
1702 static void *
1703 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1704 {
1705 	struct prop_driver_data *pdd;
1706 
1707 	/*
1708 	 * Allocate a structure with enough memory to store the decoded data.
1709 	 */
1710 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1711 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1712 	pdd->pdd_prop_free = prop_free;
1713 
1714 	/*
1715 	 * Return a pointer to the location to put the decoded data.
1716 	 */
1717 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1718 }
1719 
1720 /*
1721  * Allocated the memory needed to store the encoded data in the property
1722  * handle.
1723  */
1724 static int
1725 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1726 {
1727 	/*
1728 	 * If size is zero, then set data to NULL and size to 0.  This
1729 	 * is a boolean property.
1730 	 */
1731 	if (size == 0) {
1732 		ph->ph_size = 0;
1733 		ph->ph_data = NULL;
1734 		ph->ph_cur_pos = NULL;
1735 		ph->ph_save_pos = NULL;
1736 	} else {
1737 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1738 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1739 			if (ph->ph_data == NULL)
1740 				return (DDI_PROP_NO_MEMORY);
1741 		} else
1742 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1743 		ph->ph_size = size;
1744 		ph->ph_cur_pos = ph->ph_data;
1745 		ph->ph_save_pos = ph->ph_data;
1746 	}
1747 	return (DDI_PROP_SUCCESS);
1748 }
1749 
1750 /*
1751  * Free the space allocated by the lookup routines.  Each lookup routine
1752  * returns a pointer to the decoded data to the driver.  The driver then
1753  * passes this pointer back to us.  This data actually lives in a struct
1754  * prop_driver_data.  We use negative indexing to find the beginning of
1755  * the structure and then free the entire structure using the size and
1756  * the free routine stored in the structure.
1757  */
1758 void
1759 ddi_prop_free(void *datap)
1760 {
1761 	struct prop_driver_data *pdd;
1762 
1763 	/*
1764 	 * Get the structure
1765 	 */
1766 	pdd = (struct prop_driver_data *)
1767 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
1768 	/*
1769 	 * Call the free routine to free it
1770 	 */
1771 	(*pdd->pdd_prop_free)(pdd);
1772 }
1773 
1774 /*
1775  * Free the data associated with an array of ints,
1776  * allocated with ddi_prop_decode_alloc().
1777  */
1778 static void
1779 ddi_prop_free_ints(struct prop_driver_data *pdd)
1780 {
1781 	kmem_free(pdd, pdd->pdd_size);
1782 }
1783 
1784 /*
1785  * Free a single string property or a single string contained within
1786  * the argv style return value of an array of strings.
1787  */
1788 static void
1789 ddi_prop_free_string(struct prop_driver_data *pdd)
1790 {
1791 	kmem_free(pdd, pdd->pdd_size);
1792 
1793 }
1794 
1795 /*
1796  * Free an array of strings.
1797  */
1798 static void
1799 ddi_prop_free_strings(struct prop_driver_data *pdd)
1800 {
1801 	kmem_free(pdd, pdd->pdd_size);
1802 }
1803 
1804 /*
1805  * Free the data associated with an array of bytes.
1806  */
1807 static void
1808 ddi_prop_free_bytes(struct prop_driver_data *pdd)
1809 {
1810 	kmem_free(pdd, pdd->pdd_size);
1811 }
1812 
1813 /*
1814  * Reset the current location pointer in the property handle to the
1815  * beginning of the data.
1816  */
1817 void
1818 ddi_prop_reset_pos(prop_handle_t *ph)
1819 {
1820 	ph->ph_cur_pos = ph->ph_data;
1821 	ph->ph_save_pos = ph->ph_data;
1822 }
1823 
1824 /*
1825  * Restore the current location pointer in the property handle to the
1826  * saved position.
1827  */
1828 void
1829 ddi_prop_save_pos(prop_handle_t *ph)
1830 {
1831 	ph->ph_save_pos = ph->ph_cur_pos;
1832 }
1833 
1834 /*
1835  * Save the location that the current location pointer is pointing to..
1836  */
1837 void
1838 ddi_prop_restore_pos(prop_handle_t *ph)
1839 {
1840 	ph->ph_cur_pos = ph->ph_save_pos;
1841 }
1842 
1843 /*
1844  * Property encode/decode functions
1845  */
1846 
1847 /*
1848  * Decode a single integer property
1849  */
1850 static int
1851 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1852 {
1853 	int	i;
1854 	int	tmp;
1855 
1856 	/*
1857 	 * If there is nothing to decode return an error
1858 	 */
1859 	if (ph->ph_size == 0)
1860 		return (DDI_PROP_END_OF_DATA);
1861 
1862 	/*
1863 	 * Decode the property as a single integer and return it
1864 	 * in data if we were able to decode it.
1865 	 */
1866 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1867 	if (i < DDI_PROP_RESULT_OK) {
1868 		switch (i) {
1869 		case DDI_PROP_RESULT_EOF:
1870 			return (DDI_PROP_END_OF_DATA);
1871 
1872 		case DDI_PROP_RESULT_ERROR:
1873 			return (DDI_PROP_CANNOT_DECODE);
1874 		}
1875 	}
1876 
1877 	*(int *)data = tmp;
1878 	*nelements = 1;
1879 	return (DDI_PROP_SUCCESS);
1880 }
1881 
1882 /*
1883  * Decode a single 64 bit integer property
1884  */
1885 static int
1886 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1887 {
1888 	int	i;
1889 	int64_t	tmp;
1890 
1891 	/*
1892 	 * If there is nothing to decode return an error
1893 	 */
1894 	if (ph->ph_size == 0)
1895 		return (DDI_PROP_END_OF_DATA);
1896 
1897 	/*
1898 	 * Decode the property as a single integer and return it
1899 	 * in data if we were able to decode it.
1900 	 */
1901 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1902 	if (i < DDI_PROP_RESULT_OK) {
1903 		switch (i) {
1904 		case DDI_PROP_RESULT_EOF:
1905 			return (DDI_PROP_END_OF_DATA);
1906 
1907 		case DDI_PROP_RESULT_ERROR:
1908 			return (DDI_PROP_CANNOT_DECODE);
1909 		}
1910 	}
1911 
1912 	*(int64_t *)data = tmp;
1913 	*nelements = 1;
1914 	return (DDI_PROP_SUCCESS);
1915 }
1916 
1917 /*
1918  * Decode an array of integers property
1919  */
1920 static int
1921 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
1922 {
1923 	int	i;
1924 	int	cnt = 0;
1925 	int	*tmp;
1926 	int	*intp;
1927 	int	n;
1928 
1929 	/*
1930 	 * Figure out how many array elements there are by going through the
1931 	 * data without decoding it first and counting.
1932 	 */
1933 	for (;;) {
1934 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
1935 		if (i < 0)
1936 			break;
1937 		cnt++;
1938 	}
1939 
1940 	/*
1941 	 * If there are no elements return an error
1942 	 */
1943 	if (cnt == 0)
1944 		return (DDI_PROP_END_OF_DATA);
1945 
1946 	/*
1947 	 * If we cannot skip through the data, we cannot decode it
1948 	 */
1949 	if (i == DDI_PROP_RESULT_ERROR)
1950 		return (DDI_PROP_CANNOT_DECODE);
1951 
1952 	/*
1953 	 * Reset the data pointer to the beginning of the encoded data
1954 	 */
1955 	ddi_prop_reset_pos(ph);
1956 
1957 	/*
1958 	 * Allocated memory to store the decoded value in.
1959 	 */
1960 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
1961 	    ddi_prop_free_ints);
1962 
1963 	/*
1964 	 * Decode each element and place it in the space we just allocated
1965 	 */
1966 	tmp = intp;
1967 	for (n = 0; n < cnt; n++, tmp++) {
1968 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
1969 		if (i < DDI_PROP_RESULT_OK) {
1970 			/*
1971 			 * Free the space we just allocated
1972 			 * and return an error.
1973 			 */
1974 			ddi_prop_free(intp);
1975 			switch (i) {
1976 			case DDI_PROP_RESULT_EOF:
1977 				return (DDI_PROP_END_OF_DATA);
1978 
1979 			case DDI_PROP_RESULT_ERROR:
1980 				return (DDI_PROP_CANNOT_DECODE);
1981 			}
1982 		}
1983 	}
1984 
1985 	*nelements = cnt;
1986 	*(int **)data = intp;
1987 
1988 	return (DDI_PROP_SUCCESS);
1989 }
1990 
1991 /*
1992  * Decode a 64 bit integer array property
1993  */
1994 static int
1995 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
1996 {
1997 	int	i;
1998 	int	n;
1999 	int	cnt = 0;
2000 	int64_t	*tmp;
2001 	int64_t	*intp;
2002 
2003 	/*
2004 	 * Count the number of array elements by going
2005 	 * through the data without decoding it.
2006 	 */
2007 	for (;;) {
2008 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2009 		if (i < 0)
2010 			break;
2011 		cnt++;
2012 	}
2013 
2014 	/*
2015 	 * If there are no elements return an error
2016 	 */
2017 	if (cnt == 0)
2018 		return (DDI_PROP_END_OF_DATA);
2019 
2020 	/*
2021 	 * If we cannot skip through the data, we cannot decode it
2022 	 */
2023 	if (i == DDI_PROP_RESULT_ERROR)
2024 		return (DDI_PROP_CANNOT_DECODE);
2025 
2026 	/*
2027 	 * Reset the data pointer to the beginning of the encoded data
2028 	 */
2029 	ddi_prop_reset_pos(ph);
2030 
2031 	/*
2032 	 * Allocate memory to store the decoded value.
2033 	 */
2034 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2035 	    ddi_prop_free_ints);
2036 
2037 	/*
2038 	 * Decode each element and place it in the space allocated
2039 	 */
2040 	tmp = intp;
2041 	for (n = 0; n < cnt; n++, tmp++) {
2042 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2043 		if (i < DDI_PROP_RESULT_OK) {
2044 			/*
2045 			 * Free the space we just allocated
2046 			 * and return an error.
2047 			 */
2048 			ddi_prop_free(intp);
2049 			switch (i) {
2050 			case DDI_PROP_RESULT_EOF:
2051 				return (DDI_PROP_END_OF_DATA);
2052 
2053 			case DDI_PROP_RESULT_ERROR:
2054 				return (DDI_PROP_CANNOT_DECODE);
2055 			}
2056 		}
2057 	}
2058 
2059 	*nelements = cnt;
2060 	*(int64_t **)data = intp;
2061 
2062 	return (DDI_PROP_SUCCESS);
2063 }
2064 
2065 /*
2066  * Encode an array of integers property (Can be one element)
2067  */
2068 int
2069 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2070 {
2071 	int	i;
2072 	int	*tmp;
2073 	int	cnt;
2074 	int	size;
2075 
2076 	/*
2077 	 * If there is no data, we cannot do anything
2078 	 */
2079 	if (nelements == 0)
2080 		return (DDI_PROP_CANNOT_ENCODE);
2081 
2082 	/*
2083 	 * Get the size of an encoded int.
2084 	 */
2085 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2086 
2087 	if (size < DDI_PROP_RESULT_OK) {
2088 		switch (size) {
2089 		case DDI_PROP_RESULT_EOF:
2090 			return (DDI_PROP_END_OF_DATA);
2091 
2092 		case DDI_PROP_RESULT_ERROR:
2093 			return (DDI_PROP_CANNOT_ENCODE);
2094 		}
2095 	}
2096 
2097 	/*
2098 	 * Allocate space in the handle to store the encoded int.
2099 	 */
2100 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2101 	    DDI_PROP_SUCCESS)
2102 		return (DDI_PROP_NO_MEMORY);
2103 
2104 	/*
2105 	 * Encode the array of ints.
2106 	 */
2107 	tmp = (int *)data;
2108 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2109 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2110 		if (i < DDI_PROP_RESULT_OK) {
2111 			switch (i) {
2112 			case DDI_PROP_RESULT_EOF:
2113 				return (DDI_PROP_END_OF_DATA);
2114 
2115 			case DDI_PROP_RESULT_ERROR:
2116 				return (DDI_PROP_CANNOT_ENCODE);
2117 			}
2118 		}
2119 	}
2120 
2121 	return (DDI_PROP_SUCCESS);
2122 }
2123 
2124 
2125 /*
2126  * Encode a 64 bit integer array property
2127  */
2128 int
2129 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2130 {
2131 	int i;
2132 	int cnt;
2133 	int size;
2134 	int64_t *tmp;
2135 
2136 	/*
2137 	 * If there is no data, we cannot do anything
2138 	 */
2139 	if (nelements == 0)
2140 		return (DDI_PROP_CANNOT_ENCODE);
2141 
2142 	/*
2143 	 * Get the size of an encoded 64 bit int.
2144 	 */
2145 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2146 
2147 	if (size < DDI_PROP_RESULT_OK) {
2148 		switch (size) {
2149 		case DDI_PROP_RESULT_EOF:
2150 			return (DDI_PROP_END_OF_DATA);
2151 
2152 		case DDI_PROP_RESULT_ERROR:
2153 			return (DDI_PROP_CANNOT_ENCODE);
2154 		}
2155 	}
2156 
2157 	/*
2158 	 * Allocate space in the handle to store the encoded int.
2159 	 */
2160 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2161 	    DDI_PROP_SUCCESS)
2162 		return (DDI_PROP_NO_MEMORY);
2163 
2164 	/*
2165 	 * Encode the array of ints.
2166 	 */
2167 	tmp = (int64_t *)data;
2168 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2169 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2170 		if (i < DDI_PROP_RESULT_OK) {
2171 			switch (i) {
2172 			case DDI_PROP_RESULT_EOF:
2173 				return (DDI_PROP_END_OF_DATA);
2174 
2175 			case DDI_PROP_RESULT_ERROR:
2176 				return (DDI_PROP_CANNOT_ENCODE);
2177 			}
2178 		}
2179 	}
2180 
2181 	return (DDI_PROP_SUCCESS);
2182 }
2183 
2184 /*
2185  * Decode a single string property
2186  */
2187 static int
2188 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2189 {
2190 	char		*tmp;
2191 	char		*str;
2192 	int		i;
2193 	int		size;
2194 
2195 	/*
2196 	 * If there is nothing to decode return an error
2197 	 */
2198 	if (ph->ph_size == 0)
2199 		return (DDI_PROP_END_OF_DATA);
2200 
2201 	/*
2202 	 * Get the decoded size of the encoded string.
2203 	 */
2204 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2205 	if (size < DDI_PROP_RESULT_OK) {
2206 		switch (size) {
2207 		case DDI_PROP_RESULT_EOF:
2208 			return (DDI_PROP_END_OF_DATA);
2209 
2210 		case DDI_PROP_RESULT_ERROR:
2211 			return (DDI_PROP_CANNOT_DECODE);
2212 		}
2213 	}
2214 
2215 	/*
2216 	 * Allocated memory to store the decoded value in.
2217 	 */
2218 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2219 
2220 	ddi_prop_reset_pos(ph);
2221 
2222 	/*
2223 	 * Decode the str and place it in the space we just allocated
2224 	 */
2225 	tmp = str;
2226 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2227 	if (i < DDI_PROP_RESULT_OK) {
2228 		/*
2229 		 * Free the space we just allocated
2230 		 * and return an error.
2231 		 */
2232 		ddi_prop_free(str);
2233 		switch (i) {
2234 		case DDI_PROP_RESULT_EOF:
2235 			return (DDI_PROP_END_OF_DATA);
2236 
2237 		case DDI_PROP_RESULT_ERROR:
2238 			return (DDI_PROP_CANNOT_DECODE);
2239 		}
2240 	}
2241 
2242 	*(char **)data = str;
2243 	*nelements = 1;
2244 
2245 	return (DDI_PROP_SUCCESS);
2246 }
2247 
2248 /*
2249  * Decode an array of strings.
2250  */
2251 int
2252 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2253 {
2254 	int		cnt = 0;
2255 	char		**strs;
2256 	char		**tmp;
2257 	char		*ptr;
2258 	int		i;
2259 	int		n;
2260 	int		size;
2261 	size_t		nbytes;
2262 
2263 	/*
2264 	 * Figure out how many array elements there are by going through the
2265 	 * data without decoding it first and counting.
2266 	 */
2267 	for (;;) {
2268 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2269 		if (i < 0)
2270 			break;
2271 		cnt++;
2272 	}
2273 
2274 	/*
2275 	 * If there are no elements return an error
2276 	 */
2277 	if (cnt == 0)
2278 		return (DDI_PROP_END_OF_DATA);
2279 
2280 	/*
2281 	 * If we cannot skip through the data, we cannot decode it
2282 	 */
2283 	if (i == DDI_PROP_RESULT_ERROR)
2284 		return (DDI_PROP_CANNOT_DECODE);
2285 
2286 	/*
2287 	 * Reset the data pointer to the beginning of the encoded data
2288 	 */
2289 	ddi_prop_reset_pos(ph);
2290 
2291 	/*
2292 	 * Figure out how much memory we need for the sum total
2293 	 */
2294 	nbytes = (cnt + 1) * sizeof (char *);
2295 
2296 	for (n = 0; n < cnt; n++) {
2297 		/*
2298 		 * Get the decoded size of the current encoded string.
2299 		 */
2300 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2301 		if (size < DDI_PROP_RESULT_OK) {
2302 			switch (size) {
2303 			case DDI_PROP_RESULT_EOF:
2304 				return (DDI_PROP_END_OF_DATA);
2305 
2306 			case DDI_PROP_RESULT_ERROR:
2307 				return (DDI_PROP_CANNOT_DECODE);
2308 			}
2309 		}
2310 
2311 		nbytes += size;
2312 	}
2313 
2314 	/*
2315 	 * Allocate memory in which to store the decoded strings.
2316 	 */
2317 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2318 
2319 	/*
2320 	 * Set up pointers for each string by figuring out yet
2321 	 * again how long each string is.
2322 	 */
2323 	ddi_prop_reset_pos(ph);
2324 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2325 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2326 		/*
2327 		 * Get the decoded size of the current encoded string.
2328 		 */
2329 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2330 		if (size < DDI_PROP_RESULT_OK) {
2331 			ddi_prop_free(strs);
2332 			switch (size) {
2333 			case DDI_PROP_RESULT_EOF:
2334 				return (DDI_PROP_END_OF_DATA);
2335 
2336 			case DDI_PROP_RESULT_ERROR:
2337 				return (DDI_PROP_CANNOT_DECODE);
2338 			}
2339 		}
2340 
2341 		*tmp = ptr;
2342 		ptr += size;
2343 	}
2344 
2345 	/*
2346 	 * String array is terminated by a NULL
2347 	 */
2348 	*tmp = NULL;
2349 
2350 	/*
2351 	 * Finally, we can decode each string
2352 	 */
2353 	ddi_prop_reset_pos(ph);
2354 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2355 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2356 		if (i < DDI_PROP_RESULT_OK) {
2357 			/*
2358 			 * Free the space we just allocated
2359 			 * and return an error
2360 			 */
2361 			ddi_prop_free(strs);
2362 			switch (i) {
2363 			case DDI_PROP_RESULT_EOF:
2364 				return (DDI_PROP_END_OF_DATA);
2365 
2366 			case DDI_PROP_RESULT_ERROR:
2367 				return (DDI_PROP_CANNOT_DECODE);
2368 			}
2369 		}
2370 	}
2371 
2372 	*(char ***)data = strs;
2373 	*nelements = cnt;
2374 
2375 	return (DDI_PROP_SUCCESS);
2376 }
2377 
2378 /*
2379  * Encode a string.
2380  */
2381 int
2382 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2383 {
2384 	char		**tmp;
2385 	int		size;
2386 	int		i;
2387 
2388 	/*
2389 	 * If there is no data, we cannot do anything
2390 	 */
2391 	if (nelements == 0)
2392 		return (DDI_PROP_CANNOT_ENCODE);
2393 
2394 	/*
2395 	 * Get the size of the encoded string.
2396 	 */
2397 	tmp = (char **)data;
2398 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2399 	if (size < DDI_PROP_RESULT_OK) {
2400 		switch (size) {
2401 		case DDI_PROP_RESULT_EOF:
2402 			return (DDI_PROP_END_OF_DATA);
2403 
2404 		case DDI_PROP_RESULT_ERROR:
2405 			return (DDI_PROP_CANNOT_ENCODE);
2406 		}
2407 	}
2408 
2409 	/*
2410 	 * Allocate space in the handle to store the encoded string.
2411 	 */
2412 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2413 		return (DDI_PROP_NO_MEMORY);
2414 
2415 	ddi_prop_reset_pos(ph);
2416 
2417 	/*
2418 	 * Encode the string.
2419 	 */
2420 	tmp = (char **)data;
2421 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2422 	if (i < DDI_PROP_RESULT_OK) {
2423 		switch (i) {
2424 		case DDI_PROP_RESULT_EOF:
2425 			return (DDI_PROP_END_OF_DATA);
2426 
2427 		case DDI_PROP_RESULT_ERROR:
2428 			return (DDI_PROP_CANNOT_ENCODE);
2429 		}
2430 	}
2431 
2432 	return (DDI_PROP_SUCCESS);
2433 }
2434 
2435 
2436 /*
2437  * Encode an array of strings.
2438  */
2439 int
2440 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2441 {
2442 	int		cnt = 0;
2443 	char		**tmp;
2444 	int		size;
2445 	uint_t		total_size;
2446 	int		i;
2447 
2448 	/*
2449 	 * If there is no data, we cannot do anything
2450 	 */
2451 	if (nelements == 0)
2452 		return (DDI_PROP_CANNOT_ENCODE);
2453 
2454 	/*
2455 	 * Get the total size required to encode all the strings.
2456 	 */
2457 	total_size = 0;
2458 	tmp = (char **)data;
2459 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2460 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2461 		if (size < DDI_PROP_RESULT_OK) {
2462 			switch (size) {
2463 			case DDI_PROP_RESULT_EOF:
2464 				return (DDI_PROP_END_OF_DATA);
2465 
2466 			case DDI_PROP_RESULT_ERROR:
2467 				return (DDI_PROP_CANNOT_ENCODE);
2468 			}
2469 		}
2470 		total_size += (uint_t)size;
2471 	}
2472 
2473 	/*
2474 	 * Allocate space in the handle to store the encoded strings.
2475 	 */
2476 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2477 		return (DDI_PROP_NO_MEMORY);
2478 
2479 	ddi_prop_reset_pos(ph);
2480 
2481 	/*
2482 	 * Encode the array of strings.
2483 	 */
2484 	tmp = (char **)data;
2485 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2486 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2487 		if (i < DDI_PROP_RESULT_OK) {
2488 			switch (i) {
2489 			case DDI_PROP_RESULT_EOF:
2490 				return (DDI_PROP_END_OF_DATA);
2491 
2492 			case DDI_PROP_RESULT_ERROR:
2493 				return (DDI_PROP_CANNOT_ENCODE);
2494 			}
2495 		}
2496 	}
2497 
2498 	return (DDI_PROP_SUCCESS);
2499 }
2500 
2501 
2502 /*
2503  * Decode an array of bytes.
2504  */
2505 static int
2506 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2507 {
2508 	uchar_t		*tmp;
2509 	int		nbytes;
2510 	int		i;
2511 
2512 	/*
2513 	 * If there are no elements return an error
2514 	 */
2515 	if (ph->ph_size == 0)
2516 		return (DDI_PROP_END_OF_DATA);
2517 
2518 	/*
2519 	 * Get the size of the encoded array of bytes.
2520 	 */
2521 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2522 	    data, ph->ph_size);
2523 	if (nbytes < DDI_PROP_RESULT_OK) {
2524 		switch (nbytes) {
2525 		case DDI_PROP_RESULT_EOF:
2526 			return (DDI_PROP_END_OF_DATA);
2527 
2528 		case DDI_PROP_RESULT_ERROR:
2529 			return (DDI_PROP_CANNOT_DECODE);
2530 		}
2531 	}
2532 
2533 	/*
2534 	 * Allocated memory to store the decoded value in.
2535 	 */
2536 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2537 
2538 	/*
2539 	 * Decode each element and place it in the space we just allocated
2540 	 */
2541 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2542 	if (i < DDI_PROP_RESULT_OK) {
2543 		/*
2544 		 * Free the space we just allocated
2545 		 * and return an error
2546 		 */
2547 		ddi_prop_free(tmp);
2548 		switch (i) {
2549 		case DDI_PROP_RESULT_EOF:
2550 			return (DDI_PROP_END_OF_DATA);
2551 
2552 		case DDI_PROP_RESULT_ERROR:
2553 			return (DDI_PROP_CANNOT_DECODE);
2554 		}
2555 	}
2556 
2557 	*(uchar_t **)data = tmp;
2558 	*nelements = nbytes;
2559 
2560 	return (DDI_PROP_SUCCESS);
2561 }
2562 
2563 /*
2564  * Encode an array of bytes.
2565  */
2566 int
2567 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2568 {
2569 	int		size;
2570 	int		i;
2571 
2572 	/*
2573 	 * If there are no elements, then this is a boolean property,
2574 	 * so just create a property handle with no data and return.
2575 	 */
2576 	if (nelements == 0) {
2577 		(void) ddi_prop_encode_alloc(ph, 0);
2578 		return (DDI_PROP_SUCCESS);
2579 	}
2580 
2581 	/*
2582 	 * Get the size of the encoded array of bytes.
2583 	 */
2584 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2585 	    nelements);
2586 	if (size < DDI_PROP_RESULT_OK) {
2587 		switch (size) {
2588 		case DDI_PROP_RESULT_EOF:
2589 			return (DDI_PROP_END_OF_DATA);
2590 
2591 		case DDI_PROP_RESULT_ERROR:
2592 			return (DDI_PROP_CANNOT_DECODE);
2593 		}
2594 	}
2595 
2596 	/*
2597 	 * Allocate space in the handle to store the encoded bytes.
2598 	 */
2599 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2600 		return (DDI_PROP_NO_MEMORY);
2601 
2602 	/*
2603 	 * Encode the array of bytes.
2604 	 */
2605 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2606 	    nelements);
2607 	if (i < DDI_PROP_RESULT_OK) {
2608 		switch (i) {
2609 		case DDI_PROP_RESULT_EOF:
2610 			return (DDI_PROP_END_OF_DATA);
2611 
2612 		case DDI_PROP_RESULT_ERROR:
2613 			return (DDI_PROP_CANNOT_ENCODE);
2614 		}
2615 	}
2616 
2617 	return (DDI_PROP_SUCCESS);
2618 }
2619 
2620 /*
2621  * OBP 1275 integer, string and byte operators.
2622  *
2623  * DDI_PROP_CMD_DECODE:
2624  *
2625  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2626  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
2628  *
2629  * DDI_PROP_CMD_ENCODE:
2630  *
2631  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2632  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
2634  *
2635  * DDI_PROP_CMD_SKIP:
2636  *
2637  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2638  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
2640  *
2641  * DDI_PROP_CMD_GET_ESIZE:
2642  *
2643  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2644  *	DDI_PROP_RESULT_EOF:		end of data
2645  *	> 0:				the encoded size
2646  *
2647  * DDI_PROP_CMD_GET_DSIZE:
2648  *
2649  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2650  *	DDI_PROP_RESULT_EOF:		end of data
2651  *	> 0:				the decoded size
2652  */
2653 
2654 /*
2655  * OBP 1275 integer operator
2656  *
2657  * OBP properties are a byte stream of data, so integers may not be
2658  * properly aligned.  Therefore we need to copy them one byte at a time.
2659  */
2660 int
2661 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2662 {
2663 	int	i;
2664 
2665 	switch (cmd) {
2666 	case DDI_PROP_CMD_DECODE:
2667 		/*
2668 		 * Check that there is encoded data
2669 		 */
2670 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2671 			return (DDI_PROP_RESULT_ERROR);
2672 		if (ph->ph_flags & PH_FROM_PROM) {
2673 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2674 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2675 			    ph->ph_size - i))
2676 				return (DDI_PROP_RESULT_ERROR);
2677 		} else {
2678 			if (ph->ph_size < sizeof (int) ||
2679 			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2680 			    ph->ph_size - sizeof (int))))
2681 				return (DDI_PROP_RESULT_ERROR);
2682 		}
2683 
2684 		/*
2685 		 * Copy the integer, using the implementation-specific
2686 		 * copy function if the property is coming from the PROM.
2687 		 */
2688 		if (ph->ph_flags & PH_FROM_PROM) {
2689 			*data = impl_ddi_prop_int_from_prom(
2690 			    (uchar_t *)ph->ph_cur_pos,
2691 			    (ph->ph_size < PROP_1275_INT_SIZE) ?
2692 			    ph->ph_size : PROP_1275_INT_SIZE);
2693 		} else {
2694 			bcopy(ph->ph_cur_pos, data, sizeof (int));
2695 		}
2696 
2697 		/*
2698 		 * Move the current location to the start of the next
2699 		 * bit of undecoded data.
2700 		 */
2701 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2702 		    PROP_1275_INT_SIZE;
2703 		return (DDI_PROP_RESULT_OK);
2704 
2705 	case DDI_PROP_CMD_ENCODE:
2706 		/*
2707 		 * Check that there is room to encoded the data
2708 		 */
2709 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2710 		    ph->ph_size < PROP_1275_INT_SIZE ||
2711 		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2712 		    ph->ph_size - sizeof (int))))
2713 			return (DDI_PROP_RESULT_ERROR);
2714 
2715 		/*
2716 		 * Encode the integer into the byte stream one byte at a
2717 		 * time.
2718 		 */
2719 		bcopy(data, ph->ph_cur_pos, sizeof (int));
2720 
2721 		/*
2722 		 * Move the current location to the start of the next bit of
2723 		 * space where we can store encoded data.
2724 		 */
2725 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2726 		return (DDI_PROP_RESULT_OK);
2727 
2728 	case DDI_PROP_CMD_SKIP:
2729 		/*
2730 		 * Check that there is encoded data
2731 		 */
2732 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2733 		    ph->ph_size < PROP_1275_INT_SIZE)
2734 			return (DDI_PROP_RESULT_ERROR);
2735 
2736 
2737 		if ((caddr_t)ph->ph_cur_pos ==
2738 		    (caddr_t)ph->ph_data + ph->ph_size) {
2739 			return (DDI_PROP_RESULT_EOF);
2740 		} else if ((caddr_t)ph->ph_cur_pos >
2741 		    (caddr_t)ph->ph_data + ph->ph_size) {
2742 			return (DDI_PROP_RESULT_EOF);
2743 		}
2744 
2745 		/*
2746 		 * Move the current location to the start of the next bit of
2747 		 * undecoded data.
2748 		 */
2749 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2750 		return (DDI_PROP_RESULT_OK);
2751 
2752 	case DDI_PROP_CMD_GET_ESIZE:
2753 		/*
2754 		 * Return the size of an encoded integer on OBP
2755 		 */
2756 		return (PROP_1275_INT_SIZE);
2757 
2758 	case DDI_PROP_CMD_GET_DSIZE:
2759 		/*
2760 		 * Return the size of a decoded integer on the system.
2761 		 */
2762 		return (sizeof (int));
2763 
2764 	default:
2765 #ifdef DEBUG
2766 		panic("ddi_prop_1275_int: %x impossible", cmd);
2767 		/*NOTREACHED*/
2768 #else
2769 		return (DDI_PROP_RESULT_ERROR);
2770 #endif	/* DEBUG */
2771 	}
2772 }
2773 
2774 /*
2775  * 64 bit integer operator.
2776  *
2777  * This is an extension, defined by Sun, to the 1275 integer
2778  * operator.  This routine handles the encoding/decoding of
2779  * 64 bit integer properties.
2780  */
2781 int
2782 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2783 {
2784 
2785 	switch (cmd) {
2786 	case DDI_PROP_CMD_DECODE:
2787 		/*
2788 		 * Check that there is encoded data
2789 		 */
2790 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2791 			return (DDI_PROP_RESULT_ERROR);
2792 		if (ph->ph_flags & PH_FROM_PROM) {
2793 			return (DDI_PROP_RESULT_ERROR);
2794 		} else {
2795 			if (ph->ph_size < sizeof (int64_t) ||
2796 			    ((int64_t *)ph->ph_cur_pos >
2797 			    ((int64_t *)ph->ph_data +
2798 			    ph->ph_size - sizeof (int64_t))))
2799 				return (DDI_PROP_RESULT_ERROR);
2800 		}
2801 		/*
2802 		 * Copy the integer, using the implementation-specific
2803 		 * copy function if the property is coming from the PROM.
2804 		 */
2805 		if (ph->ph_flags & PH_FROM_PROM) {
2806 			return (DDI_PROP_RESULT_ERROR);
2807 		} else {
2808 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2809 		}
2810 
2811 		/*
2812 		 * Move the current location to the start of the next
2813 		 * bit of undecoded data.
2814 		 */
2815 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2816 		    sizeof (int64_t);
2817 		return (DDI_PROP_RESULT_OK);
2818 
2819 	case DDI_PROP_CMD_ENCODE:
2820 		/*
2821 		 * Check that there is room to encoded the data
2822 		 */
2823 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2824 		    ph->ph_size < sizeof (int64_t) ||
2825 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2826 		    ph->ph_size - sizeof (int64_t))))
2827 			return (DDI_PROP_RESULT_ERROR);
2828 
2829 		/*
2830 		 * Encode the integer into the byte stream one byte at a
2831 		 * time.
2832 		 */
2833 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2834 
2835 		/*
2836 		 * Move the current location to the start of the next bit of
2837 		 * space where we can store encoded data.
2838 		 */
2839 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2840 		    sizeof (int64_t);
2841 		return (DDI_PROP_RESULT_OK);
2842 
2843 	case DDI_PROP_CMD_SKIP:
2844 		/*
2845 		 * Check that there is encoded data
2846 		 */
2847 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2848 		    ph->ph_size < sizeof (int64_t))
2849 			return (DDI_PROP_RESULT_ERROR);
2850 
2851 		if ((caddr_t)ph->ph_cur_pos ==
2852 		    (caddr_t)ph->ph_data + ph->ph_size) {
2853 			return (DDI_PROP_RESULT_EOF);
2854 		} else if ((caddr_t)ph->ph_cur_pos >
2855 		    (caddr_t)ph->ph_data + ph->ph_size) {
2856 			return (DDI_PROP_RESULT_EOF);
2857 		}
2858 
2859 		/*
2860 		 * Move the current location to the start of
2861 		 * the next bit of undecoded data.
2862 		 */
2863 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2864 		    sizeof (int64_t);
2865 		return (DDI_PROP_RESULT_OK);
2866 
2867 	case DDI_PROP_CMD_GET_ESIZE:
2868 		/*
2869 		 * Return the size of an encoded integer on OBP
2870 		 */
2871 		return (sizeof (int64_t));
2872 
2873 	case DDI_PROP_CMD_GET_DSIZE:
2874 		/*
2875 		 * Return the size of a decoded integer on the system.
2876 		 */
2877 		return (sizeof (int64_t));
2878 
2879 	default:
2880 #ifdef DEBUG
2881 		panic("ddi_prop_int64_op: %x impossible", cmd);
2882 		/*NOTREACHED*/
2883 #else
2884 		return (DDI_PROP_RESULT_ERROR);
2885 #endif  /* DEBUG */
2886 	}
2887 }
2888 
2889 /*
2890  * OBP 1275 string operator.
2891  *
2892  * OBP strings are NULL terminated.
2893  */
2894 int
2895 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
2896 {
2897 	int	n;
2898 	char	*p;
2899 	char	*end;
2900 
2901 	switch (cmd) {
2902 	case DDI_PROP_CMD_DECODE:
2903 		/*
2904 		 * Check that there is encoded data
2905 		 */
2906 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2907 			return (DDI_PROP_RESULT_ERROR);
2908 		}
2909 
2910 		/*
2911 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2912 		 * how to NULL terminate result.
2913 		 */
2914 		p = (char *)ph->ph_cur_pos;
2915 		end = (char *)ph->ph_data + ph->ph_size;
2916 		if (p >= end)
2917 			return (DDI_PROP_RESULT_EOF);
2918 
2919 		while (p < end) {
2920 			*data++ = *p;
2921 			if (*p++ == 0) {	/* NULL from OBP */
2922 				ph->ph_cur_pos = p;
2923 				return (DDI_PROP_RESULT_OK);
2924 			}
2925 		}
2926 
2927 		/*
2928 		 * If OBP did not NULL terminate string, which happens
2929 		 * (at least) for 'true'/'false' boolean values, account for
2930 		 * the space and store null termination on decode.
2931 		 */
2932 		ph->ph_cur_pos = p;
2933 		*data = 0;
2934 		return (DDI_PROP_RESULT_OK);
2935 
2936 	case DDI_PROP_CMD_ENCODE:
2937 		/*
2938 		 * Check that there is room to encoded the data
2939 		 */
2940 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2941 			return (DDI_PROP_RESULT_ERROR);
2942 		}
2943 
2944 		n = strlen(data) + 1;
2945 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
2946 		    ph->ph_size - n)) {
2947 			return (DDI_PROP_RESULT_ERROR);
2948 		}
2949 
2950 		/*
2951 		 * Copy the NULL terminated string
2952 		 */
2953 		bcopy(data, ph->ph_cur_pos, n);
2954 
2955 		/*
2956 		 * Move the current location to the start of the next bit of
2957 		 * space where we can store encoded data.
2958 		 */
2959 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
2960 		return (DDI_PROP_RESULT_OK);
2961 
2962 	case DDI_PROP_CMD_SKIP:
2963 		/*
2964 		 * Check that there is encoded data
2965 		 */
2966 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2967 			return (DDI_PROP_RESULT_ERROR);
2968 		}
2969 
2970 		/*
2971 		 * Return the string length plus one for the NULL
2972 		 * We know the size of the property, we need to
2973 		 * ensure that the string is properly formatted,
2974 		 * since we may be looking up random OBP data.
2975 		 */
2976 		p = (char *)ph->ph_cur_pos;
2977 		end = (char *)ph->ph_data + ph->ph_size;
2978 		if (p >= end)
2979 			return (DDI_PROP_RESULT_EOF);
2980 
2981 		while (p < end) {
2982 			if (*p++ == 0) {	/* NULL from OBP */
2983 				ph->ph_cur_pos = p;
2984 				return (DDI_PROP_RESULT_OK);
2985 			}
2986 		}
2987 
2988 		/*
2989 		 * Accommodate the fact that OBP does not always NULL
2990 		 * terminate strings.
2991 		 */
2992 		ph->ph_cur_pos = p;
2993 		return (DDI_PROP_RESULT_OK);
2994 
2995 	case DDI_PROP_CMD_GET_ESIZE:
2996 		/*
2997 		 * Return the size of the encoded string on OBP.
2998 		 */
2999 		return (strlen(data) + 1);
3000 
3001 	case DDI_PROP_CMD_GET_DSIZE:
3002 		/*
3003 		 * Return the string length plus one for the NULL.
3004 		 * We know the size of the property, we need to
3005 		 * ensure that the string is properly formatted,
3006 		 * since we may be looking up random OBP data.
3007 		 */
3008 		p = (char *)ph->ph_cur_pos;
3009 		end = (char *)ph->ph_data + ph->ph_size;
3010 		if (p >= end)
3011 			return (DDI_PROP_RESULT_EOF);
3012 
3013 		for (n = 0; p < end; n++) {
3014 			if (*p++ == 0) {	/* NULL from OBP */
3015 				ph->ph_cur_pos = p;
3016 				return (n + 1);
3017 			}
3018 		}
3019 
3020 		/*
3021 		 * If OBP did not NULL terminate string, which happens for
3022 		 * 'true'/'false' boolean values, account for the space
3023 		 * to store null termination here.
3024 		 */
3025 		ph->ph_cur_pos = p;
3026 		return (n + 1);
3027 
3028 	default:
3029 #ifdef DEBUG
3030 		panic("ddi_prop_1275_string: %x impossible", cmd);
3031 		/*NOTREACHED*/
3032 #else
3033 		return (DDI_PROP_RESULT_ERROR);
3034 #endif	/* DEBUG */
3035 	}
3036 }
3037 
3038 /*
3039  * OBP 1275 byte operator
3040  *
3041  * Caller must specify the number of bytes to get.  OBP encodes bytes
3042  * as a byte so there is a 1-to-1 translation.
3043  */
3044 int
3045 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3046     uint_t nelements)
3047 {
3048 	switch (cmd) {
3049 	case DDI_PROP_CMD_DECODE:
3050 		/*
3051 		 * Check that there is encoded data
3052 		 */
3053 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3054 		    ph->ph_size < nelements ||
3055 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3056 		    ph->ph_size - nelements)))
3057 			return (DDI_PROP_RESULT_ERROR);
3058 
3059 		/*
3060 		 * Copy out the bytes
3061 		 */
3062 		bcopy(ph->ph_cur_pos, data, nelements);
3063 
3064 		/*
3065 		 * Move the current location
3066 		 */
3067 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3068 		return (DDI_PROP_RESULT_OK);
3069 
3070 	case DDI_PROP_CMD_ENCODE:
3071 		/*
3072 		 * Check that there is room to encode the data
3073 		 */
3074 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3075 		    ph->ph_size < nelements ||
3076 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3077 		    ph->ph_size - nelements)))
3078 			return (DDI_PROP_RESULT_ERROR);
3079 
3080 		/*
3081 		 * Copy in the bytes
3082 		 */
3083 		bcopy(data, ph->ph_cur_pos, nelements);
3084 
3085 		/*
3086 		 * Move the current location to the start of the next bit of
3087 		 * space where we can store encoded data.
3088 		 */
3089 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3090 		return (DDI_PROP_RESULT_OK);
3091 
3092 	case DDI_PROP_CMD_SKIP:
3093 		/*
3094 		 * Check that there is encoded data
3095 		 */
3096 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3097 		    ph->ph_size < nelements)
3098 			return (DDI_PROP_RESULT_ERROR);
3099 
3100 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3101 		    ph->ph_size - nelements))
3102 			return (DDI_PROP_RESULT_EOF);
3103 
3104 		/*
3105 		 * Move the current location
3106 		 */
3107 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3108 		return (DDI_PROP_RESULT_OK);
3109 
3110 	case DDI_PROP_CMD_GET_ESIZE:
3111 		/*
3112 		 * The size in bytes of the encoded size is the
3113 		 * same as the decoded size provided by the caller.
3114 		 */
3115 		return (nelements);
3116 
3117 	case DDI_PROP_CMD_GET_DSIZE:
3118 		/*
3119 		 * Just return the number of bytes specified by the caller.
3120 		 */
3121 		return (nelements);
3122 
3123 	default:
3124 #ifdef DEBUG
3125 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3126 		/*NOTREACHED*/
3127 #else
3128 		return (DDI_PROP_RESULT_ERROR);
3129 #endif	/* DEBUG */
3130 	}
3131 }
3132 
3133 /*
3134  * Used for properties that come from the OBP, hardware configuration files,
3135  * or that are created by calls to ddi_prop_update(9F).
3136  */
3137 static struct prop_handle_ops prop_1275_ops = {
3138 	ddi_prop_1275_int,
3139 	ddi_prop_1275_string,
3140 	ddi_prop_1275_bytes,
3141 	ddi_prop_int64_op
3142 };
3143 
3144 
3145 /*
3146  * Interface to create/modify a managed property on child's behalf...
3147  * Flags interpreted are:
3148  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3149  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3150  *
3151  * Use same dev_t when modifying or undefining a property.
3152  * Search for properties with DDI_DEV_T_ANY to match first named
3153  * property on the list.
3154  *
3155  * Properties are stored LIFO and subsequently will match the first
3156  * `matching' instance.
3157  */
3158 
3159 /*
3160  * ddi_prop_add:	Add a software defined property
3161  */
3162 
3163 /*
3164  * define to get a new ddi_prop_t.
3165  * km_flags are KM_SLEEP or KM_NOSLEEP.
3166  */
3167 
3168 #define	DDI_NEW_PROP_T(km_flags)	\
3169 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3170 
/*
 * Add a software-defined property to the devinfo node's property list.
 *
 * Flags:
 *	DDI_PROP_CANSLEEP	allocate with KM_SLEEP (default KM_NOSLEEP)
 *	DDI_PROP_SYSTEM_DEF	add to the system property list
 *	DDI_PROP_HW_DEF		add to the hardware property list
 *	DDI_PROP_UNDEF_IT	record an explicit undefine (no value copied)
 *	DDI_PROP_TYPE_MASK bits	recorded as the property's type
 *
 * The new property is linked at the head of the chosen list (LIFO)
 * under devi_lock.  Returns DDI_PROP_SUCCESS, DDI_PROP_INVAL_ARG, or
 * DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select the target list; the driver list is the default. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				/* Unwind: free name then the prop itself. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3269 
3270 
3271 /*
3272  * ddi_prop_change:	Modify a software managed property value
3273  *
3274  *			Set new length and value if found.
3275  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3276  *			input name is the NULL string.
3277  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3278  *
3279  *			Note: an undef can be modified to be a define,
3280  *			(you can't go the other way.)
3281  */
3282 
3283 static int
3284 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3285     char *name, caddr_t value, int length)
3286 {
3287 	ddi_prop_t	*propp;
3288 	ddi_prop_t	**ppropp;
3289 	caddr_t		p = NULL;
3290 
3291 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3292 		return (DDI_PROP_INVAL_ARG);
3293 
3294 	/*
3295 	 * Preallocate buffer, even if we don't need it...
3296 	 */
3297 	if (length != 0)  {
3298 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3299 		    KM_SLEEP : KM_NOSLEEP);
3300 		if (p == NULL)	{
3301 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3302 			return (DDI_PROP_NO_MEMORY);
3303 		}
3304 	}
3305 
3306 	/*
3307 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3308 	 * number, a real dev_t value should be created based upon the dip's
3309 	 * binding driver.  See ddi_prop_add...
3310 	 */
3311 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3312 		dev = makedevice(
3313 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3314 		    getminor(dev));
3315 
3316 	/*
3317 	 * Check to see if the property exists.  If so we modify it.
3318 	 * Else we create it by calling ddi_prop_add().
3319 	 */
3320 	mutex_enter(&(DEVI(dip)->devi_lock));
3321 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3322 	if (flags & DDI_PROP_SYSTEM_DEF)
3323 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3324 	else if (flags & DDI_PROP_HW_DEF)
3325 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3326 
3327 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3328 		/*
3329 		 * Need to reallocate buffer?  If so, do it
3330 		 * carefully (reuse same space if new prop
3331 		 * is same size and non-NULL sized).
3332 		 */
3333 		if (length != 0)
3334 			bcopy(value, p, length);
3335 
3336 		if (propp->prop_len != 0)
3337 			kmem_free(propp->prop_val, propp->prop_len);
3338 
3339 		propp->prop_len = length;
3340 		propp->prop_val = p;
3341 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3342 		mutex_exit(&(DEVI(dip)->devi_lock));
3343 		return (DDI_PROP_SUCCESS);
3344 	}
3345 
3346 	mutex_exit(&(DEVI(dip)->devi_lock));
3347 	if (length != 0)
3348 		kmem_free(p, length);
3349 
3350 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3351 }
3352 
3353 /*
3354  * Common update routine used to update and encode a property.	Creates
3355  * a property handle, calls the property encode routine, figures out if
3356  * the property already exists and updates if it does.	Otherwise it
3357  * creates if it does not exist.
3358  */
3359 int
3360 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3361     char *name, void *data, uint_t nelements,
3362     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3363 {
3364 	prop_handle_t	ph;
3365 	int		rval;
3366 	uint_t		ourflags;
3367 
3368 	/*
3369 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3370 	 * return error.
3371 	 */
3372 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3373 		return (DDI_PROP_INVAL_ARG);
3374 
3375 	/*
3376 	 * Create the handle
3377 	 */
3378 	ph.ph_data = NULL;
3379 	ph.ph_cur_pos = NULL;
3380 	ph.ph_save_pos = NULL;
3381 	ph.ph_size = 0;
3382 	ph.ph_ops = &prop_1275_ops;
3383 
3384 	/*
3385 	 * ourflags:
3386 	 * For compatibility with the old interfaces.  The old interfaces
3387 	 * didn't sleep by default and slept when the flag was set.  These
3388 	 * interfaces to the opposite.	So the old interfaces now set the
3389 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3390 	 *
3391 	 * ph.ph_flags:
3392 	 * Blocked data or unblocked data allocation
3393 	 * for ph.ph_data in ddi_prop_encode_alloc()
3394 	 */
3395 	if (flags & DDI_PROP_DONTSLEEP) {
3396 		ourflags = flags;
3397 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3398 	} else {
3399 		ourflags = flags | DDI_PROP_CANSLEEP;
3400 		ph.ph_flags = DDI_PROP_CANSLEEP;
3401 	}
3402 
3403 	/*
3404 	 * Encode the data and store it in the property handle by
3405 	 * calling the prop_encode routine.
3406 	 */
3407 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3408 	    DDI_PROP_SUCCESS) {
3409 		if (rval == DDI_PROP_NO_MEMORY)
3410 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3411 		if (ph.ph_size != 0)
3412 			kmem_free(ph.ph_data, ph.ph_size);
3413 		return (rval);
3414 	}
3415 
3416 	/*
3417 	 * The old interfaces use a stacking approach to creating
3418 	 * properties.	If we are being called from the old interfaces,
3419 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3420 	 * create without checking.
3421 	 */
3422 	if (flags & DDI_PROP_STACK_CREATE) {
3423 		rval = ddi_prop_add(match_dev, dip,
3424 		    ourflags, name, ph.ph_data, ph.ph_size);
3425 	} else {
3426 		rval = ddi_prop_change(match_dev, dip,
3427 		    ourflags, name, ph.ph_data, ph.ph_size);
3428 	}
3429 
3430 	/*
3431 	 * Free the encoded data allocated in the prop_encode routine.
3432 	 */
3433 	if (ph.ph_size != 0)
3434 		kmem_free(ph.ph_data, ph.ph_size);
3435 
3436 	return (rval);
3437 }
3438 
3439 
3440 /*
3441  * ddi_prop_create:	Define a managed property:
3442  *			See above for details.
3443  */
3444 
3445 int
3446 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3447     char *name, caddr_t value, int length)
3448 {
3449 	if (!(flag & DDI_PROP_CANSLEEP)) {
3450 		flag |= DDI_PROP_DONTSLEEP;
3451 #ifdef DDI_PROP_DEBUG
3452 		if (length != 0)
3453 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3454 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3455 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3456 #endif /* DDI_PROP_DEBUG */
3457 	}
3458 	flag &= ~DDI_PROP_SYSTEM_DEF;
3459 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3460 	return (ddi_prop_update_common(dev, dip, flag, name,
3461 	    value, length, ddi_prop_fm_encode_bytes));
3462 }
3463 
3464 int
3465 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3466     char *name, caddr_t value, int length)
3467 {
3468 	if (!(flag & DDI_PROP_CANSLEEP))
3469 		flag |= DDI_PROP_DONTSLEEP;
3470 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3471 	return (ddi_prop_update_common(dev, dip, flag,
3472 	    name, value, length, ddi_prop_fm_encode_bytes));
3473 }
3474 
3475 int
3476 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3477     char *name, caddr_t value, int length)
3478 {
3479 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3480 
3481 	/*
3482 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3483 	 * return error.
3484 	 */
3485 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3486 		return (DDI_PROP_INVAL_ARG);
3487 
3488 	if (!(flag & DDI_PROP_CANSLEEP))
3489 		flag |= DDI_PROP_DONTSLEEP;
3490 	flag &= ~DDI_PROP_SYSTEM_DEF;
3491 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3492 		return (DDI_PROP_NOT_FOUND);
3493 
3494 	return (ddi_prop_update_common(dev, dip,
3495 	    (flag | DDI_PROP_TYPE_BYTE), name,
3496 	    value, length, ddi_prop_fm_encode_bytes));
3497 }
3498 
3499 int
3500 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3501     char *name, caddr_t value, int length)
3502 {
3503 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3504 
3505 	/*
3506 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3507 	 * return error.
3508 	 */
3509 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3510 		return (DDI_PROP_INVAL_ARG);
3511 
3512 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3513 		return (DDI_PROP_NOT_FOUND);
3514 
3515 	if (!(flag & DDI_PROP_CANSLEEP))
3516 		flag |= DDI_PROP_DONTSLEEP;
3517 	return (ddi_prop_update_common(dev, dip,
3518 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3519 	    name, value, length, ddi_prop_fm_encode_bytes));
3520 }
3521 
3522 
3523 /*
3524  * Common lookup routine used to lookup and decode a property.
3525  * Creates a property handle, searches for the raw encoded data,
3526  * fills in the handle, and calls the property decode functions
3527  * passed in.
3528  *
3529  * This routine is not static because ddi_bus_prop_op() which lives in
3530  * ddi_impl.c calls it.  No driver should be calling this routine.
3531  */
3532 int
3533 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3534     uint_t flags, char *name, void *data, uint_t *nelements,
3535     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3536 {
3537 	int		rval;
3538 	uint_t		ourflags;
3539 	prop_handle_t	ph;
3540 
3541 	if ((match_dev == DDI_DEV_T_NONE) ||
3542 	    (name == NULL) || (strlen(name) == 0))
3543 		return (DDI_PROP_INVAL_ARG);
3544 
3545 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3546 	    flags | DDI_PROP_CANSLEEP;
3547 
3548 	/*
3549 	 * Get the encoded data
3550 	 */
3551 	bzero(&ph, sizeof (prop_handle_t));
3552 
3553 	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
3554 		/*
3555 		 * For rootnex and unbound dlpi style-2 devices, index into
3556 		 * the devnames' array and search the global
3557 		 * property list.
3558 		 */
3559 		ourflags &= ~DDI_UNBND_DLPI2;
3560 		rval = i_ddi_prop_search_global(match_dev,
3561 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3562 	} else {
3563 		rval = ddi_prop_search_common(match_dev, dip,
3564 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3565 		    &ph.ph_data, &ph.ph_size);
3566 
3567 	}
3568 
3569 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3570 		ASSERT(ph.ph_data == NULL);
3571 		ASSERT(ph.ph_size == 0);
3572 		return (rval);
3573 	}
3574 
3575 	/*
3576 	 * If the encoded data came from a OBP or software
3577 	 * use the 1275 OBP decode/encode routines.
3578 	 */
3579 	ph.ph_cur_pos = ph.ph_data;
3580 	ph.ph_save_pos = ph.ph_data;
3581 	ph.ph_ops = &prop_1275_ops;
3582 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3583 
3584 	rval = (*prop_decoder)(&ph, data, nelements);
3585 
3586 	/*
3587 	 * Free the encoded data
3588 	 */
3589 	if (ph.ph_size != 0)
3590 		kmem_free(ph.ph_data, ph.ph_size);
3591 
3592 	return (rval);
3593 }
3594 
3595 /*
3596  * Lookup and return an array of composite properties.  The driver must
3597  * provide the decode routine.
3598  */
3599 int
3600 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3601     uint_t flags, char *name, void *data, uint_t *nelements,
3602     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3603 {
3604 	return (ddi_prop_lookup_common(match_dev, dip,
3605 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3606 	    data, nelements, prop_decoder));
3607 }
3608 
3609 /*
3610  * Return 1 if a property exists (no type checking done).
3611  * Return 0 if it does not exist.
3612  */
3613 int
3614 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3615 {
3616 	int	i;
3617 	uint_t	x = 0;
3618 
3619 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3620 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3621 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3622 }
3623 
3624 
3625 /*
3626  * Update an array of composite properties.  The driver must
3627  * provide the encode routine.
3628  */
3629 int
3630 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3631     char *name, void *data, uint_t nelements,
3632     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3633 {
3634 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3635 	    name, data, nelements, prop_create));
3636 }
3637 
3638 /*
3639  * Get a single integer or boolean property and return it.
3640  * If the property does not exists, or cannot be decoded,
3641  * then return the defvalue passed in.
3642  *
3643  * This routine always succeeds.
3644  */
3645 int
3646 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3647     char *name, int defvalue)
3648 {
3649 	int	data;
3650 	uint_t	nelements;
3651 	int	rval;
3652 
3653 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3654 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3655 #ifdef DEBUG
3656 		if (dip != NULL) {
3657 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3658 			    " 0x%x (prop = %s, node = %s%d)", flags,
3659 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3660 		}
3661 #endif /* DEBUG */
3662 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3663 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3664 	}
3665 
3666 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3667 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3668 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3669 		if (rval == DDI_PROP_END_OF_DATA)
3670 			data = 1;
3671 		else
3672 			data = defvalue;
3673 	}
3674 	return (data);
3675 }
3676 
3677 /*
3678  * Get a single 64 bit integer or boolean property and return it.
3679  * If the property does not exists, or cannot be decoded,
3680  * then return the defvalue passed in.
3681  *
3682  * This routine always succeeds.
3683  */
3684 int64_t
3685 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
3686     char *name, int64_t defvalue)
3687 {
3688 	int64_t	data;
3689 	uint_t	nelements;
3690 	int	rval;
3691 
3692 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3693 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3694 #ifdef DEBUG
3695 		if (dip != NULL) {
3696 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
3697 			    " 0x%x (prop = %s, node = %s%d)", flags,
3698 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3699 		}
3700 #endif /* DEBUG */
3701 		return (DDI_PROP_INVAL_ARG);
3702 	}
3703 
3704 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3705 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3706 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
3707 	    != DDI_PROP_SUCCESS) {
3708 		if (rval == DDI_PROP_END_OF_DATA)
3709 			data = 1;
3710 		else
3711 			data = defvalue;
3712 	}
3713 	return (data);
3714 }
3715 
3716 /*
3717  * Get an array of integer property
3718  */
3719 int
3720 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3721     char *name, int **data, uint_t *nelements)
3722 {
3723 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3724 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3725 #ifdef DEBUG
3726 		if (dip != NULL) {
3727 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3728 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3729 			    flags, name, ddi_driver_name(dip),
3730 			    ddi_get_instance(dip));
3731 		}
3732 #endif /* DEBUG */
3733 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3734 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3735 	}
3736 
3737 	return (ddi_prop_lookup_common(match_dev, dip,
3738 	    (flags | DDI_PROP_TYPE_INT), name, data,
3739 	    nelements, ddi_prop_fm_decode_ints));
3740 }
3741 
3742 /*
3743  * Get an array of 64 bit integer properties
3744  */
3745 int
3746 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3747     char *name, int64_t **data, uint_t *nelements)
3748 {
3749 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3750 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3751 #ifdef DEBUG
3752 		if (dip != NULL) {
3753 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3754 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3755 			    flags, name, ddi_driver_name(dip),
3756 			    ddi_get_instance(dip));
3757 		}
3758 #endif /* DEBUG */
3759 		return (DDI_PROP_INVAL_ARG);
3760 	}
3761 
3762 	return (ddi_prop_lookup_common(match_dev, dip,
3763 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3764 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
3765 }
3766 
3767 /*
3768  * Update a single integer property.  If the property exists on the drivers
3769  * property list it updates, else it creates it.
3770  */
3771 int
3772 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3773     char *name, int data)
3774 {
3775 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3776 	    name, &data, 1, ddi_prop_fm_encode_ints));
3777 }
3778 
3779 /*
3780  * Update a single 64 bit integer property.
3781  * Update the driver property list if it exists, else create it.
3782  */
3783 int
3784 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3785     char *name, int64_t data)
3786 {
3787 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3788 	    name, &data, 1, ddi_prop_fm_encode_int64));
3789 }
3790 
3791 int
3792 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3793     char *name, int data)
3794 {
3795 	return (ddi_prop_update_common(match_dev, dip,
3796 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3797 	    name, &data, 1, ddi_prop_fm_encode_ints));
3798 }
3799 
3800 int
3801 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3802     char *name, int64_t data)
3803 {
3804 	return (ddi_prop_update_common(match_dev, dip,
3805 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3806 	    name, &data, 1, ddi_prop_fm_encode_int64));
3807 }
3808 
3809 /*
3810  * Update an array of integer property.  If the property exists on the drivers
3811  * property list it updates, else it creates it.
3812  */
3813 int
3814 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3815     char *name, int *data, uint_t nelements)
3816 {
3817 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3818 	    name, data, nelements, ddi_prop_fm_encode_ints));
3819 }
3820 
3821 /*
3822  * Update an array of 64 bit integer properties.
3823  * Update the driver property list if it exists, else create it.
3824  */
3825 int
3826 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3827     char *name, int64_t *data, uint_t nelements)
3828 {
3829 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3830 	    name, data, nelements, ddi_prop_fm_encode_int64));
3831 }
3832 
3833 int
3834 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3835     char *name, int64_t *data, uint_t nelements)
3836 {
3837 	return (ddi_prop_update_common(match_dev, dip,
3838 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3839 	    name, data, nelements, ddi_prop_fm_encode_int64));
3840 }
3841 
3842 int
3843 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3844     char *name, int *data, uint_t nelements)
3845 {
3846 	return (ddi_prop_update_common(match_dev, dip,
3847 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3848 	    name, data, nelements, ddi_prop_fm_encode_ints));
3849 }
3850 
3851 /*
3852  * Get a single string property.
3853  */
3854 int
3855 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3856     char *name, char **data)
3857 {
3858 	uint_t x;
3859 
3860 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3861 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3862 #ifdef DEBUG
3863 		if (dip != NULL) {
3864 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3865 			    "(prop = %s, node = %s%d); invalid bits ignored",
3866 			    "ddi_prop_lookup_string", flags, name,
3867 			    ddi_driver_name(dip), ddi_get_instance(dip));
3868 		}
3869 #endif /* DEBUG */
3870 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3871 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3872 	}
3873 
3874 	return (ddi_prop_lookup_common(match_dev, dip,
3875 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3876 	    &x, ddi_prop_fm_decode_string));
3877 }
3878 
3879 /*
3880  * Get an array of strings property.
3881  */
3882 int
3883 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3884     char *name, char ***data, uint_t *nelements)
3885 {
3886 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3887 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3888 #ifdef DEBUG
3889 		if (dip != NULL) {
3890 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3891 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3892 			    flags, name, ddi_driver_name(dip),
3893 			    ddi_get_instance(dip));
3894 		}
3895 #endif /* DEBUG */
3896 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3897 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3898 	}
3899 
3900 	return (ddi_prop_lookup_common(match_dev, dip,
3901 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3902 	    nelements, ddi_prop_fm_decode_strings));
3903 }
3904 
3905 /*
3906  * Update a single string property.
3907  */
3908 int
3909 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3910     char *name, char *data)
3911 {
3912 	return (ddi_prop_update_common(match_dev, dip,
3913 	    DDI_PROP_TYPE_STRING, name, &data, 1,
3914 	    ddi_prop_fm_encode_string));
3915 }
3916 
3917 int
3918 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3919     char *name, char *data)
3920 {
3921 	return (ddi_prop_update_common(match_dev, dip,
3922 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3923 	    name, &data, 1, ddi_prop_fm_encode_string));
3924 }
3925 
3926 
3927 /*
3928  * Update an array of strings property.
3929  */
3930 int
3931 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3932     char *name, char **data, uint_t nelements)
3933 {
3934 	return (ddi_prop_update_common(match_dev, dip,
3935 	    DDI_PROP_TYPE_STRING, name, data, nelements,
3936 	    ddi_prop_fm_encode_strings));
3937 }
3938 
3939 int
3940 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3941     char *name, char **data, uint_t nelements)
3942 {
3943 	return (ddi_prop_update_common(match_dev, dip,
3944 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3945 	    name, data, nelements,
3946 	    ddi_prop_fm_encode_strings));
3947 }
3948 
3949 
3950 /*
3951  * Get an array of bytes property.
3952  */
3953 int
3954 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3955     char *name, uchar_t **data, uint_t *nelements)
3956 {
3957 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3958 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3959 #ifdef DEBUG
3960 		if (dip != NULL) {
3961 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
3962 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
3963 			    flags, name, ddi_driver_name(dip),
3964 			    ddi_get_instance(dip));
3965 		}
3966 #endif /* DEBUG */
3967 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3968 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3969 	}
3970 
3971 	return (ddi_prop_lookup_common(match_dev, dip,
3972 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
3973 	    nelements, ddi_prop_fm_decode_bytes));
3974 }
3975 
3976 /*
3977  * Update an array of bytes property.
3978  */
3979 int
3980 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3981     char *name, uchar_t *data, uint_t nelements)
3982 {
3983 	if (nelements == 0)
3984 		return (DDI_PROP_INVAL_ARG);
3985 
3986 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
3987 	    name, data, nelements, ddi_prop_fm_encode_bytes));
3988 }
3989 
3990 
3991 int
3992 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3993     char *name, uchar_t *data, uint_t nelements)
3994 {
3995 	if (nelements == 0)
3996 		return (DDI_PROP_INVAL_ARG);
3997 
3998 	return (ddi_prop_update_common(match_dev, dip,
3999 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4000 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4001 }
4002 
4003 
4004 /*
4005  * ddi_prop_remove_common:	Undefine a managed property:
4006  *			Input dev_t must match dev_t when defined.
4007  *			Returns DDI_PROP_NOT_FOUND, possibly.
4008  *			DDI_PROP_INVAL_ARG is also possible if dev is
4009  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4010  */
4011 int
4012 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4013 {
4014 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4015 	ddi_prop_t	*propp;
4016 	ddi_prop_t	*lastpropp = NULL;
4017 
4018 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4019 	    (strlen(name) == 0)) {
4020 		return (DDI_PROP_INVAL_ARG);
4021 	}
4022 
4023 	if (flag & DDI_PROP_SYSTEM_DEF)
4024 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4025 	else if (flag & DDI_PROP_HW_DEF)
4026 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4027 
4028 	mutex_enter(&(DEVI(dip)->devi_lock));
4029 
4030 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4031 		if (DDI_STRSAME(propp->prop_name, name) &&
4032 		    (dev == propp->prop_dev)) {
4033 			/*
4034 			 * Unlink this propp allowing for it to
4035 			 * be first in the list:
4036 			 */
4037 
4038 			if (lastpropp == NULL)
4039 				*list_head = propp->prop_next;
4040 			else
4041 				lastpropp->prop_next = propp->prop_next;
4042 
4043 			mutex_exit(&(DEVI(dip)->devi_lock));
4044 
4045 			/*
4046 			 * Free memory and return...
4047 			 */
4048 			kmem_free(propp->prop_name,
4049 			    strlen(propp->prop_name) + 1);
4050 			if (propp->prop_len != 0)
4051 				kmem_free(propp->prop_val, propp->prop_len);
4052 			kmem_free(propp, sizeof (ddi_prop_t));
4053 			return (DDI_PROP_SUCCESS);
4054 		}
4055 		lastpropp = propp;
4056 	}
4057 	mutex_exit(&(DEVI(dip)->devi_lock));
4058 	return (DDI_PROP_NOT_FOUND);
4059 }
4060 
4061 int
4062 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4063 {
4064 	return (ddi_prop_remove_common(dev, dip, name, 0));
4065 }
4066 
4067 int
4068 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4069 {
4070 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4071 }
4072 
4073 /*
4074  * e_ddi_prop_list_delete: remove a list of properties
4075  *	Note that the caller needs to provide the required protection
4076  *	(eg. devi_lock if these properties are still attached to a devi)
4077  */
4078 void
4079 e_ddi_prop_list_delete(ddi_prop_t *props)
4080 {
4081 	i_ddi_prop_list_delete(props);
4082 }
4083 
4084 /*
4085  * ddi_prop_remove_all_common:
4086  *	Used before unloading a driver to remove
4087  *	all properties. (undefines all dev_t's props.)
4088  *	Also removes `explicitly undefined' props.
4089  *	No errors possible.
4090  */
4091 void
4092 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4093 {
4094 	ddi_prop_t	**list_head;
4095 
4096 	mutex_enter(&(DEVI(dip)->devi_lock));
4097 	if (flag & DDI_PROP_SYSTEM_DEF) {
4098 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4099 	} else if (flag & DDI_PROP_HW_DEF) {
4100 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4101 	} else {
4102 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4103 	}
4104 	i_ddi_prop_list_delete(*list_head);
4105 	*list_head = NULL;
4106 	mutex_exit(&(DEVI(dip)->devi_lock));
4107 }
4108 
4109 
4110 /*
4111  * ddi_prop_remove_all:		Remove all driver prop definitions.
4112  */
4113 
4114 void
4115 ddi_prop_remove_all(dev_info_t *dip)
4116 {
4117 	i_ddi_prop_dyn_driver_set(dip, NULL);
4118 	ddi_prop_remove_all_common(dip, 0);
4119 }
4120 
4121 /*
4122  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4123  */
4124 
4125 void
4126 e_ddi_prop_remove_all(dev_info_t *dip)
4127 {
4128 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4129 }
4130 
4131 
4132 /*
4133  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4134  *			searches which match this property return
4135  *			the error code DDI_PROP_UNDEFINED.
4136  *
4137  *			Use ddi_prop_remove to negate effect of
4138  *			ddi_prop_undefine
4139  *
4140  *			See above for error returns.
4141  */
4142 
4143 int
4144 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4145 {
4146 	if (!(flag & DDI_PROP_CANSLEEP))
4147 		flag |= DDI_PROP_DONTSLEEP;
4148 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4149 	return (ddi_prop_update_common(dev, dip, flag,
4150 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4151 }
4152 
4153 int
4154 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4155 {
4156 	if (!(flag & DDI_PROP_CANSLEEP))
4157 		flag |= DDI_PROP_DONTSLEEP;
4158 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4159 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4160 	return (ddi_prop_update_common(dev, dip, flag,
4161 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4162 }
4163 
4164 /*
4165  * Support for gathering dynamic properties in devinfo snapshot.
4166  */
4167 void
4168 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4169 {
4170 	DEVI(dip)->devi_prop_dyn_driver = dp;
4171 }
4172 
/* Return the driver's dynamic-property descriptor list for this node. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}
4178 
/* Set the parent's dynamic-property descriptor list for this node. */
void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}
4184 
/* Return the parent's dynamic-property descriptor list for this node. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}
4190 
4191 void
4192 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4193 {
4194 	/* for now we invalidate the entire cached snapshot */
4195 	if (dip && dp)
4196 		i_ddi_di_cache_invalidate();
4197 }
4198 
/*
 * Invalidate the cached devinfo snapshot after a property change.
 * All arguments are currently unused; the whole snapshot is dropped.
 */
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4206 
4207 
4208 /*
4209  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4210  *
4211  * if input dip != child_dip, then call is on behalf of child
4212  * to search PROM, do it via ddi_prop_search_common() and ascend only
4213  * if allowed.
4214  *
4215  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4216  * to search for PROM defined props only.
4217  *
4218  * Note that the PROM search is done only if the requested dev
4219  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4220  * have no associated dev, thus are automatically associated with
4221  * DDI_DEV_T_NONE.
4222  *
4223  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4224  *
4225  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4226  * that the property resides in the prom.
4227  */
4228 int
4229 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4230     ddi_prop_op_t prop_op, int mod_flags,
4231     char *name, caddr_t valuep, int *lengthp)
4232 {
4233 	int	len;
4234 	caddr_t buffer = NULL;
4235 
4236 	/*
4237 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4238 	 * look in caller's PROM if it's a self identifying device...
4239 	 *
4240 	 * Note that this is very similar to ddi_prop_op, but we
4241 	 * search the PROM instead of the s/w defined properties,
4242 	 * and we are called on by the parent driver to do this for
4243 	 * the child.
4244 	 */
4245 
4246 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4247 	    ndi_dev_is_prom_node(ch_dip) &&
4248 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4249 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4250 		if (len == -1) {
4251 			return (DDI_PROP_NOT_FOUND);
4252 		}
4253 
4254 		/*
4255 		 * If exists only request, we're done
4256 		 */
4257 		if (prop_op == PROP_EXISTS) {
4258 			return (DDI_PROP_FOUND_1275);
4259 		}
4260 
4261 		/*
4262 		 * If length only request or prop length == 0, get out
4263 		 */
4264 		if ((prop_op == PROP_LEN) || (len == 0)) {
4265 			*lengthp = len;
4266 			return (DDI_PROP_FOUND_1275);
4267 		}
4268 
4269 		/*
4270 		 * Allocate buffer if required... (either way `buffer'
4271 		 * is receiving address).
4272 		 */
4273 
4274 		switch (prop_op) {
4275 
4276 		case PROP_LEN_AND_VAL_ALLOC:
4277 
4278 			buffer = kmem_alloc((size_t)len,
4279 			    mod_flags & DDI_PROP_CANSLEEP ?
4280 			    KM_SLEEP : KM_NOSLEEP);
4281 			if (buffer == NULL) {
4282 				return (DDI_PROP_NO_MEMORY);
4283 			}
4284 			*(caddr_t *)valuep = buffer;
4285 			break;
4286 
4287 		case PROP_LEN_AND_VAL_BUF:
4288 
4289 			if (len > (*lengthp)) {
4290 				*lengthp = len;
4291 				return (DDI_PROP_BUF_TOO_SMALL);
4292 			}
4293 
4294 			buffer = valuep;
4295 			break;
4296 
4297 		default:
4298 			break;
4299 		}
4300 
4301 		/*
4302 		 * Call the PROM function to do the copy.
4303 		 */
4304 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4305 		    name, buffer);
4306 
4307 		*lengthp = len; /* return the actual length to the caller */
4308 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4309 		return (DDI_PROP_FOUND_1275);
4310 	}
4311 
4312 	return (DDI_PROP_NOT_FOUND);
4313 }
4314 
4315 /*
4316  * The ddi_bus_prop_op default bus nexus prop op function.
4317  *
4318  * Code to search hardware layer (PROM), if it exists,
4319  * on behalf of child, then, if appropriate, ascend and check
4320  * my own software defined properties...
4321  */
4322 int
4323 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4324     ddi_prop_op_t prop_op, int mod_flags,
4325     char *name, caddr_t valuep, int *lengthp)
4326 {
4327 	int	error;
4328 
4329 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4330 	    name, valuep, lengthp);
4331 
4332 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4333 	    error == DDI_PROP_BUF_TOO_SMALL)
4334 		return (error);
4335 
4336 	if (error == DDI_PROP_NO_MEMORY) {
4337 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4338 		return (DDI_PROP_NO_MEMORY);
4339 	}
4340 
4341 	/*
4342 	 * Check the 'options' node as a last resort
4343 	 */
4344 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4345 		return (DDI_PROP_NOT_FOUND);
4346 
4347 	if (ch_dip == ddi_root_node())	{
4348 		/*
4349 		 * As a last resort, when we've reached
4350 		 * the top and still haven't found the
4351 		 * property, see if the desired property
4352 		 * is attached to the options node.
4353 		 *
4354 		 * The options dip is attached right after boot.
4355 		 */
4356 		ASSERT(options_dip != NULL);
4357 		/*
4358 		 * Force the "don't pass" flag to *just* see
4359 		 * what the options node has to offer.
4360 		 */
4361 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4362 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4363 		    (uint_t *)lengthp));
4364 	}
4365 
4366 	/*
4367 	 * Otherwise, continue search with parent's s/w defined properties...
4368 	 * NOTE: Using `dip' in following call increments the level.
4369 	 */
4370 
4371 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4372 	    name, valuep, (uint_t *)lengthp));
4373 }
4374 
4375 /*
4376  * External property functions used by other parts of the kernel...
4377  */
4378 
4379 /*
4380  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4381  */
4382 
4383 int
4384 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4385     caddr_t valuep, int *lengthp)
4386 {
4387 	_NOTE(ARGUNUSED(type))
4388 	dev_info_t *devi;
4389 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4390 	int error;
4391 
4392 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4393 		return (DDI_PROP_NOT_FOUND);
4394 
4395 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4396 	ddi_release_devi(devi);
4397 	return (error);
4398 }
4399 
4400 /*
4401  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4402  */
4403 
4404 int
4405 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4406     caddr_t valuep, int *lengthp)
4407 {
4408 	_NOTE(ARGUNUSED(type))
4409 	dev_info_t *devi;
4410 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4411 	int error;
4412 
4413 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4414 		return (DDI_PROP_NOT_FOUND);
4415 
4416 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4417 	ddi_release_devi(devi);
4418 	return (error);
4419 }
4420 
4421 /*
4422  * e_ddi_getprop:	See comments for ddi_getprop.
4423  */
4424 int
4425 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4426 {
4427 	_NOTE(ARGUNUSED(type))
4428 	dev_info_t *devi;
4429 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4430 	int	propvalue = defvalue;
4431 	int	proplength = sizeof (int);
4432 	int	error;
4433 
4434 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4435 		return (defvalue);
4436 
4437 	error = cdev_prop_op(dev, devi, prop_op,
4438 	    flags, name, (caddr_t)&propvalue, &proplength);
4439 	ddi_release_devi(devi);
4440 
4441 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4442 		propvalue = 1;
4443 
4444 	return (propvalue);
4445 }
4446 
4447 /*
4448  * e_ddi_getprop_int64:
4449  *
4450  * This is a typed interfaces, but predates typed properties. With the
4451  * introduction of typed properties the framework tries to ensure
4452  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4453  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4454  * typed interface invokes legacy (non-typed) interfaces:
4455  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4456  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4457  * this type of lookup as a single operation we invoke the legacy
4458  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4459  * framework ddi_prop_op(9F) implementation is expected to check for
4460  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4461  * (currently TYPE_INT64).
4462  */
4463 int64_t
4464 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4465     int flags, int64_t defvalue)
4466 {
4467 	_NOTE(ARGUNUSED(type))
4468 	dev_info_t	*devi;
4469 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4470 	int64_t		propvalue = defvalue;
4471 	int		proplength = sizeof (propvalue);
4472 	int		error;
4473 
4474 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4475 		return (defvalue);
4476 
4477 	error = cdev_prop_op(dev, devi, prop_op, flags |
4478 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4479 	ddi_release_devi(devi);
4480 
4481 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4482 		propvalue = 1;
4483 
4484 	return (propvalue);
4485 }
4486 
4487 /*
4488  * e_ddi_getproplen:	See comments for ddi_getproplen.
4489  */
4490 int
4491 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4492 {
4493 	_NOTE(ARGUNUSED(type))
4494 	dev_info_t *devi;
4495 	ddi_prop_op_t prop_op = PROP_LEN;
4496 	int error;
4497 
4498 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4499 		return (DDI_PROP_NOT_FOUND);
4500 
4501 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4502 	ddi_release_devi(devi);
4503 	return (error);
4504 }
4505 
4506 /*
4507  * Routines to get at elements of the dev_info structure
4508  */
4509 
4510 /*
4511  * ddi_binding_name: Return the driver binding name of the devinfo node
4512  *		This is the name the OS used to bind the node to a driver.
4513  */
char *
ddi_binding_name(dev_info_t *dip)
{
	/* returns a pointer into the devinfo node, not a copy */
	return (DEVI(dip)->devi_binding_name);
}
4519 
4520 /*
4521  * ddi_driver_major: Return the major number of the driver that
4522  *	the supplied devinfo is bound to.  If not yet bound,
4523  *	DDI_MAJOR_T_NONE.
4524  *
4525  * When used by the driver bound to 'devi', this
4526  * function will reliably return the driver major number.
4527  * Other ways of determining the driver major number, such as
4528  *	major = ddi_name_to_major(ddi_get_name(devi));
4529  *	major = ddi_name_to_major(ddi_binding_name(devi));
4530  * can return a different result as the driver/alias binding
4531  * can change dynamically, and thus should be avoided.
4532  */
major_t
ddi_driver_major(dev_info_t *devi)
{
	/* DDI_MAJOR_T_NONE if the node is not (yet) bound to a driver */
	return (DEVI(devi)->devi_major);
}
4538 
4539 /*
4540  * ddi_driver_name: Return the normalized driver name. this is the
4541  *		actual driver name
4542  */
4543 const char *
4544 ddi_driver_name(dev_info_t *devi)
4545 {
4546 	major_t major;
4547 
4548 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4549 		return (ddi_major_to_name(major));
4550 
4551 	return (ddi_node_name(devi));
4552 }
4553 
4554 /*
4555  * i_ddi_set_binding_name:	Set binding name.
4556  *
4557  *	Set the binding name to the given name.
4558  *	This routine is for use by the ddi implementation, not by drivers.
4559  */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	/* the devinfo node references 'name'; ownership stays with caller */
	DEVI(dip)->devi_binding_name = name;

}
4566 
4567 /*
4568  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4569  * the implementation has used to bind the node to a driver.
4570  */
char *
ddi_get_name(dev_info_t *dip)
{
	/* synonym of ddi_binding_name(); see the comment above */
	return (DEVI(dip)->devi_binding_name);
}
4576 
4577 /*
4578  * ddi_node_name: Return the name property of the devinfo node
4579  *		This may differ from ddi_binding_name if the node name
4580  *		does not define a binding to a driver (i.e. generic names).
4581  */
char *
ddi_node_name(dev_info_t *dip)
{
	/* the node's "name" property; may be generic, unlike binding name */
	return (DEVI(dip)->devi_node_name);
}
4587 
4588 
4589 /*
4590  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4591  */
int
ddi_get_nodeid(dev_info_t *dip)
{
	return (DEVI(dip)->devi_nodeid);
}

/* instance number assigned to this devinfo node */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}

/* dev_ops vector currently bound to this devinfo node */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}

/* bind the given dev_ops vector to this devinfo node */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}

/*
 * ddi_set_driver_private/ddi_get_driver_private:
 * Get/set device driver private data in devinfo.
 */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	DEVI(dip)->devi_driver_data = data;
}

void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}
4631 
4632 /*
4633  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4634  */
4635 
dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}

/* first child of this node, or NULL */
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}

/* next sibling under the same parent, or NULL */
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}

/* next node on the devi_next chain, or NULL */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}

/* link 'nextdip' after 'dip' on the devi_next chain */
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}
4665 
4666 /*
4667  * ddi_root_node:		Return root node of devinfo tree
4668  */
4669 
dev_info_t *
ddi_root_node(void)
{
	/* top_devinfo is established early in boot */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4677 
4678 /*
4679  * Miscellaneous functions:
4680  */
4681 
4682 /*
4683  * Implementation specific hooks
4684  */
4685 
/*
 * Ask the parent nexus to report the device, then (for nodes with
 * cb_ops) log a "<driver><instance> is <path>" console message.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		/* KM_NOSLEEP: if memory is tight the message is skipped */
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}
4706 
4707 /*
4708  * ddi_ctlops() is described in the assembler not to buy a new register
4709  * window when it's called and can reduce cost in climbing the device tree
4710  * without using the tail call optimization.
4711  */
4712 int
4713 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4714 {
4715 	int ret;
4716 
4717 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4718 	    (void *)&rnumber, (void *)result);
4719 
4720 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4721 }
4722 
/* number of register sets for this device, via the parent nexus */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}

/* is this a self-identifying device? (parent nexus decides) */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}

/* is the device in a slave-only slot? (parent nexus decides) */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}

/* do devices 'a' and 'b' share addressing affinity? (parent decides) */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
4746 
4747 int
4748 ddi_streams_driver(dev_info_t *dip)
4749 {
4750 	if (i_ddi_devi_attached(dip) &&
4751 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4752 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4753 		return (DDI_SUCCESS);
4754 	return (DDI_FAILURE);
4755 }
4756 
4757 /*
4758  * callback free list
4759  */
4760 
/* number of entries in the static (L2) callback pool, sized at boot */
static int ncallbacks;
/* candidate pool sizes; impl_ddi_callback_init() picks one by memory size */
static int nc_low = 170;
static int nc_med = 512;
static int nc_high = 2048;
/* the static pool itself, and the head of its free list */
static struct ddi_callback *callbackq;
static struct ddi_callback *callbackqfree;
4767 
4768 /*
4769  * set/run callback lists
4770  */
struct	cbstats	{
	kstat_named_t	cb_asked;	/* ddi_set_callback() requests */
	kstat_named_t	cb_new;		/* new list entries created */
	kstat_named_t	cb_run;		/* callbacks that ran successfully */
	kstat_named_t	cb_delete;	/* list entries removed */
	kstat_named_t	cb_maxreq;	/* high-water mark of outstanding asks */
	kstat_named_t	cb_maxlist;	/* high-water mark of entries allocated */
	kstat_named_t	cb_alloc;	/* entries currently allocated */
	kstat_named_t	cb_runouts;	/* callbacks returning 0 (re-queued) */
	kstat_named_t	cb_L2;		/* allocations taken from the L2 pool */
	kstat_named_t	cb_grow;	/* emergency kmem_alloc_tryhard grows */
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};
4794 
/* shorthand accessors for the 32-bit kstat counters above */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* protects callbackqfree, cbstats, and all callback lists */
static kmutex_t ddi_callback_mutex;
4807 
4808 /*
4809  * callbacks are handled using a L1/L2 cache. The L1 cache
4810  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4811  * we can't get callbacks from the L1 cache [because pageout is doing
4812  * I/O at the time freemem is 0], we allocate callbacks out of the
4813  * L2 cache. The L2 cache is static and depends on the memory size.
4814  * [We might also count the number of devices at probe time and
4815  * allocate one structure per device and adjust for deferred attach]
4816  */
void
impl_ddi_callback_init(void)
{
	int	i;
	uint_t	physmegs;
	kstat_t	*ksp;

	/* physical memory in megabytes selects the L2 pool size */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}

}
4850 
/*
 * Insert (or merge) a callback request on the list headed by *listid.
 * Must be called with ddi_callback_mutex held.  If an entry for the same
 * (funcp, arg) pair already exists, its count is simply bumped; otherwise
 * a new entry is allocated: first from kmem (L1), then from the static
 * pool (L2), and as a last resort via kmem_alloc_tryhard(KM_PANIC).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			/* duplicate registration: merge into one entry */
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			/* both caches exhausted: panic-or-succeed allocation */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	if (marker != NULL) {
		/* append to the tail found during the scan above */
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4894 
/*
 * Register (funcp, arg) to be invoked later via ddi_run_callback().
 * Duplicate registrations are merged into one list entry with a count.
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}
4905 
/*
 * Softcall handler for ddi_run_callback().  Drains the callback list
 * headed by *Queue: each entry's function is invoked up to c_count
 * times; a return of 0 means "not ready" and the remaining count is
 * re-inserted for a later pass.  'pending' totals the counts seen on
 * the first pass so the outer loop terminates.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* first pass only: total outstanding requests */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* static-pool entries go back to the free list, else kmem */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		/* drop the lock while running the callback itself */
		mutex_exit(&ddi_callback_mutex);

		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* not ready: re-queue the remaining count */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
4966 
/*
 * Schedule the list headed by *listid to be drained asynchronously by
 * real_callback_run() on the softcall (soft interrupt) queue.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
4972 
4973 /*
4974  * ddi_periodic_t
4975  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
4976  *     int level)
4977  *
4978  * INTERFACE LEVEL
4979  *      Solaris DDI specific (Solaris DDI)
4980  *
4981  * PARAMETERS
4982  *      func: the callback function
4983  *
4984  *            The callback function will be invoked. The function is invoked
4985  *            in kernel context if the argument level passed is the zero.
4986  *            Otherwise it's invoked in interrupt context at the specified
4987  *            level.
4988  *
4989  *       arg: the argument passed to the callback function
4990  *
4991  *  interval: interval time
4992  *
4993  *    level : callback interrupt level
4994  *
4995  *            If the value is the zero, the callback function is invoked
4996  *            in kernel context. If the value is more than the zero, but
4997  *            less than or equal to ten, the callback function is invoked in
4998  *            interrupt context at the specified interrupt level, which may
4999  *            be used for real time applications.
5000  *
5001  *            This value must be in range of 0-10, which can be a numeric
5002  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5003  *
5004  * DESCRIPTION
5005  *      ddi_periodic_add(9F) schedules the specified function to be
5006  *      periodically invoked in the interval time.
5007  *
5008  *      As well as timeout(9F), the exact time interval over which the function
5009  *      takes effect cannot be guaranteed, but the value given is a close
5010  *      approximation.
5011  *
5012  *      Drivers waiting on behalf of processes with real-time constraints must
5013  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5014  *
5015  * RETURN VALUES
5016  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5017  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5018  *
5019  * CONTEXT
5020  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5021  *      it cannot be called in interrupt context, which is different from
5022  *      timeout(9F).
5023  */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* delegate to the internal periodic-timeout implementation */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}
5044 
5045 /*
5046  * void
5047  * ddi_periodic_delete(ddi_periodic_t req)
5048  *
5049  * INTERFACE LEVEL
5050  *     Solaris DDI specific (Solaris DDI)
5051  *
5052  * PARAMETERS
5053  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5054  *     previously.
5055  *
5056  * DESCRIPTION
5057  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5058  *     previously requested.
5059  *
5060  *     ddi_periodic_delete(9F) will not return until the pending request
5061  *     is canceled or executed.
5062  *
5063  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5064  *     timeout which is either running on another CPU, or has already
5065  *     completed causes no problems. However, unlike untimeout(9F), there is
5066  *     no restrictions on the lock which might be held across the call to
5067  *     ddi_periodic_delete(9F).
5068  *
5069  *     Drivers should be structured with the understanding that the arrival of
5070  *     both an interrupt and a timeout for that interrupt can occasionally
5071  *     occur, in either order.
5072  *
5073  * CONTEXT
5074  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5075  *     it cannot be called in interrupt context, which is different from
5076  *     untimeout(9F).
5077  */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	/* cancel (or wait out) the request made by ddi_periodic_add() */
	i_untimeout((timeout_t)req);
}
5091 
/* stub that never yields a devinfo node for a dev_t */
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	return ((dev_info_t *)0);
}
5098 
5099 /*
5100  * A driver should support its own getinfo(9E) entry point. This function
5101  * is provided as a convenience for ON drivers that don't expect their
5102  * getinfo(9E) entry point to be called. A driver that uses this must not
5103  * call ddi_create_minor_node.
5104  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* always fails; see the usage constraints in the comment above */
	return (DDI_FAILURE);
}
5111 
5112 /*
5113  * A driver should support its own getinfo(9E) entry point. This function
5114  * is provided as a convenience for ON drivers that where the minor number
5115  * is the instance. Drivers that do not have 1:1 mapping must implement
5116  * their own getinfo(9E) function.
5117  */
5118 int
5119 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5120     void *arg, void **result)
5121 {
5122 	_NOTE(ARGUNUSED(dip))
5123 	int	instance;
5124 
5125 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5126 		return (DDI_FAILURE);
5127 
5128 	instance = getminor((dev_t)(uintptr_t)arg);
5129 	*result = (void *)(uintptr_t)instance;
5130 	return (DDI_SUCCESS);
5131 }
5132 
/* attach(9E)/detach(9E) stub that always fails */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5139 
/* DMA map stub: no mapping is ever possible */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

/* DMA handle allocation stub: every attr is rejected */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

/* DMA handle free stub: always fails */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* DMA bind stub: no mapping is ever possible */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

/* DMA unbind stub: always fails */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* DMA sync/flush stub: always fails */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

/* DMA window stub: always fails */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

/* DMA mctl stub: every request fails */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5207 
/* no-op routine for entry points with nothing to do */
void
ddivoid(void)
{}

/* chpoll(9E) stub for devices that do not support polling */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5219 
/* credentials of the calling thread */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/* current lbolt value, narrowed to clock_t */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}

/* 64-bit lbolt value (no clock_t narrowing) */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5237 
5238 time_t
5239 ddi_get_time(void)
5240 {
5241 	time_t	now;
5242 
5243 	if ((now = gethrestime_sec()) == 0) {
5244 		timestruc_t ts;
5245 		mutex_enter(&tod_lock);
5246 		ts = tod_get();
5247 		mutex_exit(&tod_lock);
5248 		return (ts.tv_sec);
5249 	} else {
5250 		return (now);
5251 	}
5252 }
5253 
/* process ID of the calling thread's process */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/* kernel thread ID of the calling thread */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5265 
5266 /*
5267  * This function returns B_TRUE if the caller can reasonably expect that a call
5268  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5269  * by user-level signal.  If it returns B_FALSE, then the caller should use
5270  * other means to make certain that the wait will not hang "forever."
5271  *
5272  * It does not check the signal mask, nor for reception of any particular
5273  * signal.
5274  *
5275  * Currently, a thread can receive a signal if it's not a kernel thread and it
5276  * is not in the middle of exit(2) tear-down.  Threads that are in that
5277  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5278  * cv_timedwait, and qwait_sig to qwait.
5279  */
boolean_t
ddi_can_receive_sig(void)
{
	proc_t *pp;

	/* lwp in exit(2) tear-down: signals no longer interrupt cv waits */
	if (curthread->t_proc_flag & TP_LWPEXIT)
		return (B_FALSE);
	if ((pp = ttoproc(curthread)) == NULL)
		return (B_FALSE);
	/* kernel-resident processes (address space == kas) take no signals */
	return (pp->p_as != &kas);
}
5291 
5292 /*
5293  * Swap bytes in 16-bit [half-]words
5294  */
/*
 * Swap bytes in 16-bit [half-]words: for each pair of input bytes,
 * write them to the destination in reversed order.  A trailing odd
 * byte is ignored.  Safe when src == dst (in-place swap).
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *from = (unsigned char *)src;
	unsigned char *to = (unsigned char *)dst;
	int pairs;

	for (pairs = nbytes >> 1; pairs > 0; pairs--) {
		unsigned char first = from[0];

		to[0] = from[1];
		to[1] = first;
		from += 2;
		to += 2;
	}
}
5311 
/*
 * Append dmdp to the tail of ddip's minor-node list, single-threading
 * against other devinfo changes with ndi_devi_enter/ndi_devi_exit.
 */
static void
ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
{
	int			circ;
	struct ddi_minor_data	*dp;

	ndi_devi_enter(ddip, &circ);
	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
		/* first minor node on this devinfo */
		DEVI(ddip)->devi_minor = dmdp;
	} else {
		/* walk to the tail and link the new node there */
		while (dp->next != (struct ddi_minor_data *)NULL)
			dp = dp->next;
		dp->next = dmdp;
	}
	ndi_devi_exit(ddip, circ);
}
5328 
5329 static int
5330 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5331 {
5332 	int se_flag;
5333 	int kmem_flag;
5334 	int se_err;
5335 	char *pathname, *class_name;
5336 	sysevent_t *ev = NULL;
5337 	sysevent_id_t eid;
5338 	sysevent_value_t se_val;
5339 	sysevent_attr_list_t *ev_attr_list = NULL;
5340 
5341 	/* determine interrupt context */
5342 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5343 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5344 
5345 	i_ddi_di_cache_invalidate();
5346 
5347 #ifdef DEBUG
5348 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5349 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5350 		    "interrupt level by driver %s",
5351 		    ddi_driver_name(dip));
5352 	}
5353 #endif /* DEBUG */
5354 
5355 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5356 	if (ev == NULL) {
5357 		goto fail;
5358 	}
5359 
5360 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5361 	if (pathname == NULL) {
5362 		sysevent_free(ev);
5363 		goto fail;
5364 	}
5365 
5366 	(void) ddi_pathname(dip, pathname);
5367 	ASSERT(strlen(pathname));
5368 	se_val.value_type = SE_DATA_TYPE_STRING;
5369 	se_val.value.sv_string = pathname;
5370 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5371 	    &se_val, se_flag) != 0) {
5372 		kmem_free(pathname, MAXPATHLEN);
5373 		sysevent_free(ev);
5374 		goto fail;
5375 	}
5376 	kmem_free(pathname, MAXPATHLEN);
5377 
5378 	/* add the device class attribute */
5379 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5380 		se_val.value_type = SE_DATA_TYPE_STRING;
5381 		se_val.value.sv_string = class_name;
5382 		if (sysevent_add_attr(&ev_attr_list,
5383 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5384 			sysevent_free_attr(ev_attr_list);
5385 			goto fail;
5386 		}
5387 	}
5388 
5389 	/*
5390 	 * allow for NULL minor names
5391 	 */
5392 	if (minor_name != NULL) {
5393 		se_val.value.sv_string = minor_name;
5394 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5395 		    &se_val, se_flag) != 0) {
5396 			sysevent_free_attr(ev_attr_list);
5397 			sysevent_free(ev);
5398 			goto fail;
5399 		}
5400 	}
5401 
5402 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5403 		sysevent_free_attr(ev_attr_list);
5404 		sysevent_free(ev);
5405 		goto fail;
5406 	}
5407 
5408 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5409 		if (se_err == SE_NO_TRANSPORT) {
5410 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5411 			    "for driver %s (%s). Run devfsadm -i %s",
5412 			    ddi_driver_name(dip), "syseventd not responding",
5413 			    ddi_driver_name(dip));
5414 		} else {
5415 			sysevent_free(ev);
5416 			goto fail;
5417 		}
5418 	}
5419 
5420 	sysevent_free(ev);
5421 	return (DDI_SUCCESS);
5422 fail:
5423 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5424 	    "for driver %s. Run devfsadm -i %s",
5425 	    ddi_driver_name(dip), ddi_driver_name(dip));
5426 	return (DDI_SUCCESS);
5427 }
5428 
5429 /*
5430  * failing to remove a minor node is not of interest
5431  * therefore we do not generate an error message
5432  */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	/* the cached /devices snapshot is stale once a minor goes away */
	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	/* NOTE(review): KM_SLEEP allocations do not fail; check is defensive */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* the attribute list now holds its own copy of the path */
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		/* value_type is still SE_DATA_TYPE_STRING from above */
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/* on success the event owns (and will free) the attribute list */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5528 
5529 /*
5530  * Derive the device class of the node.
5531  * Device class names aren't defined yet. Until this is done we use
5532  * devfs event subclass names as device class names.
5533  */
5534 static int
5535 derive_devi_class(dev_info_t *dip, const char *node_type, int flag)
5536 {
5537 	int rv = DDI_SUCCESS;
5538 
5539 	if (i_ddi_devi_class(dip) == NULL) {
5540 		if (strncmp(node_type, DDI_NT_BLOCK,
5541 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5542 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5543 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5544 		    strcmp(node_type, DDI_NT_FD) != 0) {
5545 
5546 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5547 
5548 		} else if (strncmp(node_type, DDI_NT_NET,
5549 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5550 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5551 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5552 
5553 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5554 
5555 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5556 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5557 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5558 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5559 
5560 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5561 
5562 		} else if (strncmp(node_type, DDI_PSEUDO,
5563 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5564 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5565 		    sizeof (ESC_LOFI) -1) == 0)) {
5566 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5567 		}
5568 	}
5569 
5570 	return (rv);
5571 }
5572 
5573 /*
5574  * Check compliance with PSARC 2003/375:
5575  *
5576  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5577  * exceed IFNAMSIZ (16) characters in length.
5578  */
5579 static boolean_t
5580 verify_name(const char *name)
5581 {
5582 	size_t len = strlen(name);
5583 	const char *cp;
5584 
5585 	if (len == 0 || len > IFNAMSIZ)
5586 		return (B_FALSE);
5587 
5588 	for (cp = name; *cp != '\0'; cp++) {
5589 		if (!isalnum(*cp) && *cp != '_')
5590 			return (B_FALSE);
5591 	}
5592 
5593 	return (B_TRUE);
5594 }
5595 
5596 /*
5597  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5598  *				attach it to the given devinfo node.
5599  */
5600 
static int
ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are supported */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	if (mtype == DDM_MINOR) {
		/*
		 * KM_NOSLEEP is the kmem flag used when recording the
		 * derived device class; it is unrelated to 'flag' (which
		 * carries CLONE_DEV/PRIVONLY_DEV bits).
		 */
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* alias nodes encode our major as the clone driver's minor */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, dmdp->ddm_name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5723 
/*
 * Create an ordinary, externally visible minor node (DDM_MINOR) with no
 * privilege requirements.  Thin wrapper around ddi_create_minor_common().
 */
int
ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5731 
/*
 * Like ddi_create_minor_node(), but additionally associates read/write
 * privilege names and a privileged access mode with the node.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5740 
/*
 * Create a DDM_DEFAULT minor node: the node used when a device is opened
 * by instance without naming a specific minor.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type,
    minor_t minor_num, const char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5748 
5749 /*
5750  * Internal (non-ddi) routine for drivers to export names known
5751  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5752  * but not exported externally to /dev
5753  */
/*
 * Create a DDM_INTERNAL_PATH node: named within the kernel (for
 * ddi_pathname_to_dev_t and friends) but never exported to /dev.
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
5761 
5762 void
5763 ddi_remove_minor_node(dev_info_t *dip, const char *name)
5764 {
5765 	int			circ;
5766 	struct ddi_minor_data	*dmdp, *dmdp1;
5767 	struct ddi_minor_data	**dmdp_prev;
5768 
5769 	ndi_devi_enter(dip, &circ);
5770 	dmdp_prev = &DEVI(dip)->devi_minor;
5771 	dmdp = DEVI(dip)->devi_minor;
5772 	while (dmdp != NULL) {
5773 		dmdp1 = dmdp->next;
5774 		if ((name == NULL || (dmdp->ddm_name != NULL &&
5775 		    strcmp(name, dmdp->ddm_name) == 0))) {
5776 			if (dmdp->ddm_name != NULL) {
5777 				if (dmdp->type != DDM_INTERNAL_PATH)
5778 					(void) i_log_devfs_minor_remove(dip,
5779 					    dmdp->ddm_name);
5780 				kmem_free(dmdp->ddm_name,
5781 				    strlen(dmdp->ddm_name) + 1);
5782 			}
5783 			/*
5784 			 * Release device privilege, if any.
5785 			 * Release dacf client data associated with this minor
5786 			 * node by storing NULL.
5787 			 */
5788 			if (dmdp->ddm_node_priv)
5789 				dpfree(dmdp->ddm_node_priv);
5790 			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
5791 			kmem_free(dmdp, sizeof (struct ddi_minor_data));
5792 			*dmdp_prev = dmdp1;
5793 			/*
5794 			 * OK, we found it, so get out now -- if we drive on,
5795 			 * we will strcmp against garbage.  See 1139209.
5796 			 */
5797 			if (name != NULL)
5798 				break;
5799 		} else {
5800 			dmdp_prev = &dmdp->next;
5801 		}
5802 		dmdp = dmdp1;
5803 	}
5804 	ndi_devi_exit(dip, circ);
5805 }
5806 
5807 
5808 int
5809 ddi_in_panic()
5810 {
5811 	return (panicstr != NULL);
5812 }
5813 
5814 
5815 /*
5816  * Find first bit set in a mask (returned counting from 1 up)
5817  */
5818 
int
ddi_ffs(long mask)
{
	/* ffs() returns the 1-based position of the lowest set bit, 0 if none */
	return (ffs(mask));
}
5824 
5825 /*
5826  * Find last bit set. Take mask and clear
5827  * all but the most significant bit, and
5828  * then let ffs do the rest of the work.
5829  *
5830  * Algorithm courtesy of Steve Chessin.
5831  */
5832 
/*
 * Find the (1-based) position of the most significant set bit, or 0 if
 * the mask is empty.  Repeatedly clear the lowest set bit (mask & (mask-1))
 * until a single bit remains, then let ffs() report its position.
 */
int
ddi_fls(long mask)
{
	for (;;) {
		long stripped;

		if (mask == 0)
			break;
		stripped = mask & (mask - 1);
		if (stripped == 0)
			break;
		mask = stripped;
	}
	return (ffs(mask));
}
5845 
5846 /*
5847  * The ddi_soft_state_* routines comprise generic storage management utilities
5848  * for driver soft state structures (in "the old days," this was done with
5849  * statically sized array - big systems and dynamic loading and unloading
5850  * make heap allocation more attractive).
5851  */
5852 
5853 /*
5854  * Allocate a set of pointers to 'n_items' objects of size 'size'
5855  * bytes.  Each pointer is initialized to nil.
5856  *
5857  * The 'size' and 'n_items' values are stashed in the opaque
5858  * handle returned to the caller.
5859  *
5860  * This implementation interprets 'set of pointers' to mean 'array
5861  * of pointers' but note that nothing in the interface definition
5862  * precludes an implementation that uses, for example, a linked list.
5863  * However there should be a small efficiency gain from using an array
5864  * at lookup time.
5865  *
5866  * NOTE	As an optimization, we make our growable array allocations in
5867  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5868  *	gives us anyway.  It should save us some free/realloc's ..
5869  *
5870  *	As a further optimization, we make the growable array start out
5871  *	with MIN_N_ITEMS in it.
5872  */
5873 
5874 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
5875 
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state	*ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to the next power of two.  When n_items
		 * already is a power of two, exactly one bit is set, so
		 * fls == ffs and the decrement keeps the size unchanged
		 * after the shift.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
5905 
5906 /*
5907  * Allocate a state structure of size 'size' to be associated
5908  * with item 'item'.
5909  *
5910  * In this implementation, the array is extended to
5911  * allow the requested offset, if needed.
5912  */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* ss->size == 0 indicates a handle that was never initialized */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6005 
6006 /*
6007  * Fetch a pointer to the allocated soft state structure.
6008  *
6009  * This is designed to be cheap.
6010  *
6011  * There's an argument that there should be more checking for
6012  * nil pointers and out of bounds on the array.. but we do a lot
6013  * of that in the alloc/free routines.
6014  *
6015  * An array has the convenience that we don't need to lock read-access
6016  * to it c.f. a linked list.  However our "expanding array" strategy
6017  * means that we should hold a readers lock on the i_ddi_soft_state
6018  * structure.
6019  *
6020  * However, from a performance viewpoint, we need to do it without
6021  * any locks at all -- this also makes it a leaf routine.  The algorithm
6022  * is 'lock-free' because we only discard the pointer arrays at
6023  * ddi_soft_state_fini() time.
6024  */
6025 void *
6026 ddi_get_soft_state(void *state, int item)
6027 {
6028 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6029 
6030 	ASSERT((ss != NULL) && (item >= 0));
6031 
6032 	if (item < ss->n_items && ss->array != NULL)
6033 		return (ss->array[item]);
6034 	return (NULL);
6035 }
6036 
6037 /*
6038  * Free the state structure corresponding to 'item.'   Freeing an
6039  * element that has either gone or was never allocated is not
6040  * considered an error.  Note that we free the state structure, but
6041  * we don't shrink our pointer array, or discard 'dirty' arrays,
6042  * since even a few pointers don't really waste too much memory.
6043  *
6044  * Passing an item number that is out of bounds, or a null pointer will
6045  * provoke an error message.
6046  */
6047 void
6048 ddi_soft_state_free(void *state, int item)
6049 {
6050 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6051 	void			**array;
6052 	void			*element;
6053 	static char		msg[] = "ddi_soft_state_free:";
6054 
6055 	if (ss == NULL) {
6056 		cmn_err(CE_WARN, "%s null handle: %s",
6057 		    msg, mod_containing_pc(caller()));
6058 		return;
6059 	}
6060 
6061 	element = NULL;
6062 
6063 	mutex_enter(&ss->lock);
6064 
6065 	if ((array = ss->array) == NULL || ss->size == 0) {
6066 		cmn_err(CE_WARN, "%s bad handle: %s",
6067 		    msg, mod_containing_pc(caller()));
6068 	} else if (item < 0 || item >= ss->n_items) {
6069 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6070 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6071 	} else if (array[item] != NULL) {
6072 		element = array[item];
6073 		array[item] = NULL;
6074 	}
6075 
6076 	mutex_exit(&ss->lock);
6077 
6078 	if (element)
6079 		kmem_free(element, ss->size);
6080 }
6081 
6082 /*
6083  * Free the entire set of pointers, and any
6084  * soft state structures contained therein.
6085  *
6086  * Note that we don't grab the ss->lock mutex, even though
6087  * we're inspecting the various fields of the data structure.
6088  *
6089  * There is an implicit assumption that this routine will
6090  * never run concurrently with any of the above on this
6091  * particular state structure i.e. by the time the driver
6092  * calls this routine, there should be no other threads
6093  * running in the driver.
6094  */
6095 void
6096 ddi_soft_state_fini(void **state_p)
6097 {
6098 	i_ddi_soft_state	*ss, *dirty;
6099 	int			item;
6100 	static char		msg[] = "ddi_soft_state_fini:";
6101 
6102 	if (state_p == NULL ||
6103 	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6104 		cmn_err(CE_WARN, "%s null handle: %s",
6105 		    msg, mod_containing_pc(caller()));
6106 		return;
6107 	}
6108 
6109 	if (ss->size == 0) {
6110 		cmn_err(CE_WARN, "%s bad handle: %s",
6111 		    msg, mod_containing_pc(caller()));
6112 		return;
6113 	}
6114 
6115 	if (ss->n_items > 0) {
6116 		for (item = 0; item < ss->n_items; item++)
6117 			ddi_soft_state_free(ss, item);
6118 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6119 	}
6120 
6121 	/*
6122 	 * Now delete any dirty arrays from previous 'grow' operations
6123 	 */
6124 	for (dirty = ss->next; dirty; dirty = ss->next) {
6125 		ss->next = dirty->next;
6126 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6127 		kmem_free(dirty, sizeof (*dirty));
6128 	}
6129 
6130 	mutex_destroy(&ss->lock);
6131 	kmem_free(ss, sizeof (*ss));
6132 
6133 	*state_p = NULL;
6134 }
6135 
6136 #define	SS_N_ITEMS_PER_HASH	16
6137 #define	SS_MIN_HASH_SZ		16
6138 #define	SS_MAX_HASH_SZ		4096
6139 
6140 int
6141 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6142     int n_items)
6143 {
6144 	i_ddi_soft_state_bystr	*sss;
6145 	int			hash_sz;
6146 
6147 	ASSERT(state_p && size && n_items);
6148 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6149 		return (EINVAL);
6150 
6151 	/* current implementation is based on hash, convert n_items to hash */
6152 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6153 	if (hash_sz < SS_MIN_HASH_SZ)
6154 		hash_sz = SS_MIN_HASH_SZ;
6155 	else if (hash_sz > SS_MAX_HASH_SZ)
6156 		hash_sz = SS_MAX_HASH_SZ;
6157 
6158 	/* allocate soft_state pool */
6159 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6160 	sss->ss_size = size;
6161 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6162 	    hash_sz, mod_hash_null_valdtor);
6163 	*state_p = (ddi_soft_state_bystr *)sss;
6164 	return (0);
6165 }
6166 
int
ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
{
	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
	void			*sso;
	char			*dup_str;

	ASSERT(sss && str && sss->ss_mod_hash);
	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
		return (DDI_FAILURE);
	/* zero-filled state object keyed by a private copy of 'str' */
	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
	if (mod_hash_insert(sss->ss_mod_hash,
	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
		return (DDI_SUCCESS);

	/*
	 * The only error from an strhash insert is caused by a duplicate key.
	 * We refuse to tread on existing elements, so free and fail.
	 */
	kmem_free(dup_str, strlen(dup_str) + 1);
	kmem_free(sso, sss->ss_size);
	return (DDI_FAILURE);
}
6191 
6192 void *
6193 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6194 {
6195 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6196 	void			*sso;
6197 
6198 	ASSERT(sss && str && sss->ss_mod_hash);
6199 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6200 		return (NULL);
6201 
6202 	if (mod_hash_find(sss->ss_mod_hash,
6203 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6204 		return (sso);
6205 	return (NULL);
6206 }
6207 
6208 void
6209 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6210 {
6211 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6212 	void			*sso;
6213 
6214 	ASSERT(sss && str && sss->ss_mod_hash);
6215 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6216 		return;
6217 
6218 	(void) mod_hash_remove(sss->ss_mod_hash,
6219 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6220 	kmem_free(sso, sss->ss_size);
6221 }
6222 
6223 void
6224 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6225 {
6226 	i_ddi_soft_state_bystr	*sss;
6227 
6228 	ASSERT(state_p);
6229 	if (state_p == NULL)
6230 		return;
6231 
6232 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6233 	if (sss == NULL)
6234 		return;
6235 
6236 	ASSERT(sss->ss_mod_hash);
6237 	if (sss->ss_mod_hash) {
6238 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6239 		sss->ss_mod_hash = NULL;
6240 	}
6241 
6242 	kmem_free(sss, sizeof (*sss));
6243 	*state_p = NULL;
6244 }
6245 
6246 /*
6247  * The ddi_strid_* routines provide string-to-index management utilities.
6248  */
6249 /* allocate and initialize an strid set */
6250 int
6251 ddi_strid_init(ddi_strid **strid_p, int n_items)
6252 {
6253 	i_ddi_strid	*ss;
6254 	int		hash_sz;
6255 
6256 	if (strid_p == NULL)
6257 		return (DDI_FAILURE);
6258 
6259 	/* current implementation is based on hash, convert n_items to hash */
6260 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6261 	if (hash_sz < SS_MIN_HASH_SZ)
6262 		hash_sz = SS_MIN_HASH_SZ;
6263 	else if (hash_sz > SS_MAX_HASH_SZ)
6264 		hash_sz = SS_MAX_HASH_SZ;
6265 
6266 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6267 	ss->strid_chunksz = n_items;
6268 	ss->strid_spacesz = n_items;
6269 	ss->strid_space = id_space_create("strid", 1, n_items);
6270 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6271 	    mod_hash_null_valdtor);
6272 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6273 	    mod_hash_null_valdtor);
6274 	*strid_p = (ddi_strid *)ss;
6275 	return (DDI_SUCCESS);
6276 }
6277 
6278 /* allocate an id mapping within the specified set for str, return id */
static id_t
i_ddi_strid_alloc(ddi_strid *strid, char *str)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	id_t		id;
	char		*s;

	ASSERT(ss && str);
	if ((ss == NULL) || (str == NULL))
		return (0);

	/*
	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
	 * range as compressed as possible.  This is important to minimize
	 * the amount of space used when the id is used as a ddi_soft_state
	 * index by the caller.
	 *
	 * If the id list is exhausted, increase the size of the list
	 * by the chunk size specified in ddi_strid_init and reattempt
	 * the allocation
	 */
	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
		id_space_extend(ss->strid_space, ss->strid_spacesz,
		    ss->strid_spacesz + ss->strid_chunksz);
		ss->strid_spacesz += ss->strid_chunksz;
		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
			return (0);
	}

	/*
	 * NOTE: since we create and destroy in unison we can save space by
	 * using bystr key as the byid value.  This means destroy must occur
	 * in (byid, bystr) order.
	 */
	s = i_ddi_strdup(str, KM_SLEEP);
	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
	    (mod_hash_val_t)(intptr_t)id) != 0) {
		/* duplicate key: back out the id allocation and fail */
		ddi_strid_free(strid, id);
		return (0);
	}
	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
	    (mod_hash_val_t)s) != 0) {
		/* back out both the id and the bystr entry */
		ddi_strid_free(strid, id);
		return (0);
	}

	/* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
	return (id);
}
6328 
6329 /* allocate an id mapping within the specified set for str, return id */
id_t
ddi_strid_alloc(ddi_strid *strid, char *str)
{
	/* public wrapper; all the work is in i_ddi_strid_alloc() */
	return (i_ddi_strid_alloc(strid, str));
}
6335 
6336 /* return the id within the specified strid given the str */
6337 id_t
6338 ddi_strid_str2id(ddi_strid *strid, char *str)
6339 {
6340 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6341 	id_t		id = 0;
6342 	mod_hash_val_t	hv;
6343 
6344 	ASSERT(ss && str);
6345 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6346 	    (mod_hash_key_t)str, &hv) == 0))
6347 		id = (int)(intptr_t)hv;
6348 	return (id);
6349 }
6350 
6351 /* return str within the specified strid given the id */
6352 char *
6353 ddi_strid_id2str(ddi_strid *strid, id_t id)
6354 {
6355 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6356 	char		*str = NULL;
6357 	mod_hash_val_t	hv;
6358 
6359 	ASSERT(ss && id > 0);
6360 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6361 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6362 		str = (char *)hv;
6363 	return (str);
6364 }
6365 
6366 /* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	char		*str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	/* fetch the key string before the byid entry disappears */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6385 
6386 /* destroy the strid set */
6387 void
6388 ddi_strid_fini(ddi_strid **strid_p)
6389 {
6390 	i_ddi_strid	*ss;
6391 
6392 	ASSERT(strid_p);
6393 	if (strid_p == NULL)
6394 		return;
6395 
6396 	ss = (i_ddi_strid *)(*strid_p);
6397 	if (ss == NULL)
6398 		return;
6399 
6400 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6401 	if (ss->strid_byid)
6402 		mod_hash_destroy_hash(ss->strid_byid);
6403 	if (ss->strid_byid)
6404 		mod_hash_destroy_hash(ss->strid_bystr);
6405 	if (ss->strid_space)
6406 		id_space_destroy(ss->strid_space);
6407 	kmem_free(ss, sizeof (*ss));
6408 	*strid_p = NULL;
6409 }
6410 
6411 /*
6412  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6413  * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6415  * The double buffer is not freed until dev_info structure destruction
6416  * (by i_ddi_free_node).
6417  */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char	*buf = DEVI(dip)->devi_addr_buf;
	char	*newaddr;

	/* lazily allocate the two MAXNAMELEN halves of the double buffer */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/* copy into whichever half devi_addr is NOT pointing at */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	DEVI(dip)->devi_addr = newaddr;
}
6439 
6440 char *
6441 ddi_get_name_addr(dev_info_t *dip)
6442 {
6443 	return (DEVI(dip)->devi_addr);
6444 }
6445 
6446 void
6447 ddi_set_parent_data(dev_info_t *dip, void *pd)
6448 {
6449 	DEVI(dip)->devi_parent_data = pd;
6450 }
6451 
6452 void *
6453 ddi_get_parent_data(dev_info_t *dip)
6454 {
6455 	return (DEVI(dip)->devi_parent_data);
6456 }
6457 
6458 /*
6459  * ddi_name_to_major: returns the major number of a named module,
6460  * derived from the current driver alias binding.
6461  *
6462  * Caveat: drivers should avoid the use of this function, in particular
6463  * together with ddi_get_name/ddi_binding name, as per
6464  *	major = ddi_name_to_major(ddi_get_name(devi));
6465  * ddi_name_to_major() relies on the state of the device/alias binding,
6466  * which can and does change dynamically as aliases are administered
6467  * over time.  An attached device instance cannot rely on the major
6468  * number returned by ddi_name_to_major() to match its own major number.
6469  *
6470  * For driver use, ddi_driver_major() reliably returns the major number
6471  * for the module to which the device was bound at attach time over
6472  * the life of the instance.
6473  *	major = ddi_driver_major(dev_info_t *)
6474  */
6475 major_t
6476 ddi_name_to_major(char *name)
6477 {
6478 	return (mod_name_to_major(name));
6479 }
6480 
6481 /*
6482  * ddi_major_to_name: Returns the module name bound to a major number.
6483  */
6484 char *
6485 ddi_major_to_name(major_t major)
6486 {
6487 	return (mod_major_to_name(major));
6488 }
6489 
6490 /*
6491  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6492  * pointed at by 'name.'  A devinfo node is named as a result of calling
6493  * ddi_initchild().
6494  *
6495  * Note: the driver must be held before calling this function!
6496  */
6497 char *
6498 ddi_deviname(dev_info_t *dip, char *name)
6499 {
6500 	char *addrname;
6501 	char none = '\0';
6502 
6503 	if (dip == ddi_root_node()) {
6504 		*name = '\0';
6505 		return (name);
6506 	}
6507 
6508 	if (i_ddi_node_state(dip) < DS_BOUND) {
6509 		addrname = &none;
6510 	} else {
6511 		/*
6512 		 * Use ddi_get_name_addr() without checking state so we get
6513 		 * a unit-address if we are called after ddi_set_name_addr()
6514 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6515 		 * node promotion to DS_INITIALIZED.  We currently have
6516 		 * two situations where we are called in this state:
6517 		 *   o  For framework processing of a path-oriented alias.
6518 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6519 		 *	from it's tran_tgt_init(9E) implementation.
6520 		 */
6521 		addrname = ddi_get_name_addr(dip);
6522 		if (addrname == NULL)
6523 			addrname = &none;
6524 	}
6525 
6526 	if (*addrname == '\0') {
6527 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6528 	} else {
6529 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6530 	}
6531 
6532 	return (name);
6533 }
6534 
6535 /*
6536  * Spits out the name of device node, typically name@addr, for a given node,
6537  * using the driver name, not the nodename.
6538  *
6539  * Used by match_parent. Not to be used elsewhere.
6540  */
6541 char *
6542 i_ddi_parname(dev_info_t *dip, char *name)
6543 {
6544 	char *addrname;
6545 
6546 	if (dip == ddi_root_node()) {
6547 		*name = '\0';
6548 		return (name);
6549 	}
6550 
6551 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6552 
6553 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6554 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6555 	else
6556 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6557 	return (name);
6558 }
6559 
6560 static char *
6561 pathname_work(dev_info_t *dip, char *path)
6562 {
6563 	char *bp;
6564 
6565 	if (dip == ddi_root_node()) {
6566 		*path = '\0';
6567 		return (path);
6568 	}
6569 	(void) pathname_work(ddi_get_parent(dip), path);
6570 	bp = path + strlen(path);
6571 	(void) ddi_deviname(dip, bp);
6572 	return (path);
6573 }
6574 
6575 char *
6576 ddi_pathname(dev_info_t *dip, char *path)
6577 {
6578 	return (pathname_work(dip, path));
6579 }
6580 
6581 char *
6582 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6583 {
6584 	if (dmdp->dip == NULL)
6585 		*path = '\0';
6586 	else {
6587 		(void) ddi_pathname(dmdp->dip, path);
6588 		if (dmdp->ddm_name) {
6589 			(void) strcat(path, ":");
6590 			(void) strcat(path, dmdp->ddm_name);
6591 		}
6592 	}
6593 	return (path);
6594 }
6595 
/*
 * Recursive worker for ddi_pathname_obp(): build the OBP path for 'dip'
 * into 'path'.  Returns 'path' on success, or NULL when neither 'dip'
 * nor any ancestor up to the root carries an "obp-path" property.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		/*
		 * NOTE(review): unbounded strcpy — assumes the property
		 * value fits in the caller's buffer (MAXPATHLEN per
		 * ddi_pathname_obp_set()) — confirm.
		 */
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* recurse: fill 'path' with the nearest ancestor's obp path */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 */
	/*
	 * NOTE(review): *(bp - 1) assumes the parent path is non-empty;
	 * an empty "obp-path" property value would read before the
	 * buffer — confirm property values are always non-empty.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6632 
6633 /*
6634  * return the 'obp-path' based path for the given node, or NULL if the node
6635  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6636  * function can't be called from interrupt context (since we need to
6637  * lookup a string property).
6638  */
6639 char *
6640 ddi_pathname_obp(dev_info_t *dip, char *path)
6641 {
6642 	ASSERT(!servicing_interrupt());
6643 	if (dip == NULL || path == NULL)
6644 		return (NULL);
6645 
6646 	/* split work into a separate function to aid debugging */
6647 	return (pathname_work_obp(dip, path));
6648 }
6649 
6650 int
6651 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6652 {
6653 	dev_info_t *pdip;
6654 	char *obp_path = NULL;
6655 	int rc = DDI_FAILURE;
6656 
6657 	if (dip == NULL)
6658 		return (DDI_FAILURE);
6659 
6660 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6661 
6662 	pdip = ddi_get_parent(dip);
6663 
6664 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6665 		(void) ddi_pathname(pdip, obp_path);
6666 	}
6667 
6668 	if (component) {
6669 		(void) strncat(obp_path, "/", MAXPATHLEN);
6670 		(void) strncat(obp_path, component, MAXPATHLEN);
6671 	}
6672 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6673 	    obp_path);
6674 
6675 	if (obp_path)
6676 		kmem_free(obp_path, MAXPATHLEN);
6677 
6678 	return (rc);
6679 }
6680 
6681 /*
6682  * Given a dev_t, return the pathname of the corresponding device in the
6683  * buffer pointed at by "path."  The buffer is assumed to be large enough
6684  * to hold the pathname of the device (MAXPATHLEN).
6685  *
6686  * The pathname of a device is the pathname of the devinfo node to which
6687  * the device "belongs," concatenated with the character ':' and the name
6688  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6689  * just the pathname of the devinfo node is returned without driving attach
6690  * of that node.  For a non-zero spec_type, an attach is performed and a
6691  * search of the minor list occurs.
6692  *
6693  * It is possible that the path associated with the dev_t is not
6694  * currently available in the devinfo tree.  In order to have a
6695  * dev_t, a device must have been discovered before, which means
6696  * that the path is always in the instance tree.  The one exception
6697  * to this is if the dev_t is associated with a pseudo driver, in
6698  * which case the device must exist on the pseudo branch of the
6699  * devinfo tree as a result of parsing .conf files.
6700  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int		circ;
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	/* reject majors beyond the configured device table */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		/* hold the node busy while walking its minor list */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		/* fail only after the node has been exited and released */
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

	/* all failure paths return an empty path */
fail:	*path = 0;
	return (DDI_FAILURE);
}
6755 
6756 /*
6757  * Given a major number and an instance, return the path.
6758  * This interface does NOT drive attach.
6759  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	/* reject invalid major or the "no instance" sentinel */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we formatted: keep searching */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6812 
6813 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6814 
6815 /*
6816  * Given the dip for a network interface return the ppa for that interface.
6817  *
6818  * In all cases except GLD v0 drivers, the ppa == instance.
6819  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6820  * So for these drivers when the attach routine calls gld_register(),
6821  * the GLD framework creates an integer property called "gld_driver_ppa"
6822  * that can be queried here.
6823  *
6824  * The only time this function is used is when a system is booting over nfs.
6825  * In this case the system has to resolve the pathname of the boot device
6826  * to it's ppa.
6827  */
6828 int
6829 i_ddi_devi_get_ppa(dev_info_t *dip)
6830 {
6831 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6832 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6833 	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
6834 }
6835 
6836 /*
6837  * i_ddi_devi_set_ppa() should only be called from gld_register()
6838  * and only for GLD v0 drivers
6839  */
6840 void
6841 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6842 {
6843 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6844 }
6845 
6846 
6847 /*
6848  * Private DDI Console bell functions.
6849  */
6850 void
6851 ddi_ring_console_bell(clock_t duration)
6852 {
6853 	if (ddi_console_bell_func != NULL)
6854 		(*ddi_console_bell_func)(duration);
6855 }
6856 
6857 void
6858 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6859 {
6860 	ddi_console_bell_func = bellfunc;
6861 }
6862 
6863 int
6864 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6865     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6866 {
6867 	int (*funcp)() = ddi_dma_allochdl;
6868 	ddi_dma_attr_t dma_attr;
6869 	struct bus_ops *bop;
6870 
6871 	if (attr == (ddi_dma_attr_t *)0)
6872 		return (DDI_DMA_BADATTR);
6873 
6874 	dma_attr = *attr;
6875 
6876 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6877 	if (bop && bop->bus_dma_allochdl)
6878 		funcp = bop->bus_dma_allochdl;
6879 
6880 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6881 }
6882 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/*
	 * The local 'h' is load-bearing: the HD macro appears to expand
	 * in terms of a variable named 'h' to reach the handle's
	 * requesting dip — confirm against the macro definition.
	 */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6889 
6890 static uintptr_t dma_mem_list_id = 0;
6891 
6892 
6893 int
6894 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6895     ddi_device_acc_attr_t *accattrp, uint_t flags,
6896     int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6897     size_t *real_length, ddi_acc_handle_t *handlep)
6898 {
6899 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6900 	dev_info_t *dip = hp->dmai_rdip;
6901 	ddi_acc_hdl_t *ap;
6902 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6903 	uint_t sleepflag, xfermodes;
6904 	int (*fp)(caddr_t);
6905 	int rval;
6906 
6907 	if (waitfp == DDI_DMA_SLEEP)
6908 		fp = (int (*)())KM_SLEEP;
6909 	else if (waitfp == DDI_DMA_DONTWAIT)
6910 		fp = (int (*)())KM_NOSLEEP;
6911 	else
6912 		fp = waitfp;
6913 	*handlep = impl_acc_hdl_alloc(fp, arg);
6914 	if (*handlep == NULL)
6915 		return (DDI_FAILURE);
6916 
6917 	/* check if the cache attributes are supported */
6918 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6919 		return (DDI_FAILURE);
6920 
6921 	/*
6922 	 * Transfer the meaningful bits to xfermodes.
6923 	 * Double-check if the 3rd party driver correctly sets the bits.
6924 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6925 	 */
6926 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6927 	if (xfermodes == 0) {
6928 		xfermodes = DDI_DMA_STREAMING;
6929 	}
6930 
6931 	/*
6932 	 * initialize the common elements of data access handle
6933 	 */
6934 	ap = impl_acc_hdl_get(*handlep);
6935 	ap->ah_vers = VERS_ACCHDL;
6936 	ap->ah_dip = dip;
6937 	ap->ah_offset = 0;
6938 	ap->ah_len = 0;
6939 	ap->ah_xfermodes = flags;
6940 	ap->ah_acc = *accattrp;
6941 
6942 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6943 	if (xfermodes == DDI_DMA_CONSISTENT) {
6944 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6945 		    flags, accattrp, kaddrp, NULL, ap);
6946 		*real_length = length;
6947 	} else {
6948 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6949 		    flags, accattrp, kaddrp, real_length, ap);
6950 	}
6951 	if (rval == DDI_SUCCESS) {
6952 		ap->ah_len = (off_t)(*real_length);
6953 		ap->ah_addr = *kaddrp;
6954 	} else {
6955 		impl_acc_hdl_free(*handlep);
6956 		*handlep = (ddi_acc_handle_t)NULL;
6957 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6958 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6959 		}
6960 		rval = DDI_FAILURE;
6961 	}
6962 	return (rval);
6963 }
6964 
6965 void
6966 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6967 {
6968 	ddi_acc_hdl_t *ap;
6969 
6970 	ap = impl_acc_hdl_get(*handlep);
6971 	ASSERT(ap);
6972 
6973 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
6974 
6975 	/*
6976 	 * free the handle
6977 	 */
6978 	impl_acc_hdl_free(*handlep);
6979 	*handlep = (ddi_acc_handle_t)NULL;
6980 
6981 	if (dma_mem_list_id != 0) {
6982 		ddi_run_callback(&dma_mem_list_id);
6983 	}
6984 }
6985 
/*
 * Bind the memory described by buf(9S) 'bp' to the DMA handle by
 * building a ddi_dma_req and dispatching it to the bind function cached
 * on the requesting node.  NULL cookiep/ccountp are tolerated by
 * substituting local scratch storage.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the object by its page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list accompanies the vaddr */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the bind routine cached on the requesting node */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7049 
7050 int
7051 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7052     caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7053     caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7054 {
7055 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7056 	dev_info_t *dip, *rdip;
7057 	struct ddi_dma_req dmareq;
7058 	int (*funcp)();
7059 	ddi_dma_cookie_t cookie;
7060 	uint_t count;
7061 
7062 	if (len == (uint_t)0) {
7063 		return (DDI_DMA_NOMAPPING);
7064 	}
7065 
7066 	if (cookiep == NULL)
7067 		cookiep = &cookie;
7068 
7069 	if (ccountp == NULL)
7070 		ccountp = &count;
7071 
7072 	dmareq.dmar_flags = flags;
7073 	dmareq.dmar_fp = waitfp;
7074 	dmareq.dmar_arg = arg;
7075 	dmareq.dmar_object.dmao_size = len;
7076 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7077 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7078 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7079 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7080 
7081 	dip = rdip = hp->dmai_rdip;
7082 	if (dip != ddi_root_node())
7083 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7084 	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7085 	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7086 }
7087 
7088 void
7089 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7090 {
7091 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7092 	ddi_dma_cookie_t *cp;
7093 
7094 	if (hp->dmai_curcookie >= hp->dmai_ncookies) {
7095 		panic("ddi_dma_nextcookie() called too many times on handle %p",
7096 		    hp);
7097 	}
7098 
7099 	cp = hp->dmai_cookie;
7100 	ASSERT(cp);
7101 
7102 	cookiep->dmac_notused = cp->dmac_notused;
7103 	cookiep->dmac_type = cp->dmac_type;
7104 	cookiep->dmac_address = cp->dmac_address;
7105 	cookiep->dmac_size = cp->dmac_size;
7106 	hp->dmai_cookie++;
7107 	hp->dmai_curcookie++;
7108 }
7109 
7110 int
7111 ddi_dma_ncookies(ddi_dma_handle_t handle)
7112 {
7113 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7114 
7115 	return (hp->dmai_ncookies);
7116 }
7117 
/*
 * Iterate over a bound handle's cookie array: pass NULL to get the
 * first cookie, then the previous return value to get the next one.
 * Returns NULL when the binding is empty, the iterator is exhausted,
 * or 'iter' does not point into this handle's cookie array.
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base, *end;

	if (hp->dmai_ncookies == 0) {
		return (NULL);
	}

	/* rewind from the handle's current position to cookie zero */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	end = base + hp->dmai_ncookies;
	if (iter == NULL) {
		return (base);
	}

	/* reject iterators that are not within this cookie array */
	if ((uintptr_t)iter < (uintptr_t)base ||
	    (uintptr_t)iter >= (uintptr_t)end) {
		return (NULL);
	}

	iter++;
	if (iter == end) {
		return (NULL);
	}

	return (iter);
}
7146 
7147 const ddi_dma_cookie_t *
7148 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index)
7149 {
7150 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7151 	const ddi_dma_cookie_t *base;
7152 
7153 	if (index >= hp->dmai_ncookies) {
7154 		return (NULL);
7155 	}
7156 
7157 	base = hp->dmai_cookie - hp->dmai_curcookie;
7158 	return (base + index);
7159 }
7160 
7161 const ddi_dma_cookie_t *
7162 ddi_dma_cookie_one(ddi_dma_handle_t handle)
7163 {
7164 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7165 	const ddi_dma_cookie_t *base;
7166 
7167 	if (hp->dmai_ncookies != 1) {
7168 		panic("ddi_dma_cookie_one() called with improper handle %p",
7169 		    hp);
7170 	}
7171 	ASSERT3P(hp->dmai_cookie, !=, NULL);
7172 
7173 	base = hp->dmai_cookie - hp->dmai_curcookie;
7174 	return (base);
7175 }
7176 
7177 int
7178 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7179 {
7180 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7181 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7182 		return (DDI_FAILURE);
7183 	} else {
7184 		*nwinp = hp->dmai_nwin;
7185 		return (DDI_SUCCESS);
7186 	}
7187 }
7188 
7189 int
7190 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7191     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7192 {
7193 	int (*funcp)() = ddi_dma_win;
7194 	struct bus_ops *bop;
7195 	ddi_dma_cookie_t cookie;
7196 	uint_t count;
7197 
7198 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7199 	if (bop && bop->bus_dma_win)
7200 		funcp = bop->bus_dma_win;
7201 
7202 	if (cookiep == NULL)
7203 		cookiep = &cookie;
7204 
7205 	if (ccountp == NULL)
7206 		ccountp = &count;
7207 
7208 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7209 }
7210 
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/*
	 * Request 64-bit SBus transfers via the DMA mctl interface.
	 * NOTE(review): HD appears to expand in terms of the parameter
	 * 'h' to reach the handle's requesting dip — confirm against
	 * the macro definition before renaming 'h'.
	 */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7217 
7218 int
7219 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7220 {
7221 	return (hp->dmai_fault);
7222 }
7223 
7224 int
7225 ddi_check_dma_handle(ddi_dma_handle_t handle)
7226 {
7227 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7228 	int (*check)(ddi_dma_impl_t *);
7229 
7230 	if ((check = hp->dmai_fault_check) == NULL)
7231 		check = i_ddi_dma_fault_check;
7232 
7233 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7234 }
7235 
7236 void
7237 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7238 {
7239 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7240 	void (*notify)(ddi_dma_impl_t *);
7241 
7242 	if (!hp->dmai_fault) {
7243 		hp->dmai_fault = 1;
7244 		if ((notify = hp->dmai_fault_notify) != NULL)
7245 			(*notify)(hp);
7246 	}
7247 }
7248 
7249 void
7250 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7251 {
7252 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7253 	void (*notify)(ddi_dma_impl_t *);
7254 
7255 	if (hp->dmai_fault) {
7256 		hp->dmai_fault = 0;
7257 		if ((notify = hp->dmai_fault_notify) != NULL)
7258 			(*notify)(hp);
7259 	}
7260 }
7261 
7262 /*
7263  * register mapping routines.
7264  */
/*
 * Map register set 'rnumber' of 'dip' for kernel access: allocate an
 * access handle, issue a DDI_MO_MAP_LOCKED request up the tree via
 * ddi_map(), and return the kernel virtual address through *addrp.
 * On failure the handle is freed and *handle is NULL.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* failed: release the handle so nothing leaks */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7310 
/*
 * Undo ddi_regs_map_setup(): issue a DDI_MO_UNMAP request mirroring the
 * original mapping, then free the access handle and NULL *handlep.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the request from the parameters saved on the handle */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7339 
7340 int
7341 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7342     ssize_t dev_advcnt, uint_t dev_datasz)
7343 {
7344 	uint8_t *b;
7345 	uint16_t *w;
7346 	uint32_t *l;
7347 	uint64_t *ll;
7348 
7349 	/* check for total byte count is multiple of data transfer size */
7350 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7351 		return (DDI_FAILURE);
7352 
7353 	switch (dev_datasz) {
7354 	case DDI_DATA_SZ01_ACC:
7355 		for (b = (uint8_t *)dev_addr;
7356 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7357 			ddi_put8(handle, b, 0);
7358 		break;
7359 	case DDI_DATA_SZ02_ACC:
7360 		for (w = (uint16_t *)dev_addr;
7361 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7362 			ddi_put16(handle, w, 0);
7363 		break;
7364 	case DDI_DATA_SZ04_ACC:
7365 		for (l = (uint32_t *)dev_addr;
7366 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7367 			ddi_put32(handle, l, 0);
7368 		break;
7369 	case DDI_DATA_SZ08_ACC:
7370 		for (ll = (uint64_t *)dev_addr;
7371 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7372 			ddi_put64(handle, ll, 0x0ll);
7373 		break;
7374 	default:
7375 		return (DDI_FAILURE);
7376 	}
7377 	return (DDI_SUCCESS);
7378 }
7379 
/*
 * Copy 'bytecount' bytes between two device mappings in units of
 * 'dev_datasz' bytes, advancing the source by src_advcnt elements and
 * the destination by dest_advcnt elements after each item.  Fails if
 * bytecount is not a multiple of dev_datasz or the size is unknown.
 */
int
ddi_device_copy(
	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
	size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7445 
/*
 * Byte-order reversal helpers: swap16 exchanges the two bytes of a
 * 16-bit value; swap32 and swap64 are built from swap16 and swap32
 * respectively by swapping the halves and reversing each half.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7457 
uint16_t
ddi_swap16(uint16_t value)
{
	/* exchange the two bytes of a 16-bit quantity */
	return ((value & 0xff) << 8 | (value >> 8));
}
7463 
uint32_t
ddi_swap32(uint32_t value)
{
	/* reverse the byte order of a 32-bit quantity */
	return (((value & 0xff) << 24) | ((value & 0xff00) << 8) |
	    ((value >> 8) & 0xff00) | (value >> 24));
}
7469 
uint64_t
ddi_swap64(uint64_t value)
{
	/* reverse the byte order of a 64-bit quantity */
	return (((value & 0x00000000000000ffULL) << 56) |
	    ((value & 0x000000000000ff00ULL) << 40) |
	    ((value & 0x0000000000ff0000ULL) << 24) |
	    ((value & 0x00000000ff000000ULL) << 8) |
	    ((value & 0x000000ff00000000ULL) >> 8) |
	    ((value & 0x0000ff0000000000ULL) >> 24) |
	    ((value & 0x00ff000000000000ULL) >> 40) |
	    ((value >> 56) & 0xffULL));
}
7475 
7476 /*
7477  * Convert a binding name to a driver name.
7478  * A binding name is the name used to determine the driver for a
7479  * device - it may be either an alias for the driver or the name
7480  * of the driver itself.
7481  */
7482 char *
7483 i_binding_to_drv_name(char *bname)
7484 {
7485 	major_t major_no;
7486 
7487 	ASSERT(bname != NULL);
7488 
7489 	if ((major_no = ddi_name_to_major(bname)) == -1)
7490 		return (NULL);
7491 	return (ddi_major_to_name(major_no));
7492 }
7493 
7494 /*
7495  * Search for minor name that has specified dev_t and spec_type.
7496  * If spec_type is zero then any dev_t match works.  Since we
7497  * are returning a pointer to the minor name string, we require the
7498  * caller to do the locking.
7499  */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	/* caller must hold the node busy; we return internal storage */
	ASSERT(DEVI_BUSY_OWNED(dip));
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/*
		 * Match: a real minor node type, the same dev_t, and
		 * either no spec_type constraint (no S_IFCHR/S_IFBLK
		 * bits set) or an exact spec_type match.
		 */
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	/* no matching minor node */
	return (NULL);
}
7528 
7529 /*
7530  * Find the devt and spectype of the specified minor_name.
7531  * Return DDI_FAILURE if minor_name not found. Since we are
7532  * returning everything via arguments we can do the locking.
7533  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
    dev_t *devtp, int *spectypep)
{
	int			circ;
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* the hold is only needed for the STREAMSTAB check */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the driver's major as its minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* hold the node busy while walking its minor list */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7592 
/*
 * Generation-number state used by ddi_devid_init() when fabricating
 * DEVID_FAB devids; devid_gen_number is incremented under
 * devid_gen_mutex for each fabricated devid.
 */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;
7595 
#ifdef DEBUG

/*
 * DEBUG knobs: when set nonzero (e.g. from the debugger), these cause
 * i_ddi_devid_register() to deliberately corrupt the devid id bytes or
 * driver-name hint, either globally or for a specific major number, to
 * exercise devid error paths.
 */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* enables the DDI_DEBUG_DEVID_DEVTS tracing below */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7614 
7615 
7616 #ifdef	DEBUG
7617 
7618 static void
7619 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7620 {
7621 	int i;
7622 
7623 	cmn_err(CE_CONT, "%s:\n", msg);
7624 	for (i = 0; i < ndevs; i++) {
7625 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7626 	}
7627 }
7628 
7629 static void
7630 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7631 {
7632 	int i;
7633 
7634 	cmn_err(CE_CONT, "%s:\n", msg);
7635 	for (i = 0; i < npaths; i++) {
7636 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7637 	}
7638 }
7639 
7640 static void
7641 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7642 {
7643 	int i;
7644 
7645 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7646 	for (i = 0; i < ndevs; i++) {
7647 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7648 	}
7649 }
7650 
7651 #endif	/* DEBUG */
7652 
7653 /*
7654  * Register device id into DDI framework.
7655  * Must be called when the driver is bound.
7656  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* the hint field is fixed-size and not NUL-terminated */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
7718 
7719 int
7720 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7721 {
7722 	int rval;
7723 
7724 	rval = i_ddi_devid_register(dip, devid);
7725 	if (rval == DDI_SUCCESS) {
7726 		/*
7727 		 * Register devid in devid-to-path cache
7728 		 */
7729 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7730 			mutex_enter(&DEVI(dip)->devi_lock);
7731 			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7732 			mutex_exit(&DEVI(dip)->devi_lock);
7733 		} else if (ddi_get_name_addr(dip)) {
7734 			/*
7735 			 * We only expect cache_register DDI_FAILURE when we
7736 			 * can't form the full path because of NULL devi_addr.
7737 			 */
7738 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7739 			    ddi_driver_name(dip), ddi_get_instance(dip));
7740 		}
7741 	} else {
7742 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7743 		    ddi_driver_name(dip), ddi_get_instance(dip));
7744 	}
7745 	return (rval);
7746 }
7747 
7748 /*
7749  * Remove (unregister) device id from DDI framework.
7750  * Must be called when device is detached.
7751  */
7752 static void
7753 i_ddi_devid_unregister(dev_info_t *dip)
7754 {
7755 	if (DEVI(dip)->devi_devid_str) {
7756 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7757 		DEVI(dip)->devi_devid_str = NULL;
7758 	}
7759 
7760 	/* remove the devid property */
7761 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7762 }
7763 
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* clear the cached-devid flag before dropping the cache entry */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	/* then undo the cache registration and the framework registration */
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7773 
7774 /*
7775  * Allocate and initialize a device id.
7776  */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* sizeof (*i_devid) includes one byte of did_id; adjust for nbytes */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_NVME_NSID:
		/*FALLTHRU*/
	case DEVID_NVME_EUI64:
		/*FALLTHRU*/
	case DEVID_NVME_NGUID:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* caller-supplied id types require a non-empty id buffer */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids are generated here; no caller data allowed */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + 32-bit timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP is documented to sleep
	 * rather than return NULL, so this check looks vestigial — confirm.
	 */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7895 
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	/* no specific dev_t; look up the node's devid property directly */
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7901 
7902 int
7903 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7904 {
7905 	char		*devidstr;
7906 
7907 	ASSERT(dev != DDI_DEV_T_NONE);
7908 
7909 	/* look up the property, devt specific first */
7910 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7911 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7912 		if ((dev == DDI_DEV_T_ANY) ||
7913 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7914 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7915 		    DDI_PROP_SUCCESS)) {
7916 			return (DDI_FAILURE);
7917 		}
7918 	}
7919 
7920 	/* convert to binary form */
7921 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7922 		ddi_prop_free(devidstr);
7923 		return (DDI_FAILURE);
7924 	}
7925 	ddi_prop_free(devidstr);
7926 	return (DDI_SUCCESS);
7927 }
7928 
7929 /*
7930  * Return a copy of the device id for dev_t
7931  */
7932 int
7933 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7934 {
7935 	dev_info_t	*dip;
7936 	int		rval;
7937 
7938 	/* get the dip */
7939 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7940 		return (DDI_FAILURE);
7941 
7942 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7943 
7944 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7945 	return (rval);
7946 }
7947 
7948 /*
7949  * Return a copy of the minor name for dev_t and spec_type
7950  */
7951 int
7952 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7953 {
7954 	char		*buf;
7955 	int		circ;
7956 	dev_info_t	*dip;
7957 	char		*nm;
7958 	int		rval;
7959 
7960 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7961 		*minor_name = NULL;
7962 		return (DDI_FAILURE);
7963 	}
7964 
7965 	/* Find the minor name and copy into max size buf */
7966 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7967 	ndi_devi_enter(dip, &circ);
7968 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7969 	if (nm)
7970 		(void) strcpy(buf, nm);
7971 	ndi_devi_exit(dip, circ);
7972 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7973 
7974 	if (nm) {
7975 		/* duplicate into min size buf for return result */
7976 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7977 		rval = DDI_SUCCESS;
7978 	} else {
7979 		*minor_name = NULL;
7980 		rval = DDI_FAILURE;
7981 	}
7982 
7983 	/* free max size buf and return */
7984 	kmem_free(buf, MAXNAMELEN);
7985 	return (rval);
7986 }
7987 
7988 int
7989 ddi_lyr_devid_to_devlist(
7990 	ddi_devid_t	devid,
7991 	char		*minor_name,
7992 	int		*retndevs,
7993 	dev_t		**retdevs)
7994 {
7995 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7996 
7997 	if (e_devid_cache_to_devt_list(devid, minor_name,
7998 	    retndevs, retdevs) == DDI_SUCCESS) {
7999 		ASSERT(*retndevs > 0);
8000 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8001 		    *retndevs, *retdevs);
8002 		return (DDI_SUCCESS);
8003 	}
8004 
8005 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8006 		return (DDI_FAILURE);
8007 	}
8008 
8009 	if (e_devid_cache_to_devt_list(devid, minor_name,
8010 	    retndevs, retdevs) == DDI_SUCCESS) {
8011 		ASSERT(*retndevs > 0);
8012 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8013 		    *retndevs, *retdevs);
8014 		return (DDI_SUCCESS);
8015 	}
8016 
8017 	return (DDI_FAILURE);
8018 }
8019 
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8025 
8026 /*
8027  * Note: This will need to be fixed if we ever allow processes to
8028  * have more than one data model per exec.
8029  */
model_t
ddi_mmap_get_model(void)
{
	/* data model of the current user process */
	return (get_udatamodel());
}
8035 
model_t
ddi_model_convert_from(model_t model)
{
	/*
	 * Mask down to the model bits and strip DDI_MODEL_NATIVE,
	 * yielding a canonical (non-native) model value.
	 */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8041 
8042 /*
8043  * ddi interfaces managing storage and retrieval of eventcookies.
8044  */
8045 
8046 /*
8047  * Invoke bus nexus driver's implementation of the
8048  * (*bus_remove_eventcall)() interface to remove a registered
8049  * callback handler for "event".
8050  */
8051 int
8052 ddi_remove_event_handler(ddi_callback_id_t id)
8053 {
8054 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8055 	dev_info_t *ddip;
8056 
8057 	ASSERT(cb);
8058 	if (!cb) {
8059 		return (DDI_FAILURE);
8060 	}
8061 
8062 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8063 	return (ndi_busop_remove_eventcall(ddip, id));
8064 }
8065 
8066 /*
8067  * Invoke bus nexus driver's implementation of the
8068  * (*bus_add_eventcall)() interface to register a callback handler
8069  * for "event".
8070  */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	/* dip serves as both the requesting and the target devinfo */
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8078 
8079 
8080 /*
8081  * Return a handle for event "name" by calling up the device tree
8082  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8083  * by a bus nexus or top of dev_info tree is reached.
8084  */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* dip serves as both the requesting and the target devinfo */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8092 
8093 /*
8094  * This procedure is provided as the general callback function when
8095  * umem_lockmemory calls as_add_callback for long term memory locking.
8096  * When as_unmap, as_setprot, or as_free encounter segments which have
8097  * locked memory, this callback will be invoked.
8098  */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	/* (the other reference is held by i_ddi_umem_unlock) */
	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8122 
8123 /*
8124  * The following two Consolidation Private routines provide generic
8125  * interfaces to increase/decrease the amount of device-locked memory.
8126  *
8127  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8128  * must be called every time i_ddi_incr_locked_memory() is called.
8129  */
8130 int
8131 /* ARGSUSED */
8132 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8133 {
8134 	ASSERT(procp != NULL);
8135 	mutex_enter(&procp->p_lock);
8136 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8137 		mutex_exit(&procp->p_lock);
8138 		return (ENOMEM);
8139 	}
8140 	mutex_exit(&procp->p_lock);
8141 	return (0);
8142 }
8143 
8144 /*
8145  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8146  * must be called every time i_ddi_decr_locked_memory() is called.
8147  */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* uncharge the process' locked-memory rctl under p_lock */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8157 
8158 /*
8159  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8160  * charge device locked memory to the max-locked-memory rctl.  Tracking
8161  * device locked memory causes the rctl locks to get hot under high-speed
8162  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8163  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8164  * flag tells us if the rctl value should be updated when unlocking the memory,
8165  * in case the rctl gets changed after the memory was locked.  Any device
8166  * locked memory in that rare case will not be counted toward the rctl limit.
8167  *
8168  * When tracking the locked memory, the kproject_t parameter is always NULL
8169  * in the code paths:
8170  *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8171  *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8172  * Thus, we always use the tk_proj member to check the projp setting.
8173  */
8174 static void
8175 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8176 {
8177 	proc_t		*p;
8178 	kproject_t	*projp;
8179 	zone_t		*zonep;
8180 
8181 	ASSERT(cookie);
8182 	p = cookie->procp;
8183 	ASSERT(p);
8184 
8185 	zonep = p->p_zone;
8186 	projp = p->p_task->tk_proj;
8187 
8188 	ASSERT(zonep);
8189 	ASSERT(projp);
8190 
8191 	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8192 	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8193 		cookie->upd_max_lock_rctl = 0;
8194 	else
8195 		cookie->upd_max_lock_rctl = 1;
8196 }
8197 
8198 /*
8199  * This routine checks if the max-locked-memory resource ctl is
8200  * exceeded, if not increments it, grabs a hold on the project.
8201  * Returns 0 if successful otherwise returns error code
8202  */
8203 static int
8204 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8205 {
8206 	proc_t		*procp;
8207 	int		ret;
8208 
8209 	ASSERT(cookie);
8210 	if (cookie->upd_max_lock_rctl == 0)
8211 		return (0);
8212 
8213 	procp = cookie->procp;
8214 	ASSERT(procp);
8215 
8216 	if ((ret = i_ddi_incr_locked_memory(procp,
8217 	    cookie->size)) != 0) {
8218 		return (ret);
8219 	}
8220 	return (0);
8221 }
8222 
8223 /*
8224  * Decrements the max-locked-memory resource ctl and releases
8225  * the hold on the project that was acquired during umem_incr_devlockmem
8226  */
8227 static void
8228 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8229 {
8230 	proc_t		*proc;
8231 
8232 	if (cookie->upd_max_lock_rctl == 0)
8233 		return;
8234 
8235 	proc = (proc_t *)cookie->procp;
8236 	if (!proc)
8237 		return;
8238 
8239 	i_ddi_decr_locked_memory(proc, cookie->size);
8240 }
8241 
8242 /*
8243  * A consolidation private function which is essentially equivalent to
8244  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8245  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8246  * the ops_vector is valid.
8247  *
8248  * Lock the virtual address range in the current process and create a
8249  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8250  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8251  * to user space.
8252  *
8253  * Note: The resource control accounting currently uses a full charge model
8254  * in other words attempts to lock the same/overlapping areas of memory
8255  * will deduct the full size of the buffer from the projects running
8256  * counter for the device locked memory.
8257  *
8258  * addr, size should be PAGESIZE aligned
8259  *
8260  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8261  *	identifies whether the locked memory will be read or written or both
8262  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8263  * be maintained for an indefinitely long period (essentially permanent),
8264  * rather than for what would be required for a typical I/O completion.
8265  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8266  * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
8269  *
8270  * Returns 0 on success
8271  *	EINVAL - for invalid parameters
8272  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8273  *	ENOMEM - is returned if the current request to lock memory exceeds
8274  *		*.max-locked-memory resource control value.
8275  *      EFAULT - memory pertains to a regular file mapped shared and
8276  *		and DDI_UMEMLOCK_LONGTERM flag is set
8277  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8278  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* unwind the rctl charge taken above */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8452 
8453 /*
8454  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8455  * the cookie.  Called from i_ddi_umem_unlock_thread.
8456  */
8457 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that is was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8519 
8520 /*
8521  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8522  *
8523  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8524  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8525  * via calls to ddi_umem_unlock.
8526  */
8527 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			/*
			 * Drop the list lock before doing the actual unlock;
			 * i_ddi_umem_unlock() may block (as_pageunlock etc.)
			 * and new requests can be queued meanwhile.
			 */
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			/*
			 * Mark ourselves CPR-safe around the cv_wait so a
			 * suspend/checkpoint is not held up by this thread.
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8559 
8560 /*
8561  * Start the thread that will process the ddi_umem_unlock list if it is
8562  * not already started (i_ddi_umem_unlock_thread).
8563  */
8564 static void
8565 i_ddi_umem_unlock_thread_start(void)
8566 {
8567 	mutex_enter(&ddi_umem_unlock_mutex);
8568 	if (ddi_umem_unlock_thread == NULL) {
8569 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8570 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8571 		    TS_RUN, minclsyspri);
8572 	}
8573 	mutex_exit(&ddi_umem_unlock_mutex);
8574 }
8575 
8576 /*
8577  * Lock the virtual address range in the current process and create a
8578  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8579  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8580  * to user space.
8581  *
8582  * Note: The resource control accounting currently uses a full charge model
8583  * in other words attempts to lock the same/overlapping areas of memory
8584  * will deduct the full size of the buffer from the projects running
8585  * counter for the device locked memory. This applies to umem_lockmemory too.
8586  *
8587  * addr, size should be PAGESIZE aligned
8588  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8589  *	identifies whether the locked memory will be read or written or both
8590  *
8591  * Returns 0 on success
8592  *	EINVAL - for invalid parameters
8593  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8594  *	ENOMEM - is returned if the current request to lock memory exceeds
8595  *		*.max-locked-memory resource control value.
8596  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8597  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	init_lockedmem_rctl_flag(p);

	/* Charge the locked memory against the *.max-locked-memory rctl */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* Undo the rctl charge taken above before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	/*
	 * Single reference: no as_add_callback()/umem_lock_undo is
	 * registered on this path, so only ddi_umem_unlock releases it.
	 */
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8684 
8685 /*
8686  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8687  * unlocked by i_ddi_umem_unlock_thread.
8688  */
8689 
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			/*
			 * List was empty, so the worker may be sleeping in
			 * cv_wait; wake it.  No need to signal when the list
			 * was non-empty: the worker is already draining it.
			 */
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8721 
8722 /*
8723  * Create a buf structure from a ddi_umem_cookie
8724  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8725  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8726  * off, len - identifies the portion of the memory represented by the cookie
8727  *		that the buf points to.
8728  *	NOTE: off, len need to follow the alignment/size restrictions of the
8729  *		device (dev) that this buf will be passed to. Some devices
8730  *		will accept unrestricted alignment/size, whereas others (such as
8731  *		st) require some block-size alignment/size. It is the caller's
8732  *		responsibility to ensure that the alignment/size restrictions
8733  *		are met (we cannot assert as we do not know the restrictions)
8734  *
8735  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8736  *		the flags used in ddi_umem_lock
8737  *
8738  * The following three arguments are used to initialize fields in the
8739  * buf structure and are uninterpreted by this routine.
8740  *
8741  * dev
8742  * blkno
8743  * iodone
8744  *
8745  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8746  *
8747  * Returns a buf structure pointer on success (to be freed by freerbuf)
8748  *	NULL on any parameter error or memory alloc failure
8749  *
8750  */
8751 struct buf *
8752 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8753     int direction, dev_t dev, daddr_t blkno,
8754     int (*iodone)(struct buf *), int sleepflag)
8755 {
8756 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8757 	struct buf *bp;
8758 
8759 	/*
8760 	 * check for valid cookie offset, len
8761 	 */
8762 	if ((off + len) > p->size) {
8763 		return (NULL);
8764 	}
8765 
8766 	if (len > p->size) {
8767 		return (NULL);
8768 	}
8769 
8770 	/* direction has to be one of B_READ or B_WRITE */
8771 	if ((direction != B_READ) && (direction != B_WRITE)) {
8772 		return (NULL);
8773 	}
8774 
8775 	/* These are the only two valid sleepflags */
8776 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8777 		return (NULL);
8778 	}
8779 
8780 	/*
8781 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8782 	 */
8783 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8784 		return (NULL);
8785 	}
8786 
8787 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8788 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8789 	    (p->procp == NULL) : (p->procp != NULL));
8790 
8791 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8792 	if (bp == NULL) {
8793 		return (NULL);
8794 	}
8795 	bioinit(bp);
8796 
8797 	bp->b_flags = B_BUSY | B_PHYS | direction;
8798 	bp->b_edev = dev;
8799 	bp->b_lblkno = blkno;
8800 	bp->b_iodone = iodone;
8801 	bp->b_bcount = len;
8802 	bp->b_proc = (proc_t *)p->procp;
8803 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8804 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8805 	if (p->pparray != NULL) {
8806 		bp->b_flags |= B_SHADOW;
8807 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8808 		bp->b_shadow = p->pparray + btop(off);
8809 	}
8810 	return (bp);
8811 }
8812 
8813 /*
8814  * Fault-handling and related routines
8815  */
8816 
8817 ddi_devstate_t
8818 ddi_get_devstate(dev_info_t *dip)
8819 {
8820 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8821 		return (DDI_DEVSTATE_OFFLINE);
8822 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8823 		return (DDI_DEVSTATE_DOWN);
8824 	else if (DEVI_IS_BUS_QUIESCED(dip))
8825 		return (DDI_DEVSTATE_QUIESCED);
8826 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8827 		return (DDI_DEVSTATE_DEGRADED);
8828 	else
8829 		return (DDI_DEVSTATE_UP);
8830 }
8831 
void
ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
    ddi_fault_location_t location, const char *message)
{
	struct ddi_fault_event_data fd;
	ddi_eventcookie_t ec;

	/*
	 * Assemble all the information into a fault-event-data structure
	 */
	fd.f_dip = dip;
	fd.f_impact = impact;
	fd.f_location = location;
	fd.f_message = message;
	fd.f_oldstate = ddi_get_devstate(dip);

	/*
	 * Get eventcookie from defining parent.
	 * If no parent defines DDI_DEVI_FAULT_EVENT, the report is
	 * silently dropped.
	 */
	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
	    DDI_SUCCESS)
		return;

	(void) ndi_post_event(dip, dip, ec, &fd);
}
8857 
/*
 * Return the device-class string stored on dip (NULL if none has been
 * set via i_ddi_set_devi_class).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8863 
8864 int
8865 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag)
8866 {
8867 	struct dev_info *devi = DEVI(dip);
8868 
8869 	mutex_enter(&devi->devi_lock);
8870 
8871 	if (devi->devi_device_class)
8872 		kmem_free(devi->devi_device_class,
8873 		    strlen(devi->devi_device_class) + 1);
8874 
8875 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8876 	    != NULL) {
8877 		mutex_exit(&devi->devi_lock);
8878 		return (DDI_SUCCESS);
8879 	}
8880 
8881 	mutex_exit(&devi->devi_lock);
8882 
8883 	return (DDI_FAILURE);
8884 }
8885 
8886 
8887 /*
8888  * Task Queues DDI interfaces.
8889  */
8890 
8891 /* ARGSUSED */
8892 ddi_taskq_t *
8893 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8894     pri_t pri, uint_t cflags)
8895 {
8896 	char full_name[TASKQ_NAMELEN];
8897 	const char *tq_name;
8898 	int nodeid = 0;
8899 
8900 	if (dip == NULL)
8901 		tq_name = name;
8902 	else {
8903 		nodeid = ddi_get_instance(dip);
8904 
8905 		if (name == NULL)
8906 			name = "tq";
8907 
8908 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8909 		    ddi_driver_name(dip), name);
8910 
8911 		tq_name = full_name;
8912 	}
8913 
8914 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8915 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8916 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8917 }
8918 
/*
 * Destroy a taskq created by ddi_taskq_create().
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8924 
8925 int
8926 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8927     void *arg, uint_t dflags)
8928 {
8929 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8930 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8931 
8932 	return (id != TASKQID_INVALID ? DDI_SUCCESS : DDI_FAILURE);
8933 }
8934 
/*
 * Wait for all currently dispatched tasks on the taskq to complete.
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8940 
/*
 * Suspend execution of tasks on the taskq.
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8946 
/*
 * Return whether the taskq is currently suspended.
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8952 
/*
 * Resume a taskq previously suspended with ddi_taskq_suspend().
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8958 
/*
 * Split a name of the form <alpha-prefix><decimal-digits> (e.g. "e1000g0")
 * into its prefix (copied into alnum) and its numeric suffix (*nump).
 * Returns DDI_FAILURE if the name has no digit suffix, consists entirely
 * of digits, or the suffix does not parse.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;
	char		c;

	l = strlen(ifname);
	/* Scan backwards from the end over the trailing digit run */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* l is now the prefix length; copy prefix + NUL */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/*
	 * l == 0: the whole name was digits (no prefix);
	 * nonum: no digits were seen at all (no numeric suffix).
	 */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
8988 
8989 /*
8990  * Default initialization function for drivers that don't need to quiesce.
8991  */
8992 /* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* Nothing to quiesce; trivially succeed */
	return (DDI_SUCCESS);
}
8998 
8999 /*
9000  * Initialization function for drivers that should implement quiesce()
9001  * but haven't yet.
9002  */
9003 /* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Quiesce is not implemented by this driver; report failure */
	return (DDI_FAILURE);
}
9009 
9010 char *
9011 ddi_strdup(const char *str, int flag)
9012 {
9013 	int	n;
9014 	char	*ptr;
9015 
9016 	ASSERT(str != NULL);
9017 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9018 
9019 	n = strlen(str);
9020 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9021 		return (NULL);
9022 	bcopy(str, ptr, n + 1);
9023 	return (ptr);
9024 }
9025 
/*
 * Kernel analogue of strdup(3C); sleeps for memory (KM_SLEEP).
 * Free the result with strfree().
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
9031 
/*
 * Free a string allocated by strdup()/ddi_strdup().  The allocation
 * size is recomputed from the string length, so the string must not
 * have been shortened in place.
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9038 
9039 /*
9040  * Generic DDI callback interfaces.
9041  */
9042 
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	/* DEBUG builds trip here; production builds return errors below */
	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration - only one callback per dip */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/*
	 * If adding an IRM callback, notify IRM.  This is done after
	 * devi_cb_p is published so the callback is visible to IRM.
	 */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* The handle is the address of the dip's callback pointer */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
9084 
9085 int
9086 ddi_cb_unregister(ddi_cb_handle_t hdl)
9087 {
9088 	ddi_cb_t	*cbp;
9089 	dev_info_t	*dip;
9090 
9091 	ASSERT(hdl != NULL);
9092 
9093 	/* Sanity check the context */
9094 	ASSERT(!servicing_interrupt());
9095 	if (servicing_interrupt())
9096 		return (DDI_FAILURE);
9097 
9098 	/* Validate parameters */
9099 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9100 	    ((dip = cbp->cb_dip) == NULL))
9101 		return (DDI_EINVAL);
9102 
9103 	/* If removing an IRM callback, notify IRM */
9104 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9105 		i_ddi_irm_set_cb(dip, B_FALSE);
9106 
9107 	/* Destroy the callback */
9108 	kmem_free(cbp, sizeof (ddi_cb_t));
9109 	DEVI(dip)->devi_cb_p = NULL;
9110 
9111 	return (DDI_SUCCESS);
9112 }
9113 
9114 /*
9115  * Platform independent DR routines
9116  */
9117 
9118 static int
9119 ndi2errno(int n)
9120 {
9121 	int err = 0;
9122 
9123 	switch (n) {
9124 		case NDI_NOMEM:
9125 			err = ENOMEM;
9126 			break;
9127 		case NDI_BUSY:
9128 			err = EBUSY;
9129 			break;
9130 		case NDI_FAULT:
9131 			err = EFAULT;
9132 			break;
9133 		case NDI_FAILURE:
9134 			err = EIO;
9135 			break;
9136 		case NDI_SUCCESS:
9137 			break;
9138 		case NDI_BADHANDLE:
9139 		default:
9140 			err = EINVAL;
9141 			break;
9142 	}
9143 	return (err);
9144 }
9145 
9146 /*
9147  * Prom tree node list
9148  */
struct ptnode {
	pnode_t		nodeid;		/* prom node id of selected node */
	struct ptnode	*next;		/* next entry in singly-linked list */
};
9153 
9154 /*
9155  * Prom tree walk arg
9156  */
struct pta {
	dev_info_t	*pdip;		/* parent dip of the branch */
	devi_branch_t	*bp;		/* caller's branch descriptor */
	uint_t		flags;		/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;		/* dip from e_ddi_branch_configure() */
	struct ptnode	*head;		/* list of prom nodes selected */
};
9164 
/*
 * Recursively walk the prom tree rooted at nodeid, appending to ap->head
 * every node accepted by the caller's prom_branch_select function.  With
 * DEVI_BRANCH_CHILD set, only the node itself is considered (no descent
 * into children).
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* walk to the tail of the list to append */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* recurse over all children of this node */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9196 
9197 /*
9198  * NOTE: The caller of this function must check for device contracts
9199  * or LDI callbacks against this dip before setting the dip offline.
9200  */
static int
set_infant_dip_offline(dev_info_t *dip, void *arg)
{
	char	*path = (char *)arg;	/* MAXPATHLEN scratch buffer */

	ASSERT(dip);
	ASSERT(arg);

	/*
	 * Refuse to mark an already-attached node offline; the offline
	 * flag is only meant for "infant" (never attached) nodes.
	 */
	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
		(void) ddi_pathname(dip, path);
		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
		    "node: %s", path);
		return (DDI_FAILURE);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	if (!DEVI_IS_DEVICE_OFFLINE(dip))
		DEVI_SET_DEVICE_OFFLINE(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	return (DDI_SUCCESS);
}
9223 
typedef struct result {
	char	*path;		/* scratch buffer for device pathnames */
	int	result;		/* walk outcome: DDI_SUCCESS/DDI_FAILURE */
} result_t;
9228 
9229 static int
9230 dip_set_offline(dev_info_t *dip, void *arg)
9231 {
9232 	int end;
9233 	result_t *resp = (result_t *)arg;
9234 
9235 	ASSERT(dip);
9236 	ASSERT(resp);
9237 
9238 	/*
9239 	 * We stop the walk if e_ddi_offline_notify() returns
9240 	 * failure, because this implies that one or more consumers
9241 	 * (either LDI or contract based) has blocked the offline.
9242 	 * So there is no point in conitnuing the walk
9243 	 */
9244 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9245 		resp->result = DDI_FAILURE;
9246 		return (DDI_WALK_TERMINATE);
9247 	}
9248 
9249 	/*
9250 	 * If set_infant_dip_offline() returns failure, it implies
9251 	 * that we failed to set a particular dip offline. This
9252 	 * does not imply that the offline as a whole should fail.
9253 	 * We want to do the best we can, so we continue the walk.
9254 	 */
9255 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9256 		end = DDI_SUCCESS;
9257 	else
9258 		end = DDI_FAILURE;
9259 
9260 	e_ddi_offline_finalize(dip, end);
9261 
9262 	return (DDI_WALK_CONTINUE);
9263 }
9264 
9265 /*
9266  * The call to e_ddi_offline_notify() exists for the
9267  * unlikely error case that a branch we are trying to
9268  * create already exists and has device contracts or LDI
9269  * event callbacks against it.
9270  *
9271  * We allow create to succeed for such branches only if
9272  * no constraints block the offline.
9273  */
static int
branch_set_offline(dev_info_t *dip, char *path)
{
	int		circ;
	int		end;
	result_t	res;


	/* Consumers (contracts/LDI) can veto the offline of the root dip */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	if (end == DDI_FAILURE)
		return (DDI_FAILURE);

	/* Root went offline; now walk its descendants */
	res.result = DDI_SUCCESS;
	res.path = path;

	ndi_devi_enter(dip, &circ);
	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
	ndi_devi_exit(dip, circ);

	return (res.result);
}
9305 
9306 /*ARGSUSED*/
/*
 * prom_tree_access() callback: build (or find) devinfo branches for all
 * prom nodes under ap->pdip accepted by the caller's select function,
 * hold each branch, set it offline, and optionally configure it.
 * has_changed is unused (ARGSUSED above).  Returns 0 or an errno.
 */
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Collect the selected prom nodes into ap->head */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			/* Remember the failure but keep processing nodes */
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			/* first error wins */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9413 
/*
 * Create one self-identifying (SID) devinfo node under pdip via the
 * caller's sid_branch_create function, then recursively create its
 * children.  rdipp is non-NULL only for the root of a branch, in which
 * case the created dip is returned through it.  The return value is a
 * DDI_WALK_* code directing the parent's sibling creation.
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int			rv, circ, len;
	int			i, flags, ret;
	dev_info_t		*dip;
	char			*nbuf;
	char			*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate with a placeholder name; real name comes from props */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p",  (void *)dip);
		goto fail;
	}

	/* The create function must have set a "name" property */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Create children of dip until one of them prunes/errors */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
9539 
/*
 * Create SID branches under pdip, one per iteration, until the caller's
 * create function stops the walk (PRUNESIB) or errors.  Each created
 * branch root is held and optionally configured.  Returns 0, EIO on
 * walk error, or the first configure errno.
 */
static int
create_sid_branch(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			/* Root creation failed; nothing to hold/configure */
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			/* first error wins */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9583 
/*
 * Create a devinfo branch under pdip, either from prom nodes
 * (DEVI_BRANCH_PROM) or self-identifying nodes (DEVI_BRANCH_SID),
 * as selected by bp->type.  Returns 0 or an errno.
 * DEVI_BRANCH_EVENT is not supported here.
 */
int
e_ddi_branch_create(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int prom_devi, sid_devi, error;

	if (pdip == NULL || bp == NULL || bp->type == 0)
		return (EINVAL);

	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;

	/* Exactly one branch type, with its create function supplied */
	if (prom_devi && bp->create.prom_branch_select == NULL)
		return (EINVAL);
	else if (sid_devi && bp->create.sid_branch_create == NULL)
		return (EINVAL);
	else if (!prom_devi && !sid_devi)
		return (EINVAL);

	if (flags & DEVI_BRANCH_EVENT)
		return (EINVAL);

	if (prom_devi) {
		struct pta pta = {0};

		pta.pdip = pdip;
		pta.bp = bp;
		pta.flags = flags;

		/* The prom walk must run under prom_tree_access() */
		error = prom_tree_access(create_prom_branch, &pta, NULL);

		if (dipp)
			*dipp = pta.fdip;
		else if (pta.fdip)
			/* Caller doesn't want the dip; drop the hold */
			ndi_rele_devi(pta.fdip);
	} else {
		error = create_sid_branch(pdip, bp, dipp, flags);
	}

	return (error);
}
9628 
/*
 * Configure (attach) a previously created and held branch rooted at rdip.
 * On failure, if dipp is non-NULL it receives a held rdip.  Returns 0 or
 * an errno.  Note: the "flags != 0" check below makes the subsequent
 * DEVI_BRANCH_EVENT test redundant; any non-zero flags value is rejected.
 */
int
e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
{
	int		rv;
	char		*devnm;
	dev_info_t	*pdip;

	if (dipp)
		*dipp = NULL;

	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	/* Hold the parent across the configure */
	ndi_hold_devi(pdip);

	if (!e_ddi_branch_held(rdip)) {
		ndi_rele_devi(pdip);
		cmn_err(CE_WARN, "e_ddi_branch_configure: "
		    "dip(%p) not held", (void *)rdip);
		return (EINVAL);
	}

	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
		/*
		 * First attempt to bind a driver. If we fail, return
		 * success (On some platforms, dips for some device
		 * types (CPUs) may not have a driver)
		 */
		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
			ndi_rele_devi(pdip);
			return (0);
		}

		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
			rv = NDI_FAILURE;
			goto out;
		}
	}

	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	/* devnm starts with '/'; skip it when passing to config_one */
	(void) ddi_deviname(rdip, devnm);

	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
		/* release hold from ndi_devi_config_one() */
		ndi_rele_devi(rdip);
	}

	kmem_free(devnm, MAXNAMELEN + 1);
out:
	if (rv != NDI_SUCCESS && dipp && rdip) {
		/* give the caller a held rdip to inspect on failure */
		ndi_hold_devi(rdip);
		*dipp = rdip;
	}
	ndi_rele_devi(pdip);
	return (ndi2errno(rv));
}
9691 
9692 void
9693 e_ddi_branch_hold(dev_info_t *rdip)
9694 {
9695 	if (e_ddi_branch_held(rdip)) {
9696 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9697 		return;
9698 	}
9699 
9700 	mutex_enter(&DEVI(rdip)->devi_lock);
9701 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9702 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9703 		DEVI(rdip)->devi_ref++;
9704 	}
9705 	ASSERT(DEVI(rdip)->devi_ref > 0);
9706 	mutex_exit(&DEVI(rdip)->devi_lock);
9707 }
9708 
9709 int
9710 e_ddi_branch_held(dev_info_t *rdip)
9711 {
9712 	int rv = 0;
9713 
9714 	mutex_enter(&DEVI(rdip)->devi_lock);
9715 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9716 	    DEVI(rdip)->devi_ref > 0) {
9717 		rv = 1;
9718 	}
9719 	mutex_exit(&DEVI(rdip)->devi_lock);
9720 
9721 	return (rv);
9722 }
9723 
/*
 * Drop the branch hold taken by e_ddi_branch_hold(): clear the
 * DEVI_BRANCH_HELD flag and release the associated devinfo reference.
 */
void
e_ddi_branch_rele(dev_info_t *rdip)
{
	mutex_enter(&DEVI(rdip)->devi_lock);
	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
	DEVI(rdip)->devi_ref--;
	mutex_exit(&DEVI(rdip)->devi_lock);
}
9732 
9733 int
9734 e_ddi_branch_unconfigure(
9735 	dev_info_t *rdip,
9736 	dev_info_t **dipp,
9737 	uint_t flags)
9738 {
9739 	int	circ, rv;
9740 	int	destroy;
9741 	char	*devnm;
9742 	uint_t	nflags;
9743 	dev_info_t *pdip;
9744 
9745 	if (dipp)
9746 		*dipp = NULL;
9747 
9748 	if (rdip == NULL)
9749 		return (EINVAL);
9750 
9751 	pdip = ddi_get_parent(rdip);
9752 
9753 	ASSERT(pdip);
9754 
9755 	/*
9756 	 * Check if caller holds pdip busy - can cause deadlocks during
9757 	 * devfs_clean()
9758 	 */
9759 	if (DEVI_BUSY_OWNED(pdip)) {
9760 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9761 		    " devinfo node(%p) is busy held", (void *)pdip);
9762 		return (EINVAL);
9763 	}
9764 
9765 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9766 
9767 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9768 
9769 	ndi_devi_enter(pdip, &circ);
9770 	(void) ddi_deviname(rdip, devnm);
9771 	ndi_devi_exit(pdip, circ);
9772 
9773 	/*
9774 	 * ddi_deviname() returns a component name with / prepended.
9775 	 */
9776 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9777 
9778 	ndi_devi_enter(pdip, &circ);
9779 
9780 	/*
9781 	 * Recreate device name as it may have changed state (init/uninit)
9782 	 * when parent busy lock was dropped for devfs_clean()
9783 	 */
9784 	(void) ddi_deviname(rdip, devnm);
9785 
9786 	if (!e_ddi_branch_held(rdip)) {
9787 		kmem_free(devnm, MAXNAMELEN + 1);
9788 		ndi_devi_exit(pdip, circ);
9789 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9790 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
9791 		return (EINVAL);
9792 	}
9793 
9794 	/*
9795 	 * Release hold on the branch. This is ok since we are holding the
9796 	 * parent busy. If rdip is not removed, we must do a hold on the
9797 	 * branch before returning.
9798 	 */
9799 	e_ddi_branch_rele(rdip);
9800 
9801 	nflags = NDI_DEVI_OFFLINE;
9802 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9803 		nflags |= NDI_DEVI_REMOVE;
9804 		destroy = 1;
9805 	} else {
9806 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
9807 	}
9808 
9809 	if (flags & DEVI_BRANCH_EVENT)
9810 		nflags |= NDI_POST_EVENT;
9811 
9812 	if (i_ddi_devi_attached(pdip) &&
9813 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9814 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9815 	} else {
9816 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9817 		if (rv == NDI_SUCCESS) {
9818 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9819 			rv = ndi_devi_offline(rdip, nflags);
9820 		}
9821 	}
9822 
9823 	if (!destroy || rv != NDI_SUCCESS) {
9824 		/* The dip still exists, so do a hold */
9825 		e_ddi_branch_hold(rdip);
9826 	}
9827 
9828 	kmem_free(devnm, MAXNAMELEN + 1);
9829 	ndi_devi_exit(pdip, circ);
9830 	return (ndi2errno(rv));
9831 }
9832 
/*
 * Destroy (offline and remove) the branch rooted at rdip.  Thin
 * convenience wrapper: e_ddi_branch_unconfigure() with
 * DEVI_BRANCH_DESTROY or'ed into the flags.
 */
int
e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
{
	return (e_ddi_branch_unconfigure(rdip, dipp,
	    flag|DEVI_BRANCH_DESTROY));
}
9839 
/*
 * Number of chains for hash table
 */
#define	NUMCHAINS	17

/*
 * Devinfo busy arg
 *
 * Shared walk state for e_ddi_branch_referenced() and its visit_*
 * callbacks below.
 */
struct devi_busy {
	int dv_total;		/* running total of dv_node vnode holds */
	int s_total;		/* running total of specfs snode opens */
	mod_hash_t *dv_hash;	/* per-dip dv_node hold counts */
	mod_hash_t *s_hash;	/* per-dip snode open counts */
	int (*callback)(dev_info_t *, void *, uint_t);	/* user callback */
	void *arg;		/* opaque argument passed to callback */
};
9856 
9857 static int
9858 visit_dip(dev_info_t *dip, void *arg)
9859 {
9860 	uintptr_t sbusy, dvbusy, ref;
9861 	struct devi_busy *bsp = arg;
9862 
9863 	ASSERT(bsp->callback);
9864 
9865 	/*
9866 	 * A dip cannot be busy if its reference count is 0
9867 	 */
9868 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9869 		return (bsp->callback(dip, bsp->arg, 0));
9870 	}
9871 
9872 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9873 		dvbusy = 0;
9874 
9875 	/*
9876 	 * To catch device opens currently maintained on specfs common snodes.
9877 	 */
9878 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9879 		sbusy = 0;
9880 
9881 #ifdef	DEBUG
9882 	if (ref < sbusy || ref < dvbusy) {
9883 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9884 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9885 	}
9886 #endif
9887 
9888 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9889 
9890 	return (bsp->callback(dip, bsp->arg, dvbusy));
9891 }
9892 
9893 static int
9894 visit_snode(struct snode *sp, void *arg)
9895 {
9896 	uintptr_t sbusy;
9897 	dev_info_t *dip;
9898 	int count;
9899 	struct devi_busy *bsp = arg;
9900 
9901 	ASSERT(sp);
9902 
9903 	/*
9904 	 * The stable lock is held. This prevents
9905 	 * the snode and its associated dip from
9906 	 * going away.
9907 	 */
9908 	dip = NULL;
9909 	count = spec_devi_open_count(sp, &dip);
9910 
9911 	if (count <= 0)
9912 		return (DDI_WALK_CONTINUE);
9913 
9914 	ASSERT(dip);
9915 
9916 	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9917 		sbusy = count;
9918 	else
9919 		sbusy += count;
9920 
9921 	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9922 		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9923 		    "sbusy = %lu", "e_ddi_branch_referenced",
9924 		    (void *)dip, sbusy);
9925 	}
9926 
9927 	bsp->s_total += count;
9928 
9929 	return (DDI_WALK_CONTINUE);
9930 }
9931 
9932 static void
9933 visit_dvnode(struct dv_node *dv, void *arg)
9934 {
9935 	uintptr_t dvbusy;
9936 	uint_t count;
9937 	struct vnode *vp;
9938 	struct devi_busy *bsp = arg;
9939 
9940 	ASSERT(dv && dv->dv_devi);
9941 
9942 	vp = DVTOV(dv);
9943 
9944 	mutex_enter(&vp->v_lock);
9945 	count = vp->v_count;
9946 	mutex_exit(&vp->v_lock);
9947 
9948 	if (!count)
9949 		return;
9950 
9951 	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9952 	    (mod_hash_val_t *)&dvbusy))
9953 		dvbusy = count;
9954 	else
9955 		dvbusy += count;
9956 
9957 	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9958 	    (mod_hash_val_t)dvbusy)) {
9959 		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9960 		    "dvbusy=%lu", "e_ddi_branch_referenced",
9961 		    (void *)dv->dv_devi, dvbusy);
9962 	}
9963 
9964 	bsp->dv_total += count;
9965 }
9966 
/*
 * Returns reference count on success or -1 on failure.
 *
 * The count reported is the larger of the dv_node (devfs vnode) hold
 * total and the specfs snode open total seen across the branch.  If
 * callback is non-NULL it is invoked, via visit_dip(), with the per-dip
 * busy count for the branch root and (if it returns DDI_WALK_CONTINUE)
 * each of its descendants.
 *
 * The branch must be held (e_ddi_branch_held()) and the caller must NOT
 * hold the parent busy, or devfs_walk() could deadlock.
 */
int
e_ddi_branch_referenced(
	dev_info_t *rdip,
	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
	void *arg)
{
	int circ;
	char *path;
	dev_info_t *pdip;
	struct devi_busy bsa = {0};

	ASSERT(rdip);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	ndi_hold_devi(rdip);

	pdip = ddi_get_parent(rdip);

	ASSERT(pdip);

	/*
	 * Check if caller holds pdip busy - can cause deadlocks during
	 * devfs_walk()
	 */
	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
		    "devinfo branch(%p) not held or parent busy held",
		    (void *)rdip);
		ndi_rele_devi(rdip);
		kmem_free(path, MAXPATHLEN);
		return (-1);
	}

	/* form the branch root's path while holding the parent busy */
	ndi_devi_enter(pdip, &circ);
	(void) ddi_pathname(rdip, path);
	ndi_devi_exit(pdip, circ);

	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct dev_info));

	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
	    mod_hash_null_valdtor, sizeof (struct snode));

	/*
	 * Tally dv_node vnode holds per dip under the branch (see
	 * visit_dvnode()).  On failure report -1 via the totals.
	 */
	if (devfs_walk(path, visit_dvnode, &bsa)) {
		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
		    "devfs walk failed for: %s", path);
		kmem_free(path, MAXPATHLEN);
		bsa.s_total = bsa.dv_total = -1;
		goto out;
	}

	kmem_free(path, MAXPATHLEN);

	/*
	 * Walk the snode table to detect device opens, which are currently
	 * maintained on specfs common snodes.
	 */
	spec_snode_walk(visit_snode, &bsa);

	if (callback == NULL)
		goto out;

	bsa.callback = callback;
	bsa.arg = arg;

	/*
	 * Visit the branch root first; descend to the children only if
	 * the callback asks us to continue.
	 */
	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
		ndi_devi_exit(rdip, circ);
	}

out:
	ndi_rele_devi(rdip);
	mod_hash_destroy_ptrhash(bsa.s_hash);
	mod_hash_destroy_ptrhash(bsa.dv_hash);
	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
}
10048