xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision 9e494b8a787c7b2d9fd087a2dde8811e386513d4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2014 Garrett D'Amore <garrett@damore.org>
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_periodic.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 #include <sys/zone.h>
86 #include <sys/clock_impl.h>
87 #include <sys/ddi.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
92 
93 extern	pri_t	minclsyspri;
94 
95 extern	rctl_hndl_t rc_project_locked_mem;
96 extern	rctl_hndl_t rc_zone_locked_mem;
97 
98 #ifdef DEBUG
99 static int sunddi_debug = 0;
100 #endif /* DEBUG */
101 
102 /* ddi_umem_unlock miscellaneous */
103 
104 static	void	i_ddi_umem_unlock_thread_start(void);
105 
106 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
107 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
108 static	kthread_t	*ddi_umem_unlock_thread;
109 /*
110  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
111  */
112 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
113 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
156 int
157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 {
159 	return (i_ddi_apply_range(dp, rdip, rp));
160 }
161 
/*
 * Map register set 'rnumber' of 'dip' into kernel virtual space, returning
 * the address through 'kaddrp'.  'offset'/'len' select a sub-range of the
 * register set.  Returns the DDI_ME_* / DDI_* status from the parent's
 * bus_map(9E).
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	/* Layout must match one 'reg'/'registers' property tuple. */
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not validated against 'length'
		 * here — assumed in range; confirm callers guarantee this.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 * ddi_unmap_regs() removes it again.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked kernel mapping request by register number. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
251 int
252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253     off_t offset, off_t len, caddr_t *vaddrp)
254 {
255 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 }
257 
258 /*
259  * nullbusmap:	The/DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268     off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
281 struct regspec *
282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 {
284 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 }
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
408 int
409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 {
411 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
412 	    val_p));
413 }
414 
415 int
416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 {
418 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
419 	    val_p));
420 }
421 
422 int
423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 {
425 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
426 	    val_p));
427 }
428 
429 int
430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 {
432 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
433 	    val_p));
434 }
435 
436 int
437 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
438 {
439 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
440 }
441 
442 int
443 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
444 {
445 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
446 }
447 
448 int
449 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
450 {
451 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
452 }
453 
454 int
455 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
456 {
457 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
458 }
459 
460 /*
461  * ddi_peekpokeio() is used primarily by the mem drivers for moving
462  * data to and from uio structures via peek and poke.  Note that we
463  * use "internal" routines ddi_peek and ddi_poke to make this go
464  * slightly faster, avoiding the call overhead ..
465  */
/*
 * Move 'len' bytes between device memory at 'addr' and the caller's uio,
 * using peek (UIO_READ) or poke (UIO_WRITE) accesses of at most
 * 'xfersize' bytes, degrading to smaller widths to respect alignment.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Clamp the transfer unit to the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd address or odd residual length: go a byte at a time. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest access that both the address and
			 * the remaining length are aligned for, capped by
			 * xfersize; cases deliberately fall through to the
			 * next smaller width.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* Stage device data through ibuffer and the uio. */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
541 
542 /*
543  * These routines are used by drivers that do layered ioctls
544  * On sparc, they're implemented in assembler to avoid spilling
545  * register windows in the common (copyin) case ..
546  */
547 #if !defined(__sparc)
548 int
549 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
550 {
551 	if (flags & FKIOCTL)
552 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
553 	return (copyin(buf, kernbuf, size));
554 }
555 
556 int
557 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
558 {
559 	if (flags & FKIOCTL)
560 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
561 	return (copyout(buf, kernbuf, size));
562 }
563 #endif	/* !__sparc */
564 
565 /*
566  * Conversions in nexus pagesize units.  We don't duplicate the
567  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
568  * routines anyway.
569  */
570 unsigned long
571 ddi_btop(dev_info_t *dip, unsigned long bytes)
572 {
573 	unsigned long pages;
574 
575 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
576 	return (pages);
577 }
578 
579 unsigned long
580 ddi_btopr(dev_info_t *dip, unsigned long bytes)
581 {
582 	unsigned long pages;
583 
584 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
585 	return (pages);
586 }
587 
588 unsigned long
589 ddi_ptob(dev_info_t *dip, unsigned long pages)
590 {
591 	unsigned long bytes;
592 
593 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
594 	return (bytes);
595 }
596 
597 unsigned int
598 ddi_enter_critical(void)
599 {
600 	return ((uint_t)spl7());
601 }
602 
void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the priority level saved by ddi_enter_critical(). */
	splx((int)spl);
}
608 
609 /*
610  * Nexus ctlops punter
611  */
612 
613 #if !defined(__sparc)
614 /*
615  * Request bus_ctl parent to handle a bus_ctl request
616  *
617  * (The sparc version is in sparc_ddi.s)
618  */
619 int
620 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
621 {
622 	int (*fp)();
623 
624 	if (!d || !r)
625 		return (DDI_FAILURE);
626 
627 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
628 		return (DDI_FAILURE);
629 
630 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
631 	return ((*fp)(d, r, op, a, v));
632 }
633 
634 #endif
635 
636 /*
637  * DMA/DVMA setup
638  */
639 
640 #if !defined(__sparc)
641 /*
642  * Request bus_dma_ctl parent to fiddle with a dma request.
643  *
644  * (The sparc version is in sparc_subr.s)
645  */
646 int
647 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
648     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
649     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
650 {
651 	int (*fp)();
652 
653 	if (dip != ddi_root_node())
654 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
655 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
656 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
657 }
658 #endif
659 
660 /*
661  * For all DMA control functions, call the DMA control
662  * routine and return status.
663  *
664  * Just plain assume that the parent is to be called.
665  * If a nexus driver or a thread outside the framework
666  * of a nexus driver or a leaf driver calls these functions,
667  * it is up to them to deal with the fact that the parent's
668  * bus_dma_ctl function will be the first one called.
669  */
670 
671 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
672 
673 /*
674  * This routine is left in place to satisfy link dependencies
675  * for any 3rd party nexus drivers that rely on it.  It is never
676  * called, though.
677  */
678 /*ARGSUSED*/
679 int
680 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
681     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
682 {
683 	return (DDI_FAILURE);
684 }
685 
686 #if !defined(__sparc)
687 
688 /*
689  * The SPARC versions of these routines are done in assembler to
690  * save register windows, so they're in sparc_subr.s.
691  */
692 
693 int
694 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
695     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
696 {
697 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
698 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
699 
700 	if (dip != ddi_root_node())
701 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
702 
703 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
704 	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
705 }
706 
707 int
708 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
709 {
710 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
711 
712 	if (dip != ddi_root_node())
713 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
714 
715 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
716 	return ((*funcp)(dip, rdip, handlep));
717 }
718 
719 int
720 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
721     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
722     ddi_dma_cookie_t *cp, uint_t *ccountp)
723 {
724 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
725 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
726 
727 	if (dip != ddi_root_node())
728 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
729 
730 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
731 	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
732 }
733 
734 int
735 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
736     ddi_dma_handle_t handle)
737 {
738 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
739 
740 	if (dip != ddi_root_node())
741 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
742 
743 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
744 	return ((*funcp)(dip, rdip, handle));
745 }
746 
747 
748 int
749 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
750     ddi_dma_handle_t handle, off_t off, size_t len,
751     uint_t cache_flags)
752 {
753 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
754 	    off_t, size_t, uint_t);
755 
756 	if (dip != ddi_root_node())
757 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
758 
759 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
760 	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
761 }
762 
763 int
764 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
765     ddi_dma_handle_t handle, uint_t win, off_t *offp,
766     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
767 {
768 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
769 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
770 
771 	if (dip != ddi_root_node())
772 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
773 
774 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
775 	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
776 	    cookiep, ccountp));
777 }
778 
/*
 * Synchronize 'l' bytes at offset 'o' of DMA handle 'h' for the view
 * named by 'whom' (DDI_DMA_SYNC_FOR_*).  Sync requests are serviced by
 * the parent's bus_dma_flush(9E) entry point.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/* Requester comes from the handle; delegate via the flush cache. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
802 
/*
 * Unbind the resources bound to DMA handle 'h', deriving both the
 * requesting dip and the servicing nexus from the handle itself.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): the function pointer is taken from the requester's
	 * cached devi_bus_dma_unbindfunc rather than dip's bus_ops —
	 * presumably pre-resolved at attach time; confirm in ndi code.
	 */
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
816 
817 #endif	/* !__sparc */
818 
819 /*
820  * DMA burst sizes, and transfer minimums
821  */
822 
823 int
824 ddi_dma_burstsizes(ddi_dma_handle_t handle)
825 {
826 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
827 
828 	if (!dimp)
829 		return (0);
830 	else
831 		return (dimp->dmai_burstsizes);
832 }
833 
834 /*
835  * Given two DMA attribute structures, apply the attributes
836  * of one to the other, following the rules of attributes
837  * and the wishes of the caller.
838  *
839  * The rules of DMA attribute structures are that you cannot
840  * make things *less* restrictive as you apply one set
841  * of attributes to another.
842  *
843  */
/*
 * Merge 'mod' into 'attr' in place.  Each field moves only toward the
 * more restrictive value: lower bounds rise, upper bounds fall.
 */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window can only shrink. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Stricter (larger) alignment wins. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* Only burst sizes both sides support survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Fewer scatter/gather entries is more restrictive. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	/* Coarser (larger) granularity wins. */
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
867 
868 /*
869  * mmap/segmap interface:
870  */
871 
872 /*
873  * ddi_segmap:		setup the default segment driver. Calls the drivers
874  *			XXmmap routine to validate the range to be mapped.
875  *			Return ENXIO of the range is not valid.  Create
876  *			a seg_dev segment that contains all of the
877  *			necessary information and will reference the
878  *			default segment driver routines. It returns zero
879  *			on success or non-zero on failure.
880  */
881 int
882 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
883     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
884 {
885 	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
886 	    off_t, uint_t, uint_t, uint_t, struct cred *);
887 
888 	return (spec_segmap(dev, offset, asp, addrp, len,
889 	    prot, maxprot, flags, credp));
890 }
891 
892 /*
893  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
894  *			drivers. Allows each successive parent to resolve
895  *			address translations and add its mappings to the
896  *			mapping list supplied in the page structure. It
897  *			returns zero on success	or non-zero on failure.
898  */
899 
900 int
901 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
902     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
903 {
904 	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
905 }
906 
907 /*
908  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
909  *	Invokes platform specific DDI to determine whether attributes specified
910  *	in attr(9s) are	valid for the region of memory that will be made
911  *	available for direct access to user process via the mmap(2) system call.
912  */
/*
 * Validate that the access attributes in 'accattrp' are usable for
 * mapping register set 'rnumber' of the device 'dev', and return the
 * HAT flags the platform chose through 'hat_flags'.  Returns 0 on
 * success, -1 on any failure.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;		/* zero offset/len means the whole set */
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE only checks/fills the handle; it does not
	 * create a kernel mapping, hence no vaddr is passed or returned.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
975 
976 
977 /*
978  * Property functions:	 See also, ddipropdefs.h.
979  *
980  * These functions are the framework for the property functions,
981  * i.e. they support software defined properties.  All implementation
982  * specific property handling (i.e.: self-identifying devices and
983  * PROM defined properties are handled in the implementation specific
984  * functions (defined in ddi_implfuncs.h).
985  */
986 
987 /*
988  * nopropop:	Shouldn't be called, right?
989  */
990 int
991 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
992     char *name, caddr_t valuep, int *lengthp)
993 {
994 	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
995 	return (DDI_PROP_NOT_FOUND);
996 }
997 
998 #ifdef	DDI_PROP_DEBUG
999 int ddi_prop_debug_flag = 0;
1000 
1001 int
1002 ddi_prop_debug(int enable)
1003 {
1004 	int prev = ddi_prop_debug_flag;
1005 
1006 	if ((enable != 0) || (prev != 0))
1007 		printf("ddi_prop_debug: debugging %s\n",
1008 		    enable ? "enabled" : "disabled");
1009 	ddi_prop_debug_flag = enable;
1010 	return (prev);
1011 }
1012 
1013 #endif	/* DDI_PROP_DEBUG */
1014 
1015 /*
1016  * Search a property list for a match, if found return pointer
1017  * to matching prop struct, else return NULL.
1018  */
1019 
1020 ddi_prop_t *
1021 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1022 {
1023 	ddi_prop_t	*propp;
1024 
1025 	/*
1026 	 * find the property in child's devinfo:
1027 	 * Search order defined by this search function is first matching
1028 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1029 	 * dev == propp->prop_dev, name == propp->name, and the correct
1030 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1031 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1032 	 */
1033 	if (dev == DDI_DEV_T_NONE)
1034 		dev = DDI_DEV_T_ANY;
1035 
1036 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1037 
1038 		if (!DDI_STRSAME(propp->prop_name, name))
1039 			continue;
1040 
1041 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1042 			continue;
1043 
1044 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1045 			continue;
1046 
1047 		return (propp);
1048 	}
1049 
1050 	return ((ddi_prop_t *)0);
1051 }
1052 
1053 /*
1054  * Search for property within devnames structures
1055  */
/*
 * Search the driver-global (devnames) property list for the major number
 * of 'dev' and return the first entry matching name, dev, and data type,
 * or NULL.  The returned pointer references list storage; it is only as
 * stable as the dn_global_prop_ptr list itself.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/*
	 * NOTE(review): this NULL check runs before dn_lock is taken —
	 * presumably dn_global_prop_ptr is stable once published; confirm
	 * against the devnames locking rules before relying on it.
	 */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* LDI_DEV_T_ANY / rootnex-global lookups match any dev. */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1103 
1104 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1105 
1106 /*
1107  * ddi_prop_search_global:
1108  *	Search the global property list within devnames
1109  *	for the named property.  Return the encoded value.
1110  */
1111 static int
1112 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1113     void *valuep, uint_t *lengthp)
1114 {
1115 	ddi_prop_t	*propp;
1116 	caddr_t		buffer;
1117 
1118 	propp =  i_ddi_search_global_prop(dev, name, flags);
1119 
1120 	/* Property NOT found, bail */
1121 	if (propp == (ddi_prop_t *)0)
1122 		return (DDI_PROP_NOT_FOUND);
1123 
1124 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1125 		return (DDI_PROP_UNDEFINED);
1126 
1127 	if ((buffer = kmem_alloc(propp->prop_len,
1128 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1129 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1130 		return (DDI_PROP_NO_MEMORY);
1131 	}
1132 
1133 	/*
1134 	 * Return the encoded data
1135 	 */
1136 	*(caddr_t *)valuep = buffer;
1137 	*lengthp = propp->prop_len;
1138 	bcopy(propp->prop_val, buffer, propp->prop_len);
1139 
1140 	return (DDI_PROP_SUCCESS);
1141 }
1142 
1143 /*
1144  * ddi_prop_search_common:	Lookup and return the encoded value
1145  */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer = NULL;
	caddr_t		prealloc = NULL;	/* sleeping-alloc retry buffer */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*
	 * Retry loop: we may drop devi_lock to sleep in kmem_alloc(), in
	 * which case the property (or its length) can change underneath us
	 * and the lookup must be redone from the top.  The loop also walks
	 * up the devinfo tree via `dip = pdip' at the bottom.
	 */
	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				/* prealloc (if any) is exactly prop_len here */
				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* callers buffer must hold the whole value */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			if (buffer != NULL)
				bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend to the parent and search again */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1374 
1375 
1376 /*
1377  * ddi_prop_op: The basic property operator for drivers.
1378  *
1379  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1380  *
1381  *	prop_op			valuep
1382  *	------			------
1383  *
1384  *	PROP_LEN		<unused>
1385  *
1386  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1387  *
1388  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1389  *				address of allocated buffer, if successful)
1390  */
1391 int
1392 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1393     char *name, caddr_t valuep, int *lengthp)
1394 {
1395 	int	i;
1396 
1397 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1398 
1399 	/*
1400 	 * If this was originally an LDI prop lookup then we bail here.
1401 	 * The reason is that the LDI property lookup interfaces first call
1402 	 * a drivers prop_op() entry point to allow it to override
1403 	 * properties.  But if we've made it here, then the driver hasn't
1404 	 * overriden any properties.  We don't want to continue with the
1405 	 * property search here because we don't have any type inforamtion.
1406 	 * When we return failure, the LDI interfaces will then proceed to
1407 	 * call the typed property interfaces to look up the property.
1408 	 */
1409 	if (mod_flags & DDI_PROP_DYNAMIC)
1410 		return (DDI_PROP_NOT_FOUND);
1411 
1412 	/*
1413 	 * check for pre-typed property consumer asking for typed property:
1414 	 * see e_ddi_getprop_int64.
1415 	 */
1416 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1417 		mod_flags |= DDI_PROP_TYPE_INT64;
1418 	mod_flags |= DDI_PROP_TYPE_ANY;
1419 
1420 	i = ddi_prop_search_common(dev, dip, prop_op,
1421 	    mod_flags, name, valuep, (uint_t *)lengthp);
1422 	if (i == DDI_PROP_FOUND_1275)
1423 		return (DDI_PROP_SUCCESS);
1424 	return (i);
1425 }
1426 
1427 /*
1428  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1429  * maintain size in number of blksize blocks.  Provides a dynamic property
1430  * implementation for size oriented properties based on nblocks64 and blksize
1431  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1432  * is too large.  This interface should not be used with a nblocks64 that
1433  * represents the driver's idea of how to represent unknown, if nblocks is
1434  * unknown use ddi_prop_op.
1435  */
1436 int
1437 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1438     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1439     uint64_t nblocks64, uint_t blksize)
1440 {
1441 	uint64_t size64;
1442 	int	blkshift;
1443 
1444 	/* convert block size to shift value */
1445 	ASSERT(BIT_ONLYONESET(blksize));
1446 	blkshift = highbit(blksize) - 1;
1447 
1448 	/*
1449 	 * There is no point in supporting nblocks64 values that don't have
1450 	 * an accurate uint64_t byte count representation.
1451 	 */
1452 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1453 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1454 		    name, valuep, lengthp));
1455 
1456 	size64 = nblocks64 << blkshift;
1457 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1458 	    name, valuep, lengthp, size64, blksize));
1459 }
1460 
1461 /*
1462  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1463  */
1464 int
1465 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1466     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1467 {
1468 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1469 	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1470 }
1471 
1472 /*
1473  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1474  * maintain size in bytes. Provides a of dynamic property implementation for
1475  * size oriented properties based on size64 value and blksize passed in by the
1476  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1477  * should not be used with a size64 that represents the driver's idea of how
1478  * to represent unknown, if size is unknown use ddi_prop_op.
1479  *
1480  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1481  * integers. While the most likely interface to request them ([bc]devi_size)
1482  * is declared int (signed) there is no enforcement of this, which means we
1483  * can't enforce limitations here without risking regression.
1484  */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute the nblocks value in blksize units */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * 64-bit "Nblocks"/"Size" are always served; the legacy 32-bit
	 * "nblocks"/"size"/"blksize" names are only served dynamically
	 * when the value fits (see block comment above this function).
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* not one of our dynamic properties: fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1575 
1576 /*
1577  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1578  */
1579 int
1580 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1581     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1582 {
1583 	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1584 	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1585 }
1586 
1587 /*
1588  * Variable length props...
1589  */
1590 
1591 /*
1592  * ddi_getlongprop:	Get variable length property len+val into a buffer
1593  *		allocated by property provider via kmem_alloc. Requester
1594  *		is responsible for freeing returned property via kmem_free.
1595  *
1596  *	Arguments:
1597  *
1598  *	dev_t:	Input:	dev_t of property.
1599  *	dip:	Input:	dev_info_t pointer of child.
1600  *	flags:	Input:	Possible flag modifiers are:
1601  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1602  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1603  *	name:	Input:	name of property.
1604  *	valuep:	Output:	Addr of callers buffer pointer.
1605  *	lengthp:Output:	*lengthp will contain prop length on exit.
1606  *
1607  *	Possible Returns:
1608  *
1609  *		DDI_PROP_SUCCESS:	Prop found and returned.
1610  *		DDI_PROP_NOT_FOUND:	Prop not found
1611  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1612  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1613  */
1614 
1615 int
1616 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1617     char *name, caddr_t valuep, int *lengthp)
1618 {
1619 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1620 	    flags, name, valuep, lengthp));
1621 }
1622 
1623 /*
1624  *
1625  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1626  *				buffer. (no memory allocation by provider).
1627  *
1628  *	dev_t:	Input:	dev_t of property.
1629  *	dip:	Input:	dev_info_t pointer of child.
1630  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1631  *	name:	Input:	name of property
1632  *	valuep:	Input:	ptr to callers buffer.
1633  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1634  *			actual length of property on exit.
1635  *
1636  *	Possible returns:
1637  *
1638  *		DDI_PROP_SUCCESS	Prop found and returned
1639  *		DDI_PROP_NOT_FOUND	Prop not found
1640  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1641  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1642  *					no value returned, but actual prop
1643  *					length returned in *lengthp
1644  *
1645  */
1646 
1647 int
1648 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1649     char *name, caddr_t valuep, int *lengthp)
1650 {
1651 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1652 	    flags, name, valuep, lengthp));
1653 }
1654 
1655 /*
1656  * Integer/boolean sized props.
1657  *
1658  * Call is value only... returns found boolean or int sized prop value or
1659  * defvalue if prop not found or is wrong length or is explicitly undefined.
1660  * Only flag is DDI_PROP_DONTPASS...
1661  *
1662  * By convention, this interface returns boolean (0) sized properties
1663  * as value (int)1.
1664  *
1665  * This never returns an error, if property not found or specifically
1666  * undefined, the input `defvalue' is returned.
1667  */
1668 
1669 int
1670 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
1671 {
1672 	int	propvalue = defvalue;
1673 	int	proplength = sizeof (int);
1674 	int	error;
1675 
1676 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
1677 	    flags, name, (caddr_t)&propvalue, &proplength);
1678 
1679 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
1680 		propvalue = 1;
1681 
1682 	return (propvalue);
1683 }
1684 
1685 /*
1686  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
1687  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
1688  */
1689 
1690 int
1691 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
1692 {
1693 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
1694 }
1695 
1696 /*
1697  * Allocate a struct prop_driver_data, along with 'size' bytes
1698  * for decoded property data.  This structure is freed by
1699  * calling ddi_prop_free(9F).
1700  */
1701 static void *
1702 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
1703 {
1704 	struct prop_driver_data *pdd;
1705 
1706 	/*
1707 	 * Allocate a structure with enough memory to store the decoded data.
1708 	 */
1709 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
1710 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
1711 	pdd->pdd_prop_free = prop_free;
1712 
1713 	/*
1714 	 * Return a pointer to the location to put the decoded data.
1715 	 */
1716 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
1717 }
1718 
1719 /*
1720  * Allocated the memory needed to store the encoded data in the property
1721  * handle.
1722  */
1723 static int
1724 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
1725 {
1726 	/*
1727 	 * If size is zero, then set data to NULL and size to 0.  This
1728 	 * is a boolean property.
1729 	 */
1730 	if (size == 0) {
1731 		ph->ph_size = 0;
1732 		ph->ph_data = NULL;
1733 		ph->ph_cur_pos = NULL;
1734 		ph->ph_save_pos = NULL;
1735 	} else {
1736 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
1737 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
1738 			if (ph->ph_data == NULL)
1739 				return (DDI_PROP_NO_MEMORY);
1740 		} else
1741 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
1742 		ph->ph_size = size;
1743 		ph->ph_cur_pos = ph->ph_data;
1744 		ph->ph_save_pos = ph->ph_data;
1745 	}
1746 	return (DDI_PROP_SUCCESS);
1747 }
1748 
1749 /*
1750  * Free the space allocated by the lookup routines.  Each lookup routine
1751  * returns a pointer to the decoded data to the driver.  The driver then
1752  * passes this pointer back to us.  This data actually lives in a struct
1753  * prop_driver_data.  We use negative indexing to find the beginning of
1754  * the structure and then free the entire structure using the size and
1755  * the free routine stored in the structure.
1756  */
1757 void
1758 ddi_prop_free(void *datap)
1759 {
1760 	struct prop_driver_data *pdd;
1761 
1762 	/*
1763 	 * Get the structure
1764 	 */
1765 	pdd = (struct prop_driver_data *)
1766 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
1767 	/*
1768 	 * Call the free routine to free it
1769 	 */
1770 	(*pdd->pdd_prop_free)(pdd);
1771 }
1772 
1773 /*
1774  * Free the data associated with an array of ints,
1775  * allocated with ddi_prop_decode_alloc().
1776  */
1777 static void
1778 ddi_prop_free_ints(struct prop_driver_data *pdd)
1779 {
1780 	kmem_free(pdd, pdd->pdd_size);
1781 }
1782 
1783 /*
1784  * Free a single string property or a single string contained within
1785  * the argv style return value of an array of strings.
1786  */
1787 static void
1788 ddi_prop_free_string(struct prop_driver_data *pdd)
1789 {
1790 	kmem_free(pdd, pdd->pdd_size);
1791 
1792 }
1793 
1794 /*
1795  * Free an array of strings.
1796  */
1797 static void
1798 ddi_prop_free_strings(struct prop_driver_data *pdd)
1799 {
1800 	kmem_free(pdd, pdd->pdd_size);
1801 }
1802 
1803 /*
1804  * Free the data associated with an array of bytes.
1805  */
1806 static void
1807 ddi_prop_free_bytes(struct prop_driver_data *pdd)
1808 {
1809 	kmem_free(pdd, pdd->pdd_size);
1810 }
1811 
1812 /*
1813  * Reset the current location pointer in the property handle to the
1814  * beginning of the data.
1815  */
1816 void
1817 ddi_prop_reset_pos(prop_handle_t *ph)
1818 {
1819 	ph->ph_cur_pos = ph->ph_data;
1820 	ph->ph_save_pos = ph->ph_data;
1821 }
1822 
1823 /*
1824  * Restore the current location pointer in the property handle to the
1825  * saved position.
1826  */
1827 void
1828 ddi_prop_save_pos(prop_handle_t *ph)
1829 {
1830 	ph->ph_save_pos = ph->ph_cur_pos;
1831 }
1832 
1833 /*
1834  * Save the location that the current location pointer is pointing to..
1835  */
1836 void
1837 ddi_prop_restore_pos(prop_handle_t *ph)
1838 {
1839 	ph->ph_cur_pos = ph->ph_save_pos;
1840 }
1841 
1842 /*
1843  * Property encode/decode functions
1844  */
1845 
1846 /*
1847  * Decode a single integer property
1848  */
1849 static int
1850 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
1851 {
1852 	int	i;
1853 	int	tmp;
1854 
1855 	/*
1856 	 * If there is nothing to decode return an error
1857 	 */
1858 	if (ph->ph_size == 0)
1859 		return (DDI_PROP_END_OF_DATA);
1860 
1861 	/*
1862 	 * Decode the property as a single integer and return it
1863 	 * in data if we were able to decode it.
1864 	 */
1865 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
1866 	if (i < DDI_PROP_RESULT_OK) {
1867 		switch (i) {
1868 		case DDI_PROP_RESULT_EOF:
1869 			return (DDI_PROP_END_OF_DATA);
1870 
1871 		case DDI_PROP_RESULT_ERROR:
1872 			return (DDI_PROP_CANNOT_DECODE);
1873 		}
1874 	}
1875 
1876 	*(int *)data = tmp;
1877 	*nelements = 1;
1878 	return (DDI_PROP_SUCCESS);
1879 }
1880 
1881 /*
1882  * Decode a single 64 bit integer property
1883  */
1884 static int
1885 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
1886 {
1887 	int	i;
1888 	int64_t	tmp;
1889 
1890 	/*
1891 	 * If there is nothing to decode return an error
1892 	 */
1893 	if (ph->ph_size == 0)
1894 		return (DDI_PROP_END_OF_DATA);
1895 
1896 	/*
1897 	 * Decode the property as a single integer and return it
1898 	 * in data if we were able to decode it.
1899 	 */
1900 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
1901 	if (i < DDI_PROP_RESULT_OK) {
1902 		switch (i) {
1903 		case DDI_PROP_RESULT_EOF:
1904 			return (DDI_PROP_END_OF_DATA);
1905 
1906 		case DDI_PROP_RESULT_ERROR:
1907 			return (DDI_PROP_CANNOT_DECODE);
1908 		}
1909 	}
1910 
1911 	*(int64_t *)data = tmp;
1912 	*nelements = 1;
1913 	return (DDI_PROP_SUCCESS);
1914 }
1915 
1916 /*
1917  * Decode an array of integers property
1918  */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	cnt = 0;
	int	*tmp;
	int	*intp;
	int	n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocated memory to store the decoded value in.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 *
			 * NOTE(review): a failure result other than
			 * EOF/ERROR would fall out of this switch and
			 * continue the loop on freed memory; presumably
			 * no other failure results exist — verify against
			 * the DDI_PROP_INT implementation.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}
1989 
1990 /*
1991  * Decode a 64 bit integer array property
1992  */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int	n;
	int	cnt = 0;
	int64_t	*tmp;
	int64_t	*intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded value.
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
	    ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 *
			 * NOTE(review): as in ddi_prop_fm_decode_ints(),
			 * only EOF/ERROR results return here; any other
			 * failure result would continue the loop after the
			 * free — presumably none exist; verify.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2063 
2064 /*
2065  * Encode an array of integers property (Can be one element)
2066  */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	i;
	int	*tmp;
	int	cnt;
	int	size;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 *
	 * NOTE(review): `size * nelements' is computed in int arithmetic
	 * before widening to size_t; presumably callers never pass counts
	 * large enough to overflow — verify if new callers appear.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2122 
2123 
2124 /*
2125  * Encode a 64 bit integer array property
2126  */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;
	int cnt;
	int size;
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store the encoded int.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of 64 bit ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2182 
2183 /*
2184  * Decode a single string property
2185  */
2186 static int
2187 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2188 {
2189 	char		*tmp;
2190 	char		*str;
2191 	int		i;
2192 	int		size;
2193 
2194 	/*
2195 	 * If there is nothing to decode return an error
2196 	 */
2197 	if (ph->ph_size == 0)
2198 		return (DDI_PROP_END_OF_DATA);
2199 
2200 	/*
2201 	 * Get the decoded size of the encoded string.
2202 	 */
2203 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2204 	if (size < DDI_PROP_RESULT_OK) {
2205 		switch (size) {
2206 		case DDI_PROP_RESULT_EOF:
2207 			return (DDI_PROP_END_OF_DATA);
2208 
2209 		case DDI_PROP_RESULT_ERROR:
2210 			return (DDI_PROP_CANNOT_DECODE);
2211 		}
2212 	}
2213 
2214 	/*
2215 	 * Allocated memory to store the decoded value in.
2216 	 */
2217 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2218 
2219 	ddi_prop_reset_pos(ph);
2220 
2221 	/*
2222 	 * Decode the str and place it in the space we just allocated
2223 	 */
2224 	tmp = str;
2225 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2226 	if (i < DDI_PROP_RESULT_OK) {
2227 		/*
2228 		 * Free the space we just allocated
2229 		 * and return an error.
2230 		 */
2231 		ddi_prop_free(str);
2232 		switch (i) {
2233 		case DDI_PROP_RESULT_EOF:
2234 			return (DDI_PROP_END_OF_DATA);
2235 
2236 		case DDI_PROP_RESULT_ERROR:
2237 			return (DDI_PROP_CANNOT_DECODE);
2238 		}
2239 	}
2240 
2241 	*(char **)data = str;
2242 	*nelements = 1;
2243 
2244 	return (DDI_PROP_SUCCESS);
2245 }
2246 
2247 /*
2248  * Decode an array of strings.
2249  */
2250 int
2251 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2252 {
2253 	int		cnt = 0;
2254 	char		**strs;
2255 	char		**tmp;
2256 	char		*ptr;
2257 	int		i;
2258 	int		n;
2259 	int		size;
2260 	size_t		nbytes;
2261 
2262 	/*
2263 	 * Figure out how many array elements there are by going through the
2264 	 * data without decoding it first and counting.
2265 	 */
2266 	for (;;) {
2267 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2268 		if (i < 0)
2269 			break;
2270 		cnt++;
2271 	}
2272 
2273 	/*
2274 	 * If there are no elements return an error
2275 	 */
2276 	if (cnt == 0)
2277 		return (DDI_PROP_END_OF_DATA);
2278 
2279 	/*
2280 	 * If we cannot skip through the data, we cannot decode it
2281 	 */
2282 	if (i == DDI_PROP_RESULT_ERROR)
2283 		return (DDI_PROP_CANNOT_DECODE);
2284 
2285 	/*
2286 	 * Reset the data pointer to the beginning of the encoded data
2287 	 */
2288 	ddi_prop_reset_pos(ph);
2289 
2290 	/*
2291 	 * Figure out how much memory we need for the sum total
2292 	 */
2293 	nbytes = (cnt + 1) * sizeof (char *);
2294 
2295 	for (n = 0; n < cnt; n++) {
2296 		/*
2297 		 * Get the decoded size of the current encoded string.
2298 		 */
2299 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2300 		if (size < DDI_PROP_RESULT_OK) {
2301 			switch (size) {
2302 			case DDI_PROP_RESULT_EOF:
2303 				return (DDI_PROP_END_OF_DATA);
2304 
2305 			case DDI_PROP_RESULT_ERROR:
2306 				return (DDI_PROP_CANNOT_DECODE);
2307 			}
2308 		}
2309 
2310 		nbytes += size;
2311 	}
2312 
2313 	/*
2314 	 * Allocate memory in which to store the decoded strings.
2315 	 */
2316 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2317 
2318 	/*
2319 	 * Set up pointers for each string by figuring out yet
2320 	 * again how long each string is.
2321 	 */
2322 	ddi_prop_reset_pos(ph);
2323 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2324 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2325 		/*
2326 		 * Get the decoded size of the current encoded string.
2327 		 */
2328 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2329 		if (size < DDI_PROP_RESULT_OK) {
2330 			ddi_prop_free(strs);
2331 			switch (size) {
2332 			case DDI_PROP_RESULT_EOF:
2333 				return (DDI_PROP_END_OF_DATA);
2334 
2335 			case DDI_PROP_RESULT_ERROR:
2336 				return (DDI_PROP_CANNOT_DECODE);
2337 			}
2338 		}
2339 
2340 		*tmp = ptr;
2341 		ptr += size;
2342 	}
2343 
2344 	/*
2345 	 * String array is terminated by a NULL
2346 	 */
2347 	*tmp = NULL;
2348 
2349 	/*
2350 	 * Finally, we can decode each string
2351 	 */
2352 	ddi_prop_reset_pos(ph);
2353 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2354 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2355 		if (i < DDI_PROP_RESULT_OK) {
2356 			/*
2357 			 * Free the space we just allocated
2358 			 * and return an error
2359 			 */
2360 			ddi_prop_free(strs);
2361 			switch (i) {
2362 			case DDI_PROP_RESULT_EOF:
2363 				return (DDI_PROP_END_OF_DATA);
2364 
2365 			case DDI_PROP_RESULT_ERROR:
2366 				return (DDI_PROP_CANNOT_DECODE);
2367 			}
2368 		}
2369 	}
2370 
2371 	*(char ***)data = strs;
2372 	*nelements = cnt;
2373 
2374 	return (DDI_PROP_SUCCESS);
2375 }
2376 
2377 /*
2378  * Encode a string.
2379  */
2380 int
2381 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2382 {
2383 	char		**tmp;
2384 	int		size;
2385 	int		i;
2386 
2387 	/*
2388 	 * If there is no data, we cannot do anything
2389 	 */
2390 	if (nelements == 0)
2391 		return (DDI_PROP_CANNOT_ENCODE);
2392 
2393 	/*
2394 	 * Get the size of the encoded string.
2395 	 */
2396 	tmp = (char **)data;
2397 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2398 	if (size < DDI_PROP_RESULT_OK) {
2399 		switch (size) {
2400 		case DDI_PROP_RESULT_EOF:
2401 			return (DDI_PROP_END_OF_DATA);
2402 
2403 		case DDI_PROP_RESULT_ERROR:
2404 			return (DDI_PROP_CANNOT_ENCODE);
2405 		}
2406 	}
2407 
2408 	/*
2409 	 * Allocate space in the handle to store the encoded string.
2410 	 */
2411 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2412 		return (DDI_PROP_NO_MEMORY);
2413 
2414 	ddi_prop_reset_pos(ph);
2415 
2416 	/*
2417 	 * Encode the string.
2418 	 */
2419 	tmp = (char **)data;
2420 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2421 	if (i < DDI_PROP_RESULT_OK) {
2422 		switch (i) {
2423 		case DDI_PROP_RESULT_EOF:
2424 			return (DDI_PROP_END_OF_DATA);
2425 
2426 		case DDI_PROP_RESULT_ERROR:
2427 			return (DDI_PROP_CANNOT_ENCODE);
2428 		}
2429 	}
2430 
2431 	return (DDI_PROP_SUCCESS);
2432 }
2433 
2434 
2435 /*
2436  * Encode an array of strings.
2437  */
2438 int
2439 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2440 {
2441 	int		cnt = 0;
2442 	char		**tmp;
2443 	int		size;
2444 	uint_t		total_size;
2445 	int		i;
2446 
2447 	/*
2448 	 * If there is no data, we cannot do anything
2449 	 */
2450 	if (nelements == 0)
2451 		return (DDI_PROP_CANNOT_ENCODE);
2452 
2453 	/*
2454 	 * Get the total size required to encode all the strings.
2455 	 */
2456 	total_size = 0;
2457 	tmp = (char **)data;
2458 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2459 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2460 		if (size < DDI_PROP_RESULT_OK) {
2461 			switch (size) {
2462 			case DDI_PROP_RESULT_EOF:
2463 				return (DDI_PROP_END_OF_DATA);
2464 
2465 			case DDI_PROP_RESULT_ERROR:
2466 				return (DDI_PROP_CANNOT_ENCODE);
2467 			}
2468 		}
2469 		total_size += (uint_t)size;
2470 	}
2471 
2472 	/*
2473 	 * Allocate space in the handle to store the encoded strings.
2474 	 */
2475 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2476 		return (DDI_PROP_NO_MEMORY);
2477 
2478 	ddi_prop_reset_pos(ph);
2479 
2480 	/*
2481 	 * Encode the array of strings.
2482 	 */
2483 	tmp = (char **)data;
2484 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2485 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2486 		if (i < DDI_PROP_RESULT_OK) {
2487 			switch (i) {
2488 			case DDI_PROP_RESULT_EOF:
2489 				return (DDI_PROP_END_OF_DATA);
2490 
2491 			case DDI_PROP_RESULT_ERROR:
2492 				return (DDI_PROP_CANNOT_ENCODE);
2493 			}
2494 		}
2495 	}
2496 
2497 	return (DDI_PROP_SUCCESS);
2498 }
2499 
2500 
2501 /*
2502  * Decode an array of bytes.
2503  */
2504 static int
2505 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2506 {
2507 	uchar_t		*tmp;
2508 	int		nbytes;
2509 	int		i;
2510 
2511 	/*
2512 	 * If there are no elements return an error
2513 	 */
2514 	if (ph->ph_size == 0)
2515 		return (DDI_PROP_END_OF_DATA);
2516 
2517 	/*
2518 	 * Get the size of the encoded array of bytes.
2519 	 */
2520 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2521 	    data, ph->ph_size);
2522 	if (nbytes < DDI_PROP_RESULT_OK) {
2523 		switch (nbytes) {
2524 		case DDI_PROP_RESULT_EOF:
2525 			return (DDI_PROP_END_OF_DATA);
2526 
2527 		case DDI_PROP_RESULT_ERROR:
2528 			return (DDI_PROP_CANNOT_DECODE);
2529 		}
2530 	}
2531 
2532 	/*
2533 	 * Allocated memory to store the decoded value in.
2534 	 */
2535 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2536 
2537 	/*
2538 	 * Decode each element and place it in the space we just allocated
2539 	 */
2540 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2541 	if (i < DDI_PROP_RESULT_OK) {
2542 		/*
2543 		 * Free the space we just allocated
2544 		 * and return an error
2545 		 */
2546 		ddi_prop_free(tmp);
2547 		switch (i) {
2548 		case DDI_PROP_RESULT_EOF:
2549 			return (DDI_PROP_END_OF_DATA);
2550 
2551 		case DDI_PROP_RESULT_ERROR:
2552 			return (DDI_PROP_CANNOT_DECODE);
2553 		}
2554 	}
2555 
2556 	*(uchar_t **)data = tmp;
2557 	*nelements = nbytes;
2558 
2559 	return (DDI_PROP_SUCCESS);
2560 }
2561 
2562 /*
2563  * Encode an array of bytes.
2564  */
2565 int
2566 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2567 {
2568 	int		size;
2569 	int		i;
2570 
2571 	/*
2572 	 * If there are no elements, then this is a boolean property,
2573 	 * so just create a property handle with no data and return.
2574 	 */
2575 	if (nelements == 0) {
2576 		(void) ddi_prop_encode_alloc(ph, 0);
2577 		return (DDI_PROP_SUCCESS);
2578 	}
2579 
2580 	/*
2581 	 * Get the size of the encoded array of bytes.
2582 	 */
2583 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2584 	    nelements);
2585 	if (size < DDI_PROP_RESULT_OK) {
2586 		switch (size) {
2587 		case DDI_PROP_RESULT_EOF:
2588 			return (DDI_PROP_END_OF_DATA);
2589 
2590 		case DDI_PROP_RESULT_ERROR:
2591 			return (DDI_PROP_CANNOT_DECODE);
2592 		}
2593 	}
2594 
2595 	/*
2596 	 * Allocate space in the handle to store the encoded bytes.
2597 	 */
2598 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2599 		return (DDI_PROP_NO_MEMORY);
2600 
2601 	/*
2602 	 * Encode the array of bytes.
2603 	 */
2604 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2605 	    nelements);
2606 	if (i < DDI_PROP_RESULT_OK) {
2607 		switch (i) {
2608 		case DDI_PROP_RESULT_EOF:
2609 			return (DDI_PROP_END_OF_DATA);
2610 
2611 		case DDI_PROP_RESULT_ERROR:
2612 			return (DDI_PROP_CANNOT_ENCODE);
2613 		}
2614 	}
2615 
2616 	return (DDI_PROP_SUCCESS);
2617 }
2618 
2619 /*
2620  * OBP 1275 integer, string and byte operators.
2621  *
2622  * DDI_PROP_CMD_DECODE:
2623  *
2624  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2625  *	DDI_PROP_RESULT_EOF:		end of data
2626  *	DDI_PROP_OK:			data was decoded
2627  *
2628  * DDI_PROP_CMD_ENCODE:
2629  *
2630  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2631  *	DDI_PROP_RESULT_EOF:		end of data
2632  *	DDI_PROP_OK:			data was encoded
2633  *
2634  * DDI_PROP_CMD_SKIP:
2635  *
2636  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2637  *	DDI_PROP_RESULT_EOF:		end of data
2638  *	DDI_PROP_OK:			data was skipped
2639  *
2640  * DDI_PROP_CMD_GET_ESIZE:
2641  *
2642  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2643  *	DDI_PROP_RESULT_EOF:		end of data
2644  *	> 0:				the encoded size
2645  *
2646  * DDI_PROP_CMD_GET_DSIZE:
2647  *
2648  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2649  *	DDI_PROP_RESULT_EOF:		end of data
2650  *	> 0:				the decoded size
2651  */
2652 
2653 /*
2654  * OBP 1275 integer operator
2655  *
2656  * OBP properties are a byte stream of data, so integers may not be
2657  * properly aligned.  Therefore we need to copy them one byte at a time.
2658  */
2659 int
2660 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
2661 {
2662 	int	i;
2663 
2664 	switch (cmd) {
2665 	case DDI_PROP_CMD_DECODE:
2666 		/*
2667 		 * Check that there is encoded data
2668 		 */
2669 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2670 			return (DDI_PROP_RESULT_ERROR);
2671 		if (ph->ph_flags & PH_FROM_PROM) {
2672 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
2673 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2674 			    ph->ph_size - i))
2675 				return (DDI_PROP_RESULT_ERROR);
2676 		} else {
2677 			if (ph->ph_size < sizeof (int) ||
2678 			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2679 			    ph->ph_size - sizeof (int))))
2680 				return (DDI_PROP_RESULT_ERROR);
2681 		}
2682 
2683 		/*
2684 		 * Copy the integer, using the implementation-specific
2685 		 * copy function if the property is coming from the PROM.
2686 		 */
2687 		if (ph->ph_flags & PH_FROM_PROM) {
2688 			*data = impl_ddi_prop_int_from_prom(
2689 			    (uchar_t *)ph->ph_cur_pos,
2690 			    (ph->ph_size < PROP_1275_INT_SIZE) ?
2691 			    ph->ph_size : PROP_1275_INT_SIZE);
2692 		} else {
2693 			bcopy(ph->ph_cur_pos, data, sizeof (int));
2694 		}
2695 
2696 		/*
2697 		 * Move the current location to the start of the next
2698 		 * bit of undecoded data.
2699 		 */
2700 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2701 		    PROP_1275_INT_SIZE;
2702 		return (DDI_PROP_RESULT_OK);
2703 
2704 	case DDI_PROP_CMD_ENCODE:
2705 		/*
2706 		 * Check that there is room to encoded the data
2707 		 */
2708 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2709 		    ph->ph_size < PROP_1275_INT_SIZE ||
2710 		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
2711 		    ph->ph_size - sizeof (int))))
2712 			return (DDI_PROP_RESULT_ERROR);
2713 
2714 		/*
2715 		 * Encode the integer into the byte stream one byte at a
2716 		 * time.
2717 		 */
2718 		bcopy(data, ph->ph_cur_pos, sizeof (int));
2719 
2720 		/*
2721 		 * Move the current location to the start of the next bit of
2722 		 * space where we can store encoded data.
2723 		 */
2724 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2725 		return (DDI_PROP_RESULT_OK);
2726 
2727 	case DDI_PROP_CMD_SKIP:
2728 		/*
2729 		 * Check that there is encoded data
2730 		 */
2731 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2732 		    ph->ph_size < PROP_1275_INT_SIZE)
2733 			return (DDI_PROP_RESULT_ERROR);
2734 
2735 
2736 		if ((caddr_t)ph->ph_cur_pos ==
2737 		    (caddr_t)ph->ph_data + ph->ph_size) {
2738 			return (DDI_PROP_RESULT_EOF);
2739 		} else if ((caddr_t)ph->ph_cur_pos >
2740 		    (caddr_t)ph->ph_data + ph->ph_size) {
2741 			return (DDI_PROP_RESULT_EOF);
2742 		}
2743 
2744 		/*
2745 		 * Move the current location to the start of the next bit of
2746 		 * undecoded data.
2747 		 */
2748 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
2749 		return (DDI_PROP_RESULT_OK);
2750 
2751 	case DDI_PROP_CMD_GET_ESIZE:
2752 		/*
2753 		 * Return the size of an encoded integer on OBP
2754 		 */
2755 		return (PROP_1275_INT_SIZE);
2756 
2757 	case DDI_PROP_CMD_GET_DSIZE:
2758 		/*
2759 		 * Return the size of a decoded integer on the system.
2760 		 */
2761 		return (sizeof (int));
2762 
2763 	default:
2764 #ifdef DEBUG
2765 		panic("ddi_prop_1275_int: %x impossible", cmd);
2766 		/*NOTREACHED*/
2767 #else
2768 		return (DDI_PROP_RESULT_ERROR);
2769 #endif	/* DEBUG */
2770 	}
2771 }
2772 
2773 /*
2774  * 64 bit integer operator.
2775  *
2776  * This is an extension, defined by Sun, to the 1275 integer
2777  * operator.  This routine handles the encoding/decoding of
2778  * 64 bit integer properties.
2779  */
2780 int
2781 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
2782 {
2783 
2784 	switch (cmd) {
2785 	case DDI_PROP_CMD_DECODE:
2786 		/*
2787 		 * Check that there is encoded data
2788 		 */
2789 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
2790 			return (DDI_PROP_RESULT_ERROR);
2791 		if (ph->ph_flags & PH_FROM_PROM) {
2792 			return (DDI_PROP_RESULT_ERROR);
2793 		} else {
2794 			if (ph->ph_size < sizeof (int64_t) ||
2795 			    ((int64_t *)ph->ph_cur_pos >
2796 			    ((int64_t *)ph->ph_data +
2797 			    ph->ph_size - sizeof (int64_t))))
2798 				return (DDI_PROP_RESULT_ERROR);
2799 		}
2800 		/*
2801 		 * Copy the integer, using the implementation-specific
2802 		 * copy function if the property is coming from the PROM.
2803 		 */
2804 		if (ph->ph_flags & PH_FROM_PROM) {
2805 			return (DDI_PROP_RESULT_ERROR);
2806 		} else {
2807 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
2808 		}
2809 
2810 		/*
2811 		 * Move the current location to the start of the next
2812 		 * bit of undecoded data.
2813 		 */
2814 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2815 		    sizeof (int64_t);
2816 			return (DDI_PROP_RESULT_OK);
2817 
2818 	case DDI_PROP_CMD_ENCODE:
2819 		/*
2820 		 * Check that there is room to encoded the data
2821 		 */
2822 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2823 		    ph->ph_size < sizeof (int64_t) ||
2824 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
2825 		    ph->ph_size - sizeof (int64_t))))
2826 			return (DDI_PROP_RESULT_ERROR);
2827 
2828 		/*
2829 		 * Encode the integer into the byte stream one byte at a
2830 		 * time.
2831 		 */
2832 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
2833 
2834 		/*
2835 		 * Move the current location to the start of the next bit of
2836 		 * space where we can store encoded data.
2837 		 */
2838 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2839 		    sizeof (int64_t);
2840 		return (DDI_PROP_RESULT_OK);
2841 
2842 	case DDI_PROP_CMD_SKIP:
2843 		/*
2844 		 * Check that there is encoded data
2845 		 */
2846 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
2847 		    ph->ph_size < sizeof (int64_t))
2848 			return (DDI_PROP_RESULT_ERROR);
2849 
2850 		if ((caddr_t)ph->ph_cur_pos ==
2851 		    (caddr_t)ph->ph_data + ph->ph_size) {
2852 			return (DDI_PROP_RESULT_EOF);
2853 		} else if ((caddr_t)ph->ph_cur_pos >
2854 		    (caddr_t)ph->ph_data + ph->ph_size) {
2855 			return (DDI_PROP_RESULT_EOF);
2856 		}
2857 
2858 		/*
2859 		 * Move the current location to the start of
2860 		 * the next bit of undecoded data.
2861 		 */
2862 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
2863 		    sizeof (int64_t);
2864 			return (DDI_PROP_RESULT_OK);
2865 
2866 	case DDI_PROP_CMD_GET_ESIZE:
2867 		/*
2868 		 * Return the size of an encoded integer on OBP
2869 		 */
2870 		return (sizeof (int64_t));
2871 
2872 	case DDI_PROP_CMD_GET_DSIZE:
2873 		/*
2874 		 * Return the size of a decoded integer on the system.
2875 		 */
2876 		return (sizeof (int64_t));
2877 
2878 	default:
2879 #ifdef DEBUG
2880 		panic("ddi_prop_int64_op: %x impossible", cmd);
2881 		/*NOTREACHED*/
2882 #else
2883 		return (DDI_PROP_RESULT_ERROR);
2884 #endif  /* DEBUG */
2885 	}
2886 }
2887 
2888 /*
2889  * OBP 1275 string operator.
2890  *
2891  * OBP strings are NULL terminated.
2892  */
2893 int
2894 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
2895 {
2896 	int	n;
2897 	char	*p;
2898 	char	*end;
2899 
2900 	switch (cmd) {
2901 	case DDI_PROP_CMD_DECODE:
2902 		/*
2903 		 * Check that there is encoded data
2904 		 */
2905 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2906 			return (DDI_PROP_RESULT_ERROR);
2907 		}
2908 
2909 		/*
2910 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
2911 		 * how to NULL terminate result.
2912 		 */
2913 		p = (char *)ph->ph_cur_pos;
2914 		end = (char *)ph->ph_data + ph->ph_size;
2915 		if (p >= end)
2916 			return (DDI_PROP_RESULT_EOF);
2917 
2918 		while (p < end) {
2919 			*data++ = *p;
2920 			if (*p++ == 0) {	/* NULL from OBP */
2921 				ph->ph_cur_pos = p;
2922 				return (DDI_PROP_RESULT_OK);
2923 			}
2924 		}
2925 
2926 		/*
2927 		 * If OBP did not NULL terminate string, which happens
2928 		 * (at least) for 'true'/'false' boolean values, account for
2929 		 * the space and store null termination on decode.
2930 		 */
2931 		ph->ph_cur_pos = p;
2932 		*data = 0;
2933 		return (DDI_PROP_RESULT_OK);
2934 
2935 	case DDI_PROP_CMD_ENCODE:
2936 		/*
2937 		 * Check that there is room to encoded the data
2938 		 */
2939 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2940 			return (DDI_PROP_RESULT_ERROR);
2941 		}
2942 
2943 		n = strlen(data) + 1;
2944 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
2945 		    ph->ph_size - n)) {
2946 			return (DDI_PROP_RESULT_ERROR);
2947 		}
2948 
2949 		/*
2950 		 * Copy the NULL terminated string
2951 		 */
2952 		bcopy(data, ph->ph_cur_pos, n);
2953 
2954 		/*
2955 		 * Move the current location to the start of the next bit of
2956 		 * space where we can store encoded data.
2957 		 */
2958 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
2959 		return (DDI_PROP_RESULT_OK);
2960 
2961 	case DDI_PROP_CMD_SKIP:
2962 		/*
2963 		 * Check that there is encoded data
2964 		 */
2965 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
2966 			return (DDI_PROP_RESULT_ERROR);
2967 		}
2968 
2969 		/*
2970 		 * Return the string length plus one for the NULL
2971 		 * We know the size of the property, we need to
2972 		 * ensure that the string is properly formatted,
2973 		 * since we may be looking up random OBP data.
2974 		 */
2975 		p = (char *)ph->ph_cur_pos;
2976 		end = (char *)ph->ph_data + ph->ph_size;
2977 		if (p >= end)
2978 			return (DDI_PROP_RESULT_EOF);
2979 
2980 		while (p < end) {
2981 			if (*p++ == 0) {	/* NULL from OBP */
2982 				ph->ph_cur_pos = p;
2983 				return (DDI_PROP_RESULT_OK);
2984 			}
2985 		}
2986 
2987 		/*
2988 		 * Accommodate the fact that OBP does not always NULL
2989 		 * terminate strings.
2990 		 */
2991 		ph->ph_cur_pos = p;
2992 		return (DDI_PROP_RESULT_OK);
2993 
2994 	case DDI_PROP_CMD_GET_ESIZE:
2995 		/*
2996 		 * Return the size of the encoded string on OBP.
2997 		 */
2998 		return (strlen(data) + 1);
2999 
3000 	case DDI_PROP_CMD_GET_DSIZE:
3001 		/*
3002 		 * Return the string length plus one for the NULL.
3003 		 * We know the size of the property, we need to
3004 		 * ensure that the string is properly formatted,
3005 		 * since we may be looking up random OBP data.
3006 		 */
3007 		p = (char *)ph->ph_cur_pos;
3008 		end = (char *)ph->ph_data + ph->ph_size;
3009 		if (p >= end)
3010 			return (DDI_PROP_RESULT_EOF);
3011 
3012 		for (n = 0; p < end; n++) {
3013 			if (*p++ == 0) {	/* NULL from OBP */
3014 				ph->ph_cur_pos = p;
3015 				return (n + 1);
3016 			}
3017 		}
3018 
3019 		/*
3020 		 * If OBP did not NULL terminate string, which happens for
3021 		 * 'true'/'false' boolean values, account for the space
3022 		 * to store null termination here.
3023 		 */
3024 		ph->ph_cur_pos = p;
3025 		return (n + 1);
3026 
3027 	default:
3028 #ifdef DEBUG
3029 		panic("ddi_prop_1275_string: %x impossible", cmd);
3030 		/*NOTREACHED*/
3031 #else
3032 		return (DDI_PROP_RESULT_ERROR);
3033 #endif	/* DEBUG */
3034 	}
3035 }
3036 
3037 /*
3038  * OBP 1275 byte operator
3039  *
3040  * Caller must specify the number of bytes to get.  OBP encodes bytes
3041  * as a byte so there is a 1-to-1 translation.
3042  */
3043 int
3044 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3045     uint_t nelements)
3046 {
3047 	switch (cmd) {
3048 	case DDI_PROP_CMD_DECODE:
3049 		/*
3050 		 * Check that there is encoded data
3051 		 */
3052 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3053 		    ph->ph_size < nelements ||
3054 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3055 		    ph->ph_size - nelements)))
3056 			return (DDI_PROP_RESULT_ERROR);
3057 
3058 		/*
3059 		 * Copy out the bytes
3060 		 */
3061 		bcopy(ph->ph_cur_pos, data, nelements);
3062 
3063 		/*
3064 		 * Move the current location
3065 		 */
3066 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3067 		return (DDI_PROP_RESULT_OK);
3068 
3069 	case DDI_PROP_CMD_ENCODE:
3070 		/*
3071 		 * Check that there is room to encode the data
3072 		 */
3073 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3074 		    ph->ph_size < nelements ||
3075 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3076 		    ph->ph_size - nelements)))
3077 			return (DDI_PROP_RESULT_ERROR);
3078 
3079 		/*
3080 		 * Copy in the bytes
3081 		 */
3082 		bcopy(data, ph->ph_cur_pos, nelements);
3083 
3084 		/*
3085 		 * Move the current location to the start of the next bit of
3086 		 * space where we can store encoded data.
3087 		 */
3088 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3089 		return (DDI_PROP_RESULT_OK);
3090 
3091 	case DDI_PROP_CMD_SKIP:
3092 		/*
3093 		 * Check that there is encoded data
3094 		 */
3095 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3096 		    ph->ph_size < nelements)
3097 			return (DDI_PROP_RESULT_ERROR);
3098 
3099 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3100 		    ph->ph_size - nelements))
3101 			return (DDI_PROP_RESULT_EOF);
3102 
3103 		/*
3104 		 * Move the current location
3105 		 */
3106 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3107 		return (DDI_PROP_RESULT_OK);
3108 
3109 	case DDI_PROP_CMD_GET_ESIZE:
3110 		/*
3111 		 * The size in bytes of the encoded size is the
3112 		 * same as the decoded size provided by the caller.
3113 		 */
3114 		return (nelements);
3115 
3116 	case DDI_PROP_CMD_GET_DSIZE:
3117 		/*
3118 		 * Just return the number of bytes specified by the caller.
3119 		 */
3120 		return (nelements);
3121 
3122 	default:
3123 #ifdef DEBUG
3124 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3125 		/*NOTREACHED*/
3126 #else
3127 		return (DDI_PROP_RESULT_ERROR);
3128 #endif	/* DEBUG */
3129 	}
3130 }
3131 
3132 /*
3133  * Used for properties that come from the OBP, hardware configuration files,
3134  * or that are created by calls to ddi_prop_update(9F).
3135  */
3136 static struct prop_handle_ops prop_1275_ops = {
3137 	ddi_prop_1275_int,
3138 	ddi_prop_1275_string,
3139 	ddi_prop_1275_bytes,
3140 	ddi_prop_int64_op
3141 };
3142 
3143 
3144 /*
3145  * Interface to create/modify a managed property on child's behalf...
3146  * Flags interpreted are:
3147  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3148  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3149  *
3150  * Use same dev_t when modifying or undefining a property.
3151  * Search for properties with DDI_DEV_T_ANY to match first named
3152  * property on the list.
3153  *
3154  * Properties are stored LIFO and subsequently will match the first
3155  * `matching' instance.
3156  */
3157 
3158 /*
3159  * ddi_prop_add:	Add a software defined property
3160  */
3161 
3162 /*
3163  * define to get a new ddi_prop_t.
3164  * km_flags are KM_SLEEP or KM_NOSLEEP.
3165  */
3166 
3167 #define	DDI_NEW_PROP_T(km_flags)	\
3168 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3169 
3170 static int
3171 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3172     char *name, caddr_t value, int length)
3173 {
3174 	ddi_prop_t	*new_propp, *propp;
3175 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3176 	int		km_flags = KM_NOSLEEP;
3177 	int		name_buf_len;
3178 
3179 	/*
3180 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3181 	 */
3182 
3183 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3184 		return (DDI_PROP_INVAL_ARG);
3185 
3186 	if (flags & DDI_PROP_CANSLEEP)
3187 		km_flags = KM_SLEEP;
3188 
3189 	if (flags & DDI_PROP_SYSTEM_DEF)
3190 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3191 	else if (flags & DDI_PROP_HW_DEF)
3192 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3193 
3194 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3195 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3196 		return (DDI_PROP_NO_MEMORY);
3197 	}
3198 
3199 	/*
3200 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3201 	 * to get the real major number for the device.  This needs to be
3202 	 * done because some drivers need to call ddi_prop_create in their
3203 	 * attach routines but they don't have a dev.  By creating the dev
3204 	 * ourself if the major number is 0, drivers will not have to know what
3205 	 * their major number.	They can just create a dev with major number
3206 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3207 	 * work by recreating the same dev that we already have, but its the
3208 	 * price you pay :-).
3209 	 *
3210 	 * This fixes bug #1098060.
3211 	 */
3212 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3213 		new_propp->prop_dev =
3214 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3215 		    getminor(dev));
3216 	} else
3217 		new_propp->prop_dev = dev;
3218 
3219 	/*
3220 	 * Allocate space for property name and copy it in...
3221 	 */
3222 
3223 	name_buf_len = strlen(name) + 1;
3224 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3225 	if (new_propp->prop_name == 0)	{
3226 		kmem_free(new_propp, sizeof (ddi_prop_t));
3227 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3228 		return (DDI_PROP_NO_MEMORY);
3229 	}
3230 	bcopy(name, new_propp->prop_name, name_buf_len);
3231 
3232 	/*
3233 	 * Set the property type
3234 	 */
3235 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3236 
3237 	/*
3238 	 * Set length and value ONLY if not an explicit property undefine:
3239 	 * NOTE: value and length are zero for explicit undefines.
3240 	 */
3241 
3242 	if (flags & DDI_PROP_UNDEF_IT) {
3243 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3244 	} else {
3245 		if ((new_propp->prop_len = length) != 0) {
3246 			new_propp->prop_val = kmem_alloc(length, km_flags);
3247 			if (new_propp->prop_val == 0)  {
3248 				kmem_free(new_propp->prop_name, name_buf_len);
3249 				kmem_free(new_propp, sizeof (ddi_prop_t));
3250 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3251 				return (DDI_PROP_NO_MEMORY);
3252 			}
3253 			bcopy(value, new_propp->prop_val, length);
3254 		}
3255 	}
3256 
3257 	/*
3258 	 * Link property into beginning of list. (Properties are LIFO order.)
3259 	 */
3260 
3261 	mutex_enter(&(DEVI(dip)->devi_lock));
3262 	propp = *list_head;
3263 	new_propp->prop_next = propp;
3264 	*list_head = new_propp;
3265 	mutex_exit(&(DEVI(dip)->devi_lock));
3266 	return (DDI_PROP_SUCCESS);
3267 }
3268 
3269 
3270 /*
3271  * ddi_prop_change:	Modify a software managed property value
3272  *
3273  *			Set new length and value if found.
3274  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3275  *			input name is the NULL string.
3276  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3277  *
3278  *			Note: an undef can be modified to be a define,
3279  *			(you can't go the other way.)
3280  */
3281 
3282 static int
3283 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3284     char *name, caddr_t value, int length)
3285 {
3286 	ddi_prop_t	*propp;
3287 	ddi_prop_t	**ppropp;
3288 	caddr_t		p = NULL;
3289 
3290 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3291 		return (DDI_PROP_INVAL_ARG);
3292 
3293 	/*
3294 	 * Preallocate buffer, even if we don't need it...
3295 	 */
3296 	if (length != 0)  {
3297 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3298 		    KM_SLEEP : KM_NOSLEEP);
3299 		if (p == NULL)	{
3300 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3301 			return (DDI_PROP_NO_MEMORY);
3302 		}
3303 	}
3304 
3305 	/*
3306 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3307 	 * number, a real dev_t value should be created based upon the dip's
3308 	 * binding driver.  See ddi_prop_add...
3309 	 */
3310 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3311 		dev = makedevice(
3312 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3313 		    getminor(dev));
3314 
3315 	/*
3316 	 * Check to see if the property exists.  If so we modify it.
3317 	 * Else we create it by calling ddi_prop_add().
3318 	 */
3319 	mutex_enter(&(DEVI(dip)->devi_lock));
3320 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3321 	if (flags & DDI_PROP_SYSTEM_DEF)
3322 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3323 	else if (flags & DDI_PROP_HW_DEF)
3324 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3325 
3326 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3327 		/*
3328 		 * Need to reallocate buffer?  If so, do it
3329 		 * carefully (reuse same space if new prop
3330 		 * is same size and non-NULL sized).
3331 		 */
3332 		if (length != 0)
3333 			bcopy(value, p, length);
3334 
3335 		if (propp->prop_len != 0)
3336 			kmem_free(propp->prop_val, propp->prop_len);
3337 
3338 		propp->prop_len = length;
3339 		propp->prop_val = p;
3340 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3341 		mutex_exit(&(DEVI(dip)->devi_lock));
3342 		return (DDI_PROP_SUCCESS);
3343 	}
3344 
3345 	mutex_exit(&(DEVI(dip)->devi_lock));
3346 	if (length != 0)
3347 		kmem_free(p, length);
3348 
3349 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3350 }
3351 
3352 /*
3353  * Common update routine used to update and encode a property.	Creates
3354  * a property handle, calls the property encode routine, figures out if
3355  * the property already exists and updates if it does.	Otherwise it
3356  * creates if it does not exist.
3357  */
3358 int
3359 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3360     char *name, void *data, uint_t nelements,
3361     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3362 {
3363 	prop_handle_t	ph;
3364 	int		rval;
3365 	uint_t		ourflags;
3366 
3367 	/*
3368 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3369 	 * return error.
3370 	 */
3371 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3372 		return (DDI_PROP_INVAL_ARG);
3373 
3374 	/*
3375 	 * Create the handle
3376 	 */
3377 	ph.ph_data = NULL;
3378 	ph.ph_cur_pos = NULL;
3379 	ph.ph_save_pos = NULL;
3380 	ph.ph_size = 0;
3381 	ph.ph_ops = &prop_1275_ops;
3382 
3383 	/*
3384 	 * ourflags:
3385 	 * For compatibility with the old interfaces.  The old interfaces
3386 	 * didn't sleep by default and slept when the flag was set.  These
3387 	 * interfaces to the opposite.	So the old interfaces now set the
3388 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3389 	 *
3390 	 * ph.ph_flags:
3391 	 * Blocked data or unblocked data allocation
3392 	 * for ph.ph_data in ddi_prop_encode_alloc()
3393 	 */
3394 	if (flags & DDI_PROP_DONTSLEEP) {
3395 		ourflags = flags;
3396 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3397 	} else {
3398 		ourflags = flags | DDI_PROP_CANSLEEP;
3399 		ph.ph_flags = DDI_PROP_CANSLEEP;
3400 	}
3401 
3402 	/*
3403 	 * Encode the data and store it in the property handle by
3404 	 * calling the prop_encode routine.
3405 	 */
3406 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3407 	    DDI_PROP_SUCCESS) {
3408 		if (rval == DDI_PROP_NO_MEMORY)
3409 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3410 		if (ph.ph_size != 0)
3411 			kmem_free(ph.ph_data, ph.ph_size);
3412 		return (rval);
3413 	}
3414 
3415 	/*
3416 	 * The old interfaces use a stacking approach to creating
3417 	 * properties.	If we are being called from the old interfaces,
3418 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3419 	 * create without checking.
3420 	 */
3421 	if (flags & DDI_PROP_STACK_CREATE) {
3422 		rval = ddi_prop_add(match_dev, dip,
3423 		    ourflags, name, ph.ph_data, ph.ph_size);
3424 	} else {
3425 		rval = ddi_prop_change(match_dev, dip,
3426 		    ourflags, name, ph.ph_data, ph.ph_size);
3427 	}
3428 
3429 	/*
3430 	 * Free the encoded data allocated in the prop_encode routine.
3431 	 */
3432 	if (ph.ph_size != 0)
3433 		kmem_free(ph.ph_data, ph.ph_size);
3434 
3435 	return (rval);
3436 }
3437 
3438 
3439 /*
3440  * ddi_prop_create:	Define a managed property:
3441  *			See above for details.
3442  */
3443 
3444 int
3445 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3446     char *name, caddr_t value, int length)
3447 {
3448 	if (!(flag & DDI_PROP_CANSLEEP)) {
3449 		flag |= DDI_PROP_DONTSLEEP;
3450 #ifdef DDI_PROP_DEBUG
3451 		if (length != 0)
3452 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3453 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3454 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3455 #endif /* DDI_PROP_DEBUG */
3456 	}
3457 	flag &= ~DDI_PROP_SYSTEM_DEF;
3458 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3459 	return (ddi_prop_update_common(dev, dip, flag, name,
3460 	    value, length, ddi_prop_fm_encode_bytes));
3461 }
3462 
3463 int
3464 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3465     char *name, caddr_t value, int length)
3466 {
3467 	if (!(flag & DDI_PROP_CANSLEEP))
3468 		flag |= DDI_PROP_DONTSLEEP;
3469 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3470 	return (ddi_prop_update_common(dev, dip, flag,
3471 	    name, value, length, ddi_prop_fm_encode_bytes));
3472 }
3473 
3474 int
3475 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3476     char *name, caddr_t value, int length)
3477 {
3478 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3479 
3480 	/*
3481 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3482 	 * return error.
3483 	 */
3484 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3485 		return (DDI_PROP_INVAL_ARG);
3486 
3487 	if (!(flag & DDI_PROP_CANSLEEP))
3488 		flag |= DDI_PROP_DONTSLEEP;
3489 	flag &= ~DDI_PROP_SYSTEM_DEF;
3490 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3491 		return (DDI_PROP_NOT_FOUND);
3492 
3493 	return (ddi_prop_update_common(dev, dip,
3494 	    (flag | DDI_PROP_TYPE_BYTE), name,
3495 	    value, length, ddi_prop_fm_encode_bytes));
3496 }
3497 
3498 int
3499 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3500     char *name, caddr_t value, int length)
3501 {
3502 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3503 
3504 	/*
3505 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3506 	 * return error.
3507 	 */
3508 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3509 		return (DDI_PROP_INVAL_ARG);
3510 
3511 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3512 		return (DDI_PROP_NOT_FOUND);
3513 
3514 	if (!(flag & DDI_PROP_CANSLEEP))
3515 		flag |= DDI_PROP_DONTSLEEP;
3516 	return (ddi_prop_update_common(dev, dip,
3517 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3518 	    name, value, length, ddi_prop_fm_encode_bytes));
3519 }
3520 
3521 
3522 /*
3523  * Common lookup routine used to lookup and decode a property.
3524  * Creates a property handle, searches for the raw encoded data,
3525  * fills in the handle, and calls the property decode functions
3526  * passed in.
3527  *
3528  * This routine is not static because ddi_bus_prop_op() which lives in
3529  * ddi_impl.c calls it.  No driver should be calling this routine.
3530  */
3531 int
3532 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3533     uint_t flags, char *name, void *data, uint_t *nelements,
3534     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3535 {
3536 	int		rval;
3537 	uint_t		ourflags;
3538 	prop_handle_t	ph;
3539 
3540 	if ((match_dev == DDI_DEV_T_NONE) ||
3541 	    (name == NULL) || (strlen(name) == 0))
3542 		return (DDI_PROP_INVAL_ARG);
3543 
3544 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3545 	    flags | DDI_PROP_CANSLEEP;
3546 
3547 	/*
3548 	 * Get the encoded data
3549 	 */
3550 	bzero(&ph, sizeof (prop_handle_t));
3551 
3552 	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
3553 		/*
3554 		 * For rootnex and unbound dlpi style-2 devices, index into
3555 		 * the devnames' array and search the global
3556 		 * property list.
3557 		 */
3558 		ourflags &= ~DDI_UNBND_DLPI2;
3559 		rval = i_ddi_prop_search_global(match_dev,
3560 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3561 	} else {
3562 		rval = ddi_prop_search_common(match_dev, dip,
3563 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3564 		    &ph.ph_data, &ph.ph_size);
3565 
3566 	}
3567 
3568 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3569 		ASSERT(ph.ph_data == NULL);
3570 		ASSERT(ph.ph_size == 0);
3571 		return (rval);
3572 	}
3573 
3574 	/*
3575 	 * If the encoded data came from a OBP or software
3576 	 * use the 1275 OBP decode/encode routines.
3577 	 */
3578 	ph.ph_cur_pos = ph.ph_data;
3579 	ph.ph_save_pos = ph.ph_data;
3580 	ph.ph_ops = &prop_1275_ops;
3581 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3582 
3583 	rval = (*prop_decoder)(&ph, data, nelements);
3584 
3585 	/*
3586 	 * Free the encoded data
3587 	 */
3588 	if (ph.ph_size != 0)
3589 		kmem_free(ph.ph_data, ph.ph_size);
3590 
3591 	return (rval);
3592 }
3593 
3594 /*
3595  * Lookup and return an array of composite properties.  The driver must
3596  * provide the decode routine.
3597  */
3598 int
3599 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3600     uint_t flags, char *name, void *data, uint_t *nelements,
3601     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3602 {
3603 	return (ddi_prop_lookup_common(match_dev, dip,
3604 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3605 	    data, nelements, prop_decoder));
3606 }
3607 
3608 /*
3609  * Return 1 if a property exists (no type checking done).
3610  * Return 0 if it does not exist.
3611  */
3612 int
3613 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3614 {
3615 	int	i;
3616 	uint_t	x = 0;
3617 
3618 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3619 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3620 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3621 }
3622 
3623 
3624 /*
3625  * Update an array of composite properties.  The driver must
3626  * provide the encode routine.
3627  */
3628 int
3629 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3630     char *name, void *data, uint_t nelements,
3631     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3632 {
3633 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3634 	    name, data, nelements, prop_create));
3635 }
3636 
3637 /*
3638  * Get a single integer or boolean property and return it.
3639  * If the property does not exists, or cannot be decoded,
3640  * then return the defvalue passed in.
3641  *
3642  * This routine always succeeds.
3643  */
3644 int
3645 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3646     char *name, int defvalue)
3647 {
3648 	int	data;
3649 	uint_t	nelements;
3650 	int	rval;
3651 
3652 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3653 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3654 #ifdef DEBUG
3655 		if (dip != NULL) {
3656 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
3657 			    " 0x%x (prop = %s, node = %s%d)", flags,
3658 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3659 		}
3660 #endif /* DEBUG */
3661 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3662 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3663 	}
3664 
3665 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3666 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
3667 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
3668 		if (rval == DDI_PROP_END_OF_DATA)
3669 			data = 1;
3670 		else
3671 			data = defvalue;
3672 	}
3673 	return (data);
3674 }
3675 
3676 /*
3677  * Get a single 64 bit integer or boolean property and return it.
3678  * If the property does not exists, or cannot be decoded,
3679  * then return the defvalue passed in.
3680  *
3681  * This routine always succeeds.
3682  */
3683 int64_t
3684 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
3685     char *name, int64_t defvalue)
3686 {
3687 	int64_t	data;
3688 	uint_t	nelements;
3689 	int	rval;
3690 
3691 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3692 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3693 #ifdef DEBUG
3694 		if (dip != NULL) {
3695 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
3696 			    " 0x%x (prop = %s, node = %s%d)", flags,
3697 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3698 		}
3699 #endif /* DEBUG */
3700 		return (DDI_PROP_INVAL_ARG);
3701 	}
3702 
3703 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
3704 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3705 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
3706 	    != DDI_PROP_SUCCESS) {
3707 		if (rval == DDI_PROP_END_OF_DATA)
3708 			data = 1;
3709 		else
3710 			data = defvalue;
3711 	}
3712 	return (data);
3713 }
3714 
3715 /*
3716  * Get an array of integer property
3717  */
3718 int
3719 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3720     char *name, int **data, uint_t *nelements)
3721 {
3722 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3723 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3724 #ifdef DEBUG
3725 		if (dip != NULL) {
3726 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
3727 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3728 			    flags, name, ddi_driver_name(dip),
3729 			    ddi_get_instance(dip));
3730 		}
3731 #endif /* DEBUG */
3732 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3733 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3734 	}
3735 
3736 	return (ddi_prop_lookup_common(match_dev, dip,
3737 	    (flags | DDI_PROP_TYPE_INT), name, data,
3738 	    nelements, ddi_prop_fm_decode_ints));
3739 }
3740 
3741 /*
3742  * Get an array of 64 bit integer properties
3743  */
3744 int
3745 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3746     char *name, int64_t **data, uint_t *nelements)
3747 {
3748 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3749 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3750 #ifdef DEBUG
3751 		if (dip != NULL) {
3752 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
3753 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3754 			    flags, name, ddi_driver_name(dip),
3755 			    ddi_get_instance(dip));
3756 		}
3757 #endif /* DEBUG */
3758 		return (DDI_PROP_INVAL_ARG);
3759 	}
3760 
3761 	return (ddi_prop_lookup_common(match_dev, dip,
3762 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
3763 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
3764 }
3765 
3766 /*
3767  * Update a single integer property.  If the property exists on the drivers
3768  * property list it updates, else it creates it.
3769  */
3770 int
3771 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3772     char *name, int data)
3773 {
3774 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3775 	    name, &data, 1, ddi_prop_fm_encode_ints));
3776 }
3777 
3778 /*
3779  * Update a single 64 bit integer property.
3780  * Update the driver property list if it exists, else create it.
3781  */
3782 int
3783 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3784     char *name, int64_t data)
3785 {
3786 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3787 	    name, &data, 1, ddi_prop_fm_encode_int64));
3788 }
3789 
3790 int
3791 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
3792     char *name, int data)
3793 {
3794 	return (ddi_prop_update_common(match_dev, dip,
3795 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3796 	    name, &data, 1, ddi_prop_fm_encode_ints));
3797 }
3798 
3799 int
3800 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
3801     char *name, int64_t data)
3802 {
3803 	return (ddi_prop_update_common(match_dev, dip,
3804 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3805 	    name, &data, 1, ddi_prop_fm_encode_int64));
3806 }
3807 
3808 /*
3809  * Update an array of integer property.  If the property exists on the drivers
3810  * property list it updates, else it creates it.
3811  */
3812 int
3813 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3814     char *name, int *data, uint_t nelements)
3815 {
3816 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
3817 	    name, data, nelements, ddi_prop_fm_encode_ints));
3818 }
3819 
3820 /*
3821  * Update an array of 64 bit integer properties.
3822  * Update the driver property list if it exists, else create it.
3823  */
3824 int
3825 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3826     char *name, int64_t *data, uint_t nelements)
3827 {
3828 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
3829 	    name, data, nelements, ddi_prop_fm_encode_int64));
3830 }
3831 
3832 int
3833 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
3834     char *name, int64_t *data, uint_t nelements)
3835 {
3836 	return (ddi_prop_update_common(match_dev, dip,
3837 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
3838 	    name, data, nelements, ddi_prop_fm_encode_int64));
3839 }
3840 
3841 int
3842 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
3843     char *name, int *data, uint_t nelements)
3844 {
3845 	return (ddi_prop_update_common(match_dev, dip,
3846 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
3847 	    name, data, nelements, ddi_prop_fm_encode_ints));
3848 }
3849 
3850 /*
3851  * Get a single string property.
3852  */
3853 int
3854 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
3855     char *name, char **data)
3856 {
3857 	uint_t x;
3858 
3859 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3860 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3861 #ifdef DEBUG
3862 		if (dip != NULL) {
3863 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
3864 			    "(prop = %s, node = %s%d); invalid bits ignored",
3865 			    "ddi_prop_lookup_string", flags, name,
3866 			    ddi_driver_name(dip), ddi_get_instance(dip));
3867 		}
3868 #endif /* DEBUG */
3869 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3870 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3871 	}
3872 
3873 	return (ddi_prop_lookup_common(match_dev, dip,
3874 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3875 	    &x, ddi_prop_fm_decode_string));
3876 }
3877 
3878 /*
3879  * Get an array of strings property.
3880  */
3881 int
3882 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3883     char *name, char ***data, uint_t *nelements)
3884 {
3885 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3886 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3887 #ifdef DEBUG
3888 		if (dip != NULL) {
3889 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
3890 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
3891 			    flags, name, ddi_driver_name(dip),
3892 			    ddi_get_instance(dip));
3893 		}
3894 #endif /* DEBUG */
3895 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3896 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3897 	}
3898 
3899 	return (ddi_prop_lookup_common(match_dev, dip,
3900 	    (flags | DDI_PROP_TYPE_STRING), name, data,
3901 	    nelements, ddi_prop_fm_decode_strings));
3902 }
3903 
3904 /*
3905  * Update a single string property.
3906  */
3907 int
3908 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3909     char *name, char *data)
3910 {
3911 	return (ddi_prop_update_common(match_dev, dip,
3912 	    DDI_PROP_TYPE_STRING, name, &data, 1,
3913 	    ddi_prop_fm_encode_string));
3914 }
3915 
3916 int
3917 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
3918     char *name, char *data)
3919 {
3920 	return (ddi_prop_update_common(match_dev, dip,
3921 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3922 	    name, &data, 1, ddi_prop_fm_encode_string));
3923 }
3924 
3925 
3926 /*
3927  * Update an array of strings property.
3928  */
3929 int
3930 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3931     char *name, char **data, uint_t nelements)
3932 {
3933 	return (ddi_prop_update_common(match_dev, dip,
3934 	    DDI_PROP_TYPE_STRING, name, data, nelements,
3935 	    ddi_prop_fm_encode_strings));
3936 }
3937 
3938 int
3939 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
3940     char *name, char **data, uint_t nelements)
3941 {
3942 	return (ddi_prop_update_common(match_dev, dip,
3943 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
3944 	    name, data, nelements,
3945 	    ddi_prop_fm_encode_strings));
3946 }
3947 
3948 
3949 /*
3950  * Get an array of bytes property.
3951  */
3952 int
3953 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
3954     char *name, uchar_t **data, uint_t *nelements)
3955 {
3956 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3957 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
3958 #ifdef DEBUG
3959 		if (dip != NULL) {
3960 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
3961 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
3962 			    flags, name, ddi_driver_name(dip),
3963 			    ddi_get_instance(dip));
3964 		}
3965 #endif /* DEBUG */
3966 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3967 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
3968 	}
3969 
3970 	return (ddi_prop_lookup_common(match_dev, dip,
3971 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
3972 	    nelements, ddi_prop_fm_decode_bytes));
3973 }
3974 
3975 /*
3976  * Update an array of bytes property.
3977  */
3978 int
3979 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3980     char *name, uchar_t *data, uint_t nelements)
3981 {
3982 	if (nelements == 0)
3983 		return (DDI_PROP_INVAL_ARG);
3984 
3985 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
3986 	    name, data, nelements, ddi_prop_fm_encode_bytes));
3987 }
3988 
3989 
3990 int
3991 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
3992     char *name, uchar_t *data, uint_t nelements)
3993 {
3994 	if (nelements == 0)
3995 		return (DDI_PROP_INVAL_ARG);
3996 
3997 	return (ddi_prop_update_common(match_dev, dip,
3998 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
3999 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4000 }
4001 
4002 
4003 /*
4004  * ddi_prop_remove_common:	Undefine a managed property:
4005  *			Input dev_t must match dev_t when defined.
4006  *			Returns DDI_PROP_NOT_FOUND, possibly.
4007  *			DDI_PROP_INVAL_ARG is also possible if dev is
4008  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4009  */
4010 int
4011 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4012 {
4013 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4014 	ddi_prop_t	*propp;
4015 	ddi_prop_t	*lastpropp = NULL;
4016 
4017 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4018 	    (strlen(name) == 0)) {
4019 		return (DDI_PROP_INVAL_ARG);
4020 	}
4021 
4022 	if (flag & DDI_PROP_SYSTEM_DEF)
4023 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4024 	else if (flag & DDI_PROP_HW_DEF)
4025 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4026 
4027 	mutex_enter(&(DEVI(dip)->devi_lock));
4028 
4029 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4030 		if (DDI_STRSAME(propp->prop_name, name) &&
4031 		    (dev == propp->prop_dev)) {
4032 			/*
4033 			 * Unlink this propp allowing for it to
4034 			 * be first in the list:
4035 			 */
4036 
4037 			if (lastpropp == NULL)
4038 				*list_head = propp->prop_next;
4039 			else
4040 				lastpropp->prop_next = propp->prop_next;
4041 
4042 			mutex_exit(&(DEVI(dip)->devi_lock));
4043 
4044 			/*
4045 			 * Free memory and return...
4046 			 */
4047 			kmem_free(propp->prop_name,
4048 			    strlen(propp->prop_name) + 1);
4049 			if (propp->prop_len != 0)
4050 				kmem_free(propp->prop_val, propp->prop_len);
4051 			kmem_free(propp, sizeof (ddi_prop_t));
4052 			return (DDI_PROP_SUCCESS);
4053 		}
4054 		lastpropp = propp;
4055 	}
4056 	mutex_exit(&(DEVI(dip)->devi_lock));
4057 	return (DDI_PROP_NOT_FOUND);
4058 }
4059 
4060 int
4061 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4062 {
4063 	return (ddi_prop_remove_common(dev, dip, name, 0));
4064 }
4065 
4066 int
4067 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4068 {
4069 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4070 }
4071 
4072 /*
4073  * e_ddi_prop_list_delete: remove a list of properties
4074  *	Note that the caller needs to provide the required protection
4075  *	(eg. devi_lock if these properties are still attached to a devi)
4076  */
4077 void
4078 e_ddi_prop_list_delete(ddi_prop_t *props)
4079 {
4080 	i_ddi_prop_list_delete(props);
4081 }
4082 
4083 /*
4084  * ddi_prop_remove_all_common:
4085  *	Used before unloading a driver to remove
4086  *	all properties. (undefines all dev_t's props.)
4087  *	Also removes `explicitly undefined' props.
4088  *	No errors possible.
4089  */
4090 void
4091 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4092 {
4093 	ddi_prop_t	**list_head;
4094 
4095 	mutex_enter(&(DEVI(dip)->devi_lock));
4096 	if (flag & DDI_PROP_SYSTEM_DEF) {
4097 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4098 	} else if (flag & DDI_PROP_HW_DEF) {
4099 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4100 	} else {
4101 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4102 	}
4103 	i_ddi_prop_list_delete(*list_head);
4104 	*list_head = NULL;
4105 	mutex_exit(&(DEVI(dip)->devi_lock));
4106 }
4107 
4108 
4109 /*
4110  * ddi_prop_remove_all:		Remove all driver prop definitions.
4111  */
4112 
4113 void
4114 ddi_prop_remove_all(dev_info_t *dip)
4115 {
4116 	i_ddi_prop_dyn_driver_set(dip, NULL);
4117 	ddi_prop_remove_all_common(dip, 0);
4118 }
4119 
4120 /*
4121  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4122  */
4123 
4124 void
4125 e_ddi_prop_remove_all(dev_info_t *dip)
4126 {
4127 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4128 }
4129 
4130 
4131 /*
4132  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4133  *			searches which match this property return
4134  *			the error code DDI_PROP_UNDEFINED.
4135  *
4136  *			Use ddi_prop_remove to negate effect of
4137  *			ddi_prop_undefine
4138  *
4139  *			See above for error returns.
4140  */
4141 
4142 int
4143 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4144 {
4145 	if (!(flag & DDI_PROP_CANSLEEP))
4146 		flag |= DDI_PROP_DONTSLEEP;
4147 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4148 	return (ddi_prop_update_common(dev, dip, flag,
4149 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4150 }
4151 
4152 int
4153 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4154 {
4155 	if (!(flag & DDI_PROP_CANSLEEP))
4156 		flag |= DDI_PROP_DONTSLEEP;
4157 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4158 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4159 	return (ddi_prop_update_common(dev, dip, flag,
4160 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4161 }
4162 
4163 /*
4164  * Support for gathering dynamic properties in devinfo snapshot.
4165  */
4166 void
4167 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4168 {
4169 	DEVI(dip)->devi_prop_dyn_driver = dp;
4170 }
4171 
/* Return the node's driver dynamic-property descriptor list (may be NULL). */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}
4177 
4178 void
4179 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4180 {
4181 	DEVI(dip)->devi_prop_dyn_parent = dp;
4182 }
4183 
/* Return the node's parent dynamic-property descriptor list (may be NULL). */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}
4189 
4190 void
4191 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4192 {
4193 	/* for now we invalidate the entire cached snapshot */
4194 	if (dip && dp)
4195 		i_ddi_di_cache_invalidate();
4196 }
4197 
/* ARGSUSED */
/*
 * Invalidate cached devinfo snapshot state after a property change.
 * All arguments are currently ignored; the whole snapshot is dropped.
 */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4205 
4206 
4207 /*
4208  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4209  *
4210  * if input dip != child_dip, then call is on behalf of child
4211  * to search PROM, do it via ddi_prop_search_common() and ascend only
4212  * if allowed.
4213  *
4214  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4215  * to search for PROM defined props only.
4216  *
4217  * Note that the PROM search is done only if the requested dev
4218  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4219  * have no associated dev, thus are automatically associated with
4220  * DDI_DEV_T_NONE.
4221  *
4222  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4223  *
4224  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4225  * that the property resides in the prom.
4226  */
4227 int
4228 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4229     ddi_prop_op_t prop_op, int mod_flags,
4230     char *name, caddr_t valuep, int *lengthp)
4231 {
4232 	int	len;
4233 	caddr_t buffer = NULL;
4234 
4235 	/*
4236 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4237 	 * look in caller's PROM if it's a self identifying device...
4238 	 *
4239 	 * Note that this is very similar to ddi_prop_op, but we
4240 	 * search the PROM instead of the s/w defined properties,
4241 	 * and we are called on by the parent driver to do this for
4242 	 * the child.
4243 	 */
4244 
4245 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4246 	    ndi_dev_is_prom_node(ch_dip) &&
4247 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4248 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4249 		if (len == -1) {
4250 			return (DDI_PROP_NOT_FOUND);
4251 		}
4252 
4253 		/*
4254 		 * If exists only request, we're done
4255 		 */
4256 		if (prop_op == PROP_EXISTS) {
4257 			return (DDI_PROP_FOUND_1275);
4258 		}
4259 
4260 		/*
4261 		 * If length only request or prop length == 0, get out
4262 		 */
4263 		if ((prop_op == PROP_LEN) || (len == 0)) {
4264 			*lengthp = len;
4265 			return (DDI_PROP_FOUND_1275);
4266 		}
4267 
4268 		/*
4269 		 * Allocate buffer if required... (either way `buffer'
4270 		 * is receiving address).
4271 		 */
4272 
4273 		switch (prop_op) {
4274 
4275 		case PROP_LEN_AND_VAL_ALLOC:
4276 
4277 			buffer = kmem_alloc((size_t)len,
4278 			    mod_flags & DDI_PROP_CANSLEEP ?
4279 			    KM_SLEEP : KM_NOSLEEP);
4280 			if (buffer == NULL) {
4281 				return (DDI_PROP_NO_MEMORY);
4282 			}
4283 			*(caddr_t *)valuep = buffer;
4284 			break;
4285 
4286 		case PROP_LEN_AND_VAL_BUF:
4287 
4288 			if (len > (*lengthp)) {
4289 				*lengthp = len;
4290 				return (DDI_PROP_BUF_TOO_SMALL);
4291 			}
4292 
4293 			buffer = valuep;
4294 			break;
4295 
4296 		default:
4297 			break;
4298 		}
4299 
4300 		/*
4301 		 * Call the PROM function to do the copy.
4302 		 */
4303 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4304 		    name, buffer);
4305 
4306 		*lengthp = len; /* return the actual length to the caller */
4307 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4308 		return (DDI_PROP_FOUND_1275);
4309 	}
4310 
4311 	return (DDI_PROP_NOT_FOUND);
4312 }
4313 
4314 /*
4315  * The ddi_bus_prop_op default bus nexus prop op function.
4316  *
4317  * Code to search hardware layer (PROM), if it exists,
4318  * on behalf of child, then, if appropriate, ascend and check
4319  * my own software defined properties...
4320  */
4321 int
4322 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4323     ddi_prop_op_t prop_op, int mod_flags,
4324     char *name, caddr_t valuep, int *lengthp)
4325 {
4326 	int	error;
4327 
4328 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4329 	    name, valuep, lengthp);
4330 
4331 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4332 	    error == DDI_PROP_BUF_TOO_SMALL)
4333 		return (error);
4334 
4335 	if (error == DDI_PROP_NO_MEMORY) {
4336 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4337 		return (DDI_PROP_NO_MEMORY);
4338 	}
4339 
4340 	/*
4341 	 * Check the 'options' node as a last resort
4342 	 */
4343 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4344 		return (DDI_PROP_NOT_FOUND);
4345 
4346 	if (ch_dip == ddi_root_node())	{
4347 		/*
4348 		 * As a last resort, when we've reached
4349 		 * the top and still haven't found the
4350 		 * property, see if the desired property
4351 		 * is attached to the options node.
4352 		 *
4353 		 * The options dip is attached right after boot.
4354 		 */
4355 		ASSERT(options_dip != NULL);
4356 		/*
4357 		 * Force the "don't pass" flag to *just* see
4358 		 * what the options node has to offer.
4359 		 */
4360 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4361 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4362 		    (uint_t *)lengthp));
4363 	}
4364 
4365 	/*
4366 	 * Otherwise, continue search with parent's s/w defined properties...
4367 	 * NOTE: Using `dip' in following call increments the level.
4368 	 */
4369 
4370 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4371 	    name, valuep, (uint_t *)lengthp));
4372 }
4373 
4374 /*
4375  * External property functions used by other parts of the kernel...
4376  */
4377 
4378 /*
4379  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4380  */
4381 
4382 int
4383 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4384     caddr_t valuep, int *lengthp)
4385 {
4386 	_NOTE(ARGUNUSED(type))
4387 	dev_info_t *devi;
4388 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4389 	int error;
4390 
4391 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4392 		return (DDI_PROP_NOT_FOUND);
4393 
4394 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4395 	ddi_release_devi(devi);
4396 	return (error);
4397 }
4398 
4399 /*
4400  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4401  */
4402 
4403 int
4404 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4405     caddr_t valuep, int *lengthp)
4406 {
4407 	_NOTE(ARGUNUSED(type))
4408 	dev_info_t *devi;
4409 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4410 	int error;
4411 
4412 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4413 		return (DDI_PROP_NOT_FOUND);
4414 
4415 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4416 	ddi_release_devi(devi);
4417 	return (error);
4418 }
4419 
4420 /*
4421  * e_ddi_getprop:	See comments for ddi_getprop.
4422  */
4423 int
4424 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4425 {
4426 	_NOTE(ARGUNUSED(type))
4427 	dev_info_t *devi;
4428 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4429 	int	propvalue = defvalue;
4430 	int	proplength = sizeof (int);
4431 	int	error;
4432 
4433 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4434 		return (defvalue);
4435 
4436 	error = cdev_prop_op(dev, devi, prop_op,
4437 	    flags, name, (caddr_t)&propvalue, &proplength);
4438 	ddi_release_devi(devi);
4439 
4440 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4441 		propvalue = 1;
4442 
4443 	return (propvalue);
4444 }
4445 
4446 /*
4447  * e_ddi_getprop_int64:
4448  *
4449  * This is a typed interfaces, but predates typed properties. With the
4450  * introduction of typed properties the framework tries to ensure
4451  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4452  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4453  * typed interface invokes legacy (non-typed) interfaces:
4454  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4455  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4456  * this type of lookup as a single operation we invoke the legacy
4457  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4458  * framework ddi_prop_op(9F) implementation is expected to check for
4459  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4460  * (currently TYPE_INT64).
4461  */
4462 int64_t
4463 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4464     int flags, int64_t defvalue)
4465 {
4466 	_NOTE(ARGUNUSED(type))
4467 	dev_info_t	*devi;
4468 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4469 	int64_t		propvalue = defvalue;
4470 	int		proplength = sizeof (propvalue);
4471 	int		error;
4472 
4473 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4474 		return (defvalue);
4475 
4476 	error = cdev_prop_op(dev, devi, prop_op, flags |
4477 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4478 	ddi_release_devi(devi);
4479 
4480 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4481 		propvalue = 1;
4482 
4483 	return (propvalue);
4484 }
4485 
4486 /*
4487  * e_ddi_getproplen:	See comments for ddi_getproplen.
4488  */
4489 int
4490 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4491 {
4492 	_NOTE(ARGUNUSED(type))
4493 	dev_info_t *devi;
4494 	ddi_prop_op_t prop_op = PROP_LEN;
4495 	int error;
4496 
4497 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4498 		return (DDI_PROP_NOT_FOUND);
4499 
4500 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4501 	ddi_release_devi(devi);
4502 	return (error);
4503 }
4504 
4505 /*
4506  * Routines to get at elements of the dev_info structure
4507  */
4508 
4509 /*
4510  * ddi_binding_name: Return the driver binding name of the devinfo node
4511  *		This is the name the OS used to bind the node to a driver.
4512  */
char *
ddi_binding_name(dev_info_t *dip)
{
	/* The binding name is stored directly on the dev_info node. */
	return (DEVI(dip)->devi_binding_name);
}
4518 
4519 /*
4520  * ddi_driver_major: Return the major number of the driver that
4521  *	the supplied devinfo is bound to.  If not yet bound,
4522  *	DDI_MAJOR_T_NONE.
4523  *
4524  * When used by the driver bound to 'devi', this
4525  * function will reliably return the driver major number.
4526  * Other ways of determining the driver major number, such as
4527  *	major = ddi_name_to_major(ddi_get_name(devi));
4528  *	major = ddi_name_to_major(ddi_binding_name(devi));
4529  * can return a different result as the driver/alias binding
4530  * can change dynamically, and thus should be avoided.
4531  */
major_t
ddi_driver_major(dev_info_t *devi)
{
	/* devi_major holds the bound driver's major, or DDI_MAJOR_T_NONE. */
	return (DEVI(devi)->devi_major);
}
4537 
4538 /*
4539  * ddi_driver_name: Return the normalized driver name. this is the
4540  *		actual driver name
4541  */
4542 const char *
4543 ddi_driver_name(dev_info_t *devi)
4544 {
4545 	major_t major;
4546 
4547 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4548 		return (ddi_major_to_name(major));
4549 
4550 	return (ddi_node_name(devi));
4551 }
4552 
4553 /*
4554  * i_ddi_set_binding_name:	Set binding name.
4555  *
4556  *	Set the binding name to the given name.
4557  *	This routine is for use by the ddi implementation, not by drivers.
4558  */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	/* Store the pointer as-is; no copy of 'name' is made. */
	DEVI(dip)->devi_binding_name = name;

}
4565 
4566 /*
4567  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4568  * the implementation has used to bind the node to a driver.
4569  */
char *
ddi_get_name(dev_info_t *dip)
{
	/* Same field as ddi_binding_name(): the driver binding name. */
	return (DEVI(dip)->devi_binding_name);
}
4575 
4576 /*
4577  * ddi_node_name: Return the name property of the devinfo node
4578  *		This may differ from ddi_binding_name if the node name
4579  *		does not define a binding to a driver (i.e. generic names).
4580  */
char *
ddi_node_name(dev_info_t *dip)
{
	/* Node name as stored on the dev_info node (may be generic). */
	return (DEVI(dip)->devi_node_name);
}
4586 
4587 
4588 /*
4589  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4590  */
int
ddi_get_nodeid(dev_info_t *dip)
{
	/* Return the nodeid cached on the dev_info node. */
	return (DEVI(dip)->devi_nodeid);
}
4596 
int
ddi_get_instance(dev_info_t *dip)
{
	/* Return the instance number assigned to this node. */
	return (DEVI(dip)->devi_instance);
}
4602 
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	/* Return the dev_ops vector currently attached to this node. */
	return (DEVI(dip)->devi_ops);
}
4608 
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	/* Install 'devo' as this node's dev_ops vector. */
	DEVI(dip)->devi_ops = devo;
}
4614 
4615 /*
4616  * ddi_set_driver_private/ddi_get_driver_private:
4617  * Get/set device driver private data in devinfo.
4618  */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	/* Opaque per-instance pointer; the framework never interprets it. */
	DEVI(dip)->devi_driver_data = data;
}
4624 
void *
ddi_get_driver_private(dev_info_t *dip)
{
	/* Return the pointer previously set by ddi_set_driver_private(). */
	return (DEVI(dip)->devi_driver_data);
}
4630 
4631 /*
4632  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4633  */
4634 
dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	/* Parent link in the devinfo tree. */
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}
4640 
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	/* First child link in the devinfo tree. */
	return ((dev_info_t *)DEVI(dip)->devi_child);
}
4646 
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	/* Next-sibling link in the devinfo tree. */
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}
4652 
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	/* Generic 'next' link (devi_next) stored on the node. */
	return ((dev_info_t *)DEVI(dip)->devi_next);
}
4658 
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	/* Set the generic 'next' link (devi_next) on the node. */
	DEVI(dip)->devi_next = DEVI(nextdip);
}
4664 
4665 /*
4666  * ddi_root_node:		Return root node of devinfo tree
4667  */
4668 
dev_info_t *
ddi_root_node(void)
{
	/* top_devinfo is defined elsewhere; it anchors the devinfo tree. */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4676 
4677 /*
4678  * Miscellaneous functions:
4679  */
4680 
4681 /*
4682  * Implementation specific hooks
4683  */
4684 
4685 void
4686 ddi_report_dev(dev_info_t *d)
4687 {
4688 	char *b;
4689 
4690 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4691 
4692 	/*
4693 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4694 	 * userland, so we print its full name together with the instance
4695 	 * number 'abbreviation' that the driver may use internally.
4696 	 */
4697 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4698 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4699 		cmn_err(CE_CONT, "?%s%d is %s\n",
4700 		    ddi_driver_name(d), ddi_get_instance(d),
4701 		    ddi_pathname(d, b));
4702 		kmem_free(b, MAXPATHLEN);
4703 	}
4704 }
4705 
4706 /*
4707  * ddi_ctlops() is described in the assembler not to buy a new register
4708  * window when it's called and can reduce cost in climbing the device tree
4709  * without using the tail call optimization.
4710  */
4711 int
4712 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
4713 {
4714 	int ret;
4715 
4716 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
4717 	    (void *)&rnumber, (void *)result);
4718 
4719 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
4720 }
4721 
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	/* NREGS ctlop: number of register sets is returned via *result. */
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
4727 
int
ddi_dev_is_sid(dev_info_t *d)
{
	/* Ask the parent nexus whether this is a self-identifying device. */
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
4733 
int
ddi_slaveonly(dev_info_t *d)
{
	/* Ask the parent nexus whether this device is in a slave-only slot. */
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
4739 
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	/* AFFINITY ctlop on 'a', passing 'b' as the device to compare. */
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
4745 
4746 int
4747 ddi_streams_driver(dev_info_t *dip)
4748 {
4749 	if (i_ddi_devi_attached(dip) &&
4750 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
4751 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
4752 		return (DDI_SUCCESS);
4753 	return (DDI_FAILURE);
4754 }
4755 
4756 /*
4757  * callback free list
4758  */
4759 
static int ncallbacks;				/* size of the static L2 pool */
static int nc_low = 170;			/* pool size: < 48MB physmem */
static int nc_med = 512;			/* pool size: < 128MB physmem */
static int nc_high = 2048;			/* pool size: >= 128MB physmem */
static struct ddi_callback *callbackq;		/* the static L2 pool itself */
static struct ddi_callback *callbackqfree;	/* head of the L2 free list */

/*
 * set/run callback lists
 */
struct	cbstats	{
	kstat_named_t	cb_asked;	/* callbacks requested */
	kstat_named_t	cb_new;		/* list entries created */
	kstat_named_t	cb_run;		/* callbacks that made progress */
	kstat_named_t	cb_delete;	/* list entries deleted */
	kstat_named_t	cb_maxreq;	/* max outstanding requests seen */
	kstat_named_t	cb_maxlist;	/* max entries allocated at once */
	kstat_named_t	cb_alloc;	/* entries currently allocated */
	kstat_named_t	cb_runouts;	/* callbacks that returned 0 */
	kstat_named_t	cb_L2;		/* allocations satisfied from L2 */
	kstat_named_t	cb_grow;	/* emergency tryhard allocations */
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the uint32 counters above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the L2 free list, and cbstats. */
static kmutex_t ddi_callback_mutex;
4806 
4807 /*
4808  * callbacks are handled using a L1/L2 cache. The L1 cache
4809  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
4810  * we can't get callbacks from the L1 cache [because pageout is doing
4811  * I/O at the time freemem is 0], we allocate callbacks out of the
4812  * L2 cache. The L2 cache is static and depends on the memory size.
4813  * [We might also count the number of devices at probe time and
4814  * allocate one structure per device and adjust for deferred attach]
4815  */
4816 void
4817 impl_ddi_callback_init(void)
4818 {
4819 	int	i;
4820 	uint_t	physmegs;
4821 	kstat_t	*ksp;
4822 
4823 	physmegs = physmem >> (20 - PAGESHIFT);
4824 	if (physmegs < 48) {
4825 		ncallbacks = nc_low;
4826 	} else if (physmegs < 128) {
4827 		ncallbacks = nc_med;
4828 	} else {
4829 		ncallbacks = nc_high;
4830 	}
4831 
4832 	/*
4833 	 * init free list
4834 	 */
4835 	callbackq = kmem_zalloc(
4836 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
4837 	for (i = 0; i < ncallbacks-1; i++)
4838 		callbackq[i].c_nfree = &callbackq[i+1];
4839 	callbackqfree = callbackq;
4840 
4841 	/* init kstats */
4842 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
4843 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
4844 		ksp->ks_data = (void *) &cbstats;
4845 		kstat_install(ksp);
4846 	}
4847 
4848 }
4849 
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
    int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/*
	 * If an entry with the same function/argument pair is already on
	 * this list, just bump its count instead of queueing a duplicate.
	 * 'marker' tracks the tail as we walk.
	 */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/*
	 * Allocation fallback order: L1 (kmem, KM_NOSLEEP), then the
	 * static L2 free list, and finally kmem_alloc_tryhard() with
	 * KM_PANIC as a last resort.
	 */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Link at the tail, or make the new entry the list head. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
4893 
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	/* Queue (or coalesce) one callback under the global callback lock. */
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	/* Track the high-water mark of outstanding (asked - run) requests. */
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}
4904 
/*
 * Drain one callback list (softcall handler for ddi_run_callback()).
 * 'Queue' is really a uintptr_t * list head as used by callback_insert().
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/*
		 * On the first pass, total the pending counts so we know
		 * when the whole batch has been drained.
		 */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/*
		 * Unlink the head entry, capture its contents, and recycle
		 * it: back to the L2 free list if it came from the static
		 * pool, otherwise to kmem.
		 */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/*
		 * Invoke the callback with the lock dropped.  A zero
		 * return means it could make no progress: requeue the
		 * remaining count and stop retrying this entry.  Nonzero
		 * means one unit completed; keep going while the entry's
		 * count lasts.
		 */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
4965 
void
ddi_run_callback(uintptr_t *listid)
{
	/* Drain the list asynchronously via the softcall() facility. */
	softcall(real_callback_run, listid);
}
4971 
4972 /*
4973  * ddi_periodic_t
4974  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
4975  *     int level)
4976  *
4977  * INTERFACE LEVEL
4978  *      Solaris DDI specific (Solaris DDI)
4979  *
4980  * PARAMETERS
4981  *      func: the callback function
4982  *
4983  *            The callback function will be invoked. The function is invoked
4984  *            in kernel context if the argument level passed is the zero.
4985  *            Otherwise it's invoked in interrupt context at the specified
4986  *            level.
4987  *
4988  *       arg: the argument passed to the callback function
4989  *
4990  *  interval: interval time
4991  *
4992  *    level : callback interrupt level
4993  *
4994  *            If the value is the zero, the callback function is invoked
4995  *            in kernel context. If the value is more than the zero, but
4996  *            less than or equal to ten, the callback function is invoked in
4997  *            interrupt context at the specified interrupt level, which may
4998  *            be used for real time applications.
4999  *
5000  *            This value must be in range of 0-10, which can be a numeric
5001  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5002  *
5003  * DESCRIPTION
5004  *      ddi_periodic_add(9F) schedules the specified function to be
5005  *      periodically invoked in the interval time.
5006  *
5007  *      As well as timeout(9F), the exact time interval over which the function
5008  *      takes effect cannot be guaranteed, but the value given is a close
5009  *      approximation.
5010  *
5011  *      Drivers waiting on behalf of processes with real-time constraints must
5012  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5013  *
5014  * RETURN VALUES
5015  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5016  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5017  *
5018  * CONTEXT
5019  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5020  *      it cannot be called in interrupt context, which is different from
5021  *      timeout(9F).
5022  */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	/* i_timeout() implements the periodic timeout; handle is opaque. */
	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}
5043 
5044 /*
5045  * void
5046  * ddi_periodic_delete(ddi_periodic_t req)
5047  *
5048  * INTERFACE LEVEL
5049  *     Solaris DDI specific (Solaris DDI)
5050  *
5051  * PARAMETERS
5052  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5053  *     previously.
5054  *
5055  * DESCRIPTION
5056  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5057  *     previously requested.
5058  *
5059  *     ddi_periodic_delete(9F) will not return until the pending request
5060  *     is canceled or executed.
5061  *
5062  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5063  *     timeout which is either running on another CPU, or has already
5064  *     completed causes no problems. However, unlike untimeout(9F), there is
5065  *     no restrictions on the lock which might be held across the call to
5066  *     ddi_periodic_delete(9F).
5067  *
5068  *     Drivers should be structured with the understanding that the arrival of
5069  *     both an interrupt and a timeout for that interrupt can occasionally
5070  *     occur, in either order.
5071  *
5072  * CONTEXT
5073  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5074  *     it cannot be called in interrupt context, which is different from
5075  *     untimeout(9F).
5076  */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	/* Cancel (or wait out) the request created by ddi_periodic_add(). */
	i_untimeout((timeout_t)req);
}
5090 
dev_info_t *
nodevinfo(dev_t dev, int otyp)
{
	_NOTE(ARGUNUSED(dev, otyp))
	/* Stub: no devinfo node; always returns NULL. */
	return ((dev_info_t *)0);
}
5097 
5098 /*
5099  * A driver should support its own getinfo(9E) entry point. This function
5100  * is provided as a convenience for ON drivers that don't expect their
5101  * getinfo(9E) entry point to be called. A driver that uses this must not
5102  * call ddi_create_minor_node.
5103  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* Stub getinfo(9E): always fails (see block comment above). */
	return (DDI_FAILURE);
}
5110 
5111 /*
5112  * A driver should support its own getinfo(9E) entry point. This function
5113  * is provided as a convenience for ON drivers that where the minor number
5114  * is the instance. Drivers that do not have 1:1 mapping must implement
5115  * their own getinfo(9E) function.
5116  */
5117 int
5118 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5119     void *arg, void **result)
5120 {
5121 	_NOTE(ARGUNUSED(dip))
5122 	int	instance;
5123 
5124 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5125 		return (DDI_FAILURE);
5126 
5127 	instance = getminor((dev_t)(uintptr_t)arg);
5128 	*result = (void *)(uintptr_t)instance;
5129 	return (DDI_SUCCESS);
5130 }
5131 
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	/* Stub attach/detach entry point: always fails. */
	return (DDI_FAILURE);
}
5138 
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	/* Stub DMA map entry: always fails with DDI_DMA_NOMAPPING. */
	return (DDI_DMA_NOMAPPING);
}
5146 
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	/* Stub DMA allochdl entry: always fails with DDI_DMA_BADATTR. */
	return (DDI_DMA_BADATTR);
}
5154 
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* Stub DMA freehdl entry: always fails. */
	return (DDI_FAILURE);
}
5162 
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	/* Stub DMA bindhdl entry: always fails with DDI_DMA_NOMAPPING. */
	return (DDI_DMA_NOMAPPING);
}
5171 
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* Stub DMA unbindhdl entry: always fails. */
	return (DDI_FAILURE);
}
5179 
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	/* Stub DMA flush entry: always fails. */
	return (DDI_FAILURE);
}
5188 
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	/* Stub DMA window entry: always fails. */
	return (DDI_FAILURE);
}
5197 
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	/* Stub DMA mctl entry: always fails. */
	return (DDI_FAILURE);
}
5206 
/* Do-nothing stub entry point. */
void
ddivoid(void)
{}
5210 
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	/* Stub chpoll(9E): polling unsupported, always fails with ENXIO. */
	return (ENXIO);
}
5218 
cred_t *
ddi_get_cred(void)
{
	/* Credentials of the current thread, via the CRED() macro. */
	return (CRED());
}
5224 
clock_t
ddi_get_lbolt(void)
{
	/* Narrow the 64-bit hybrid lbolt value to the legacy clock_t. */
	return ((clock_t)lbolt_hybrid());
}
5230 
int64_t
ddi_get_lbolt64(void)
{
	/* Full 64-bit hybrid lbolt value, no truncation. */
	return (lbolt_hybrid());
}
5236 
5237 time_t
5238 ddi_get_time(void)
5239 {
5240 	time_t	now;
5241 
5242 	if ((now = gethrestime_sec()) == 0) {
5243 		timestruc_t ts;
5244 		mutex_enter(&tod_lock);
5245 		ts = tod_get();
5246 		mutex_exit(&tod_lock);
5247 		return (ts.tv_sec);
5248 	} else {
5249 		return (now);
5250 	}
5251 }
5252 
pid_t
ddi_get_pid(void)
{
	/* PID of the process owning the current thread. */
	return (ttoproc(curthread)->p_pid);
}
5258 
kt_did_t
ddi_get_kt_did(void)
{
	/* Kernel thread ID (t_did) of the caller. */
	return (curthread->t_did);
}
5264 
5265 /*
5266  * This function returns B_TRUE if the caller can reasonably expect that a call
5267  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5268  * by user-level signal.  If it returns B_FALSE, then the caller should use
5269  * other means to make certain that the wait will not hang "forever."
5270  *
5271  * It does not check the signal mask, nor for reception of any particular
5272  * signal.
5273  *
5274  * Currently, a thread can receive a signal if it's not a kernel thread and it
5275  * is not in the middle of exit(2) tear-down.  Threads that are in that
5276  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5277  * cv_timedwait, and qwait_sig to qwait.
5278  */
5279 boolean_t
5280 ddi_can_receive_sig(void)
5281 {
5282 	proc_t *pp;
5283 
5284 	if (curthread->t_proc_flag & TP_LWPEXIT)
5285 		return (B_FALSE);
5286 	if ((pp = ttoproc(curthread)) == NULL)
5287 		return (B_FALSE);
5288 	return (pp->p_as != &kas);
5289 }
5290 
5291 /*
5292  * Swap bytes in 16-bit [half-]words
5293  */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *from = src;
	unsigned char *to = dst;
	size_t nshorts;

	/* Swap whole 16-bit words; a trailing odd byte is left untouched. */
	for (nshorts = nbytes >> 1; nshorts != 0; nshorts--) {
		unsigned char lo = from[0];

		to[0] = from[1];
		to[1] = lo;
		from += 2;
		to += 2;
	}
}
5310 
/* Append 'dmdp' to the tail of ddip's minor-node list. */
static void
ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
{
	int			circ;
	struct ddi_minor_data	*dp;

	/* Single-thread list updates by entering the devinfo node. */
	ndi_devi_enter(ddip, &circ);
	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
		DEVI(ddip)->devi_minor = dmdp;
	} else {
		/* Walk to the tail and link the new entry there. */
		while (dp->next != (struct ddi_minor_data *)NULL)
			dp = dp->next;
		dp->next = dmdp;
	}
	ndi_devi_exit(ddip, circ);
}
5327 
5328 /*
5329  * Part of the obsolete SunCluster DDI Hooks.
5330  * Keep for binary compatibility
5331  */
minor_t
ddi_getiminor(dev_t dev)
{
	/* Obsolete SunCluster hook: identical to getminor(). */
	return (getminor(dev));
}
5337 
5338 static int
5339 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5340 {
5341 	int se_flag;
5342 	int kmem_flag;
5343 	int se_err;
5344 	char *pathname, *class_name;
5345 	sysevent_t *ev = NULL;
5346 	sysevent_id_t eid;
5347 	sysevent_value_t se_val;
5348 	sysevent_attr_list_t *ev_attr_list = NULL;
5349 
5350 	/* determine interrupt context */
5351 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5352 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5353 
5354 	i_ddi_di_cache_invalidate();
5355 
5356 #ifdef DEBUG
5357 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5358 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5359 		    "interrupt level by driver %s",
5360 		    ddi_driver_name(dip));
5361 	}
5362 #endif /* DEBUG */
5363 
5364 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5365 	if (ev == NULL) {
5366 		goto fail;
5367 	}
5368 
5369 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5370 	if (pathname == NULL) {
5371 		sysevent_free(ev);
5372 		goto fail;
5373 	}
5374 
5375 	(void) ddi_pathname(dip, pathname);
5376 	ASSERT(strlen(pathname));
5377 	se_val.value_type = SE_DATA_TYPE_STRING;
5378 	se_val.value.sv_string = pathname;
5379 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5380 	    &se_val, se_flag) != 0) {
5381 		kmem_free(pathname, MAXPATHLEN);
5382 		sysevent_free(ev);
5383 		goto fail;
5384 	}
5385 	kmem_free(pathname, MAXPATHLEN);
5386 
5387 	/* add the device class attribute */
5388 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5389 		se_val.value_type = SE_DATA_TYPE_STRING;
5390 		se_val.value.sv_string = class_name;
5391 		if (sysevent_add_attr(&ev_attr_list,
5392 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5393 			sysevent_free_attr(ev_attr_list);
5394 			goto fail;
5395 		}
5396 	}
5397 
5398 	/*
5399 	 * allow for NULL minor names
5400 	 */
5401 	if (minor_name != NULL) {
5402 		se_val.value.sv_string = minor_name;
5403 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5404 		    &se_val, se_flag) != 0) {
5405 			sysevent_free_attr(ev_attr_list);
5406 			sysevent_free(ev);
5407 			goto fail;
5408 		}
5409 	}
5410 
5411 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5412 		sysevent_free_attr(ev_attr_list);
5413 		sysevent_free(ev);
5414 		goto fail;
5415 	}
5416 
5417 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5418 		if (se_err == SE_NO_TRANSPORT) {
5419 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5420 			    "for driver %s (%s). Run devfsadm -i %s",
5421 			    ddi_driver_name(dip), "syseventd not responding",
5422 			    ddi_driver_name(dip));
5423 		} else {
5424 			sysevent_free(ev);
5425 			goto fail;
5426 		}
5427 	}
5428 
5429 	sysevent_free(ev);
5430 	return (DDI_SUCCESS);
5431 fail:
5432 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5433 	    "for driver %s. Run devfsadm -i %s",
5434 	    ddi_driver_name(dip), ddi_driver_name(dip));
5435 	return (DDI_SUCCESS);
5436 }
5437 
5438 /*
5439  * failing to remove a minor node is not of interest
5440  * therefore we do not generate an error message
5441  */
5442 static int
5443 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5444 {
5445 	char *pathname, *class_name;
5446 	sysevent_t *ev;
5447 	sysevent_id_t eid;
5448 	sysevent_value_t se_val;
5449 	sysevent_attr_list_t *ev_attr_list = NULL;
5450 
5451 	/*
5452 	 * only log ddi_remove_minor_node() calls outside the scope
5453 	 * of attach/detach reconfigurations and when the dip is
5454 	 * still initialized.
5455 	 */
5456 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5457 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5458 		return (DDI_SUCCESS);
5459 	}
5460 
5461 	i_ddi_di_cache_invalidate();
5462 
5463 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5464 	if (ev == NULL) {
5465 		return (DDI_SUCCESS);
5466 	}
5467 
5468 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5469 	if (pathname == NULL) {
5470 		sysevent_free(ev);
5471 		return (DDI_SUCCESS);
5472 	}
5473 
5474 	(void) ddi_pathname(dip, pathname);
5475 	ASSERT(strlen(pathname));
5476 	se_val.value_type = SE_DATA_TYPE_STRING;
5477 	se_val.value.sv_string = pathname;
5478 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5479 	    &se_val, SE_SLEEP) != 0) {
5480 		kmem_free(pathname, MAXPATHLEN);
5481 		sysevent_free(ev);
5482 		return (DDI_SUCCESS);
5483 	}
5484 
5485 	kmem_free(pathname, MAXPATHLEN);
5486 
5487 	/*
5488 	 * allow for NULL minor names
5489 	 */
5490 	if (minor_name != NULL) {
5491 		se_val.value.sv_string = minor_name;
5492 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5493 		    &se_val, SE_SLEEP) != 0) {
5494 			sysevent_free_attr(ev_attr_list);
5495 			goto fail;
5496 		}
5497 	}
5498 
5499 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5500 		/* add the device class, driver name and instance attributes */
5501 
5502 		se_val.value_type = SE_DATA_TYPE_STRING;
5503 		se_val.value.sv_string = class_name;
5504 		if (sysevent_add_attr(&ev_attr_list,
5505 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5506 			sysevent_free_attr(ev_attr_list);
5507 			goto fail;
5508 		}
5509 
5510 		se_val.value_type = SE_DATA_TYPE_STRING;
5511 		se_val.value.sv_string = (char *)ddi_driver_name(dip);
5512 		if (sysevent_add_attr(&ev_attr_list,
5513 		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5514 			sysevent_free_attr(ev_attr_list);
5515 			goto fail;
5516 		}
5517 
5518 		se_val.value_type = SE_DATA_TYPE_INT32;
5519 		se_val.value.sv_int32 = ddi_get_instance(dip);
5520 		if (sysevent_add_attr(&ev_attr_list,
5521 		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5522 			sysevent_free_attr(ev_attr_list);
5523 			goto fail;
5524 		}
5525 
5526 	}
5527 
5528 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5529 		sysevent_free_attr(ev_attr_list);
5530 	} else {
5531 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5532 	}
5533 fail:
5534 	sysevent_free(ev);
5535 	return (DDI_SUCCESS);
5536 }
5537 
5538 /*
5539  * Derive the device class of the node.
5540  * Device class names aren't defined yet. Until this is done we use
5541  * devfs event subclass names as device class names.
5542  */
5543 static int
5544 derive_devi_class(dev_info_t *dip, const char *node_type, int flag)
5545 {
5546 	int rv = DDI_SUCCESS;
5547 
5548 	if (i_ddi_devi_class(dip) == NULL) {
5549 		if (strncmp(node_type, DDI_NT_BLOCK,
5550 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5551 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5552 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5553 		    strcmp(node_type, DDI_NT_FD) != 0) {
5554 
5555 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5556 
5557 		} else if (strncmp(node_type, DDI_NT_NET,
5558 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5559 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5560 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5561 
5562 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5563 
5564 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5565 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5566 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5567 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5568 
5569 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5570 
5571 		} else if (strncmp(node_type, DDI_PSEUDO,
5572 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5573 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5574 		    sizeof (ESC_LOFI) -1) == 0)) {
5575 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5576 		}
5577 	}
5578 
5579 	return (rv);
5580 }
5581 
5582 /*
5583  * Check compliance with PSARC 2003/375:
5584  *
5585  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5586  * exceed IFNAMSIZ (16) characters in length.
5587  */
5588 static boolean_t
5589 verify_name(const char *name)
5590 {
5591 	size_t len = strlen(name);
5592 	const char *cp;
5593 
5594 	if (len == 0 || len > IFNAMSIZ)
5595 		return (B_FALSE);
5596 
5597 	for (cp = name; *cp != '\0'; cp++) {
5598 		if (!isalnum(*cp) && *cp != '_')
5599 			return (B_FALSE);
5600 	}
5601 
5602 	return (B_TRUE);
5603 }
5604 
5605 /*
5606  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5607  *				attach it to the given devinfo node.
5608  */
5609 
5610 static int
5611 ddi_create_minor_common(dev_info_t *dip, const char *name, int spec_type,
5612     minor_t minor_num, const char *node_type, int flag, ddi_minor_type mtype,
5613     const char *read_priv, const char *write_priv, mode_t priv_mode)
5614 {
5615 	struct ddi_minor_data *dmdp;
5616 	major_t major;
5617 
5618 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
5619 		return (DDI_FAILURE);
5620 
5621 	if (name == NULL)
5622 		return (DDI_FAILURE);
5623 
5624 	/*
5625 	 * Log a message if the minor number the driver is creating
5626 	 * is not expressible on the on-disk filesystem (currently
5627 	 * this is limited to 18 bits both by UFS). The device can
5628 	 * be opened via devfs, but not by device special files created
5629 	 * via mknod().
5630 	 */
5631 	if (minor_num > L_MAXMIN32) {
5632 		cmn_err(CE_WARN,
5633 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
5634 		    ddi_driver_name(dip), ddi_get_instance(dip),
5635 		    name, minor_num);
5636 		return (DDI_FAILURE);
5637 	}
5638 
5639 	/* dip must be bound and attached */
5640 	major = ddi_driver_major(dip);
5641 	ASSERT(major != DDI_MAJOR_T_NONE);
5642 
5643 	/*
5644 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
5645 	 */
5646 	if (node_type == NULL) {
5647 		node_type = DDI_PSEUDO;
5648 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
5649 		    " minor node %s; default to DDI_PSEUDO",
5650 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
5651 	}
5652 
5653 	/*
5654 	 * If the driver is a network driver, ensure that the name falls within
5655 	 * the interface naming constraints specified by PSARC/2003/375.
5656 	 */
5657 	if (strcmp(node_type, DDI_NT_NET) == 0) {
5658 		if (!verify_name(name))
5659 			return (DDI_FAILURE);
5660 
5661 		if (mtype == DDM_MINOR) {
5662 			struct devnames *dnp = &devnamesp[major];
5663 
5664 			/* Mark driver as a network driver */
5665 			LOCK_DEV_OPS(&dnp->dn_lock);
5666 			dnp->dn_flags |= DN_NETWORK_DRIVER;
5667 
5668 			/*
5669 			 * If this minor node is created during the device
5670 			 * attachment, this is a physical network device.
5671 			 * Mark the driver as a physical network driver.
5672 			 */
5673 			if (DEVI_IS_ATTACHING(dip))
5674 				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
5675 			UNLOCK_DEV_OPS(&dnp->dn_lock);
5676 		}
5677 	}
5678 
5679 	if (mtype == DDM_MINOR) {
5680 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
5681 		    DDI_SUCCESS)
5682 			return (DDI_FAILURE);
5683 	}
5684 
5685 	/*
5686 	 * Take care of minor number information for the node.
5687 	 */
5688 
5689 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
5690 	    KM_NOSLEEP)) == NULL) {
5691 		return (DDI_FAILURE);
5692 	}
5693 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
5694 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
5695 		return (DDI_FAILURE);
5696 	}
5697 	dmdp->dip = dip;
5698 	dmdp->ddm_dev = makedevice(major, minor_num);
5699 	dmdp->ddm_spec_type = spec_type;
5700 	dmdp->ddm_node_type = node_type;
5701 	dmdp->type = mtype;
5702 	if (flag & CLONE_DEV) {
5703 		dmdp->type = DDM_ALIAS;
5704 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
5705 	}
5706 	if (flag & PRIVONLY_DEV) {
5707 		dmdp->ddm_flags |= DM_NO_FSPERM;
5708 	}
5709 	if (read_priv || write_priv) {
5710 		dmdp->ddm_node_priv =
5711 		    devpolicy_priv_by_name(read_priv, write_priv);
5712 	}
5713 	dmdp->ddm_priv_mode = priv_mode;
5714 
5715 	ddi_append_minor_node(dip, dmdp);
5716 
5717 	/*
5718 	 * only log ddi_create_minor_node() calls which occur
5719 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
5720 	 */
5721 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
5722 	    mtype != DDM_INTERNAL_PATH) {
5723 		(void) i_log_devfs_minor_create(dip, dmdp->ddm_name);
5724 	}
5725 
5726 	/*
5727 	 * Check if any dacf rules match the creation of this minor node
5728 	 */
5729 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
5730 	return (DDI_SUCCESS);
5731 }
5732 
5733 int
5734 ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type,
5735     minor_t minor_num, const char *node_type, int flag)
5736 {
5737 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5738 	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
5739 }
5740 
5741 int
5742 ddi_create_priv_minor_node(dev_info_t *dip, const char *name, int spec_type,
5743     minor_t minor_num, const char *node_type, int flag,
5744     const char *rdpriv, const char *wrpriv, mode_t priv_mode)
5745 {
5746 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5747 	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
5748 }
5749 
5750 int
5751 ddi_create_default_minor_node(dev_info_t *dip, const char *name, int spec_type,
5752     minor_t minor_num, const char *node_type, int flag)
5753 {
5754 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5755 	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
5756 }
5757 
5758 /*
5759  * Internal (non-ddi) routine for drivers to export names known
5760  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5761  * but not exported externally to /dev
5762  */
5763 int
5764 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
5765     minor_t minor_num)
5766 {
5767 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
5768 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
5769 }
5770 
/*
 * Remove the minor node named 'name' from 'dip'; a NULL name removes
 * every minor node on the dip.  For each removed, non-internal node a
 * sysevent is logged, and the node's name, privilege data and dacf
 * client data are released.
 */
void
ddi_remove_minor_node(dev_info_t *dip, const char *name)
{
	int			circ;
	struct ddi_minor_data	*dmdp, *dmdp1;
	struct ddi_minor_data	**dmdp_prev;

	ndi_devi_enter(dip, &circ);
	/* walk the singly-linked minor list with a pointer-to-link cursor */
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal paths are not visible to userland */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
5815 
5816 
5817 int
5818 ddi_in_panic()
5819 {
5820 	return (panicstr != NULL);
5821 }
5822 
5823 
5824 /*
5825  * Find first bit set in a mask (returned counting from 1 up)
5826  */
5827 
5828 int
5829 ddi_ffs(long mask)
5830 {
5831 	return (ffs(mask));
5832 }
5833 
5834 /*
5835  * Find last bit set. Take mask and clear
5836  * all but the most significant bit, and
5837  * then let ffs do the rest of the work.
5838  *
5839  * Algorithm courtesy of Steve Chessin.
5840  */
5841 
5842 int
5843 ddi_fls(long mask)
5844 {
5845 	while (mask) {
5846 		long nx;
5847 
5848 		if ((nx = (mask & (mask - 1))) == 0)
5849 			break;
5850 		mask = nx;
5851 	}
5852 	return (ffs(mask));
5853 }
5854 
5855 /*
5856  * The ddi_soft_state_* routines comprise generic storage management utilities
5857  * for driver soft state structures (in "the old days," this was done with
5858  * statically sized array - big systems and dynamic loading and unloading
5859  * make heap allocation more attractive).
5860  */
5861 
5862 /*
5863  * Allocate a set of pointers to 'n_items' objects of size 'size'
5864  * bytes.  Each pointer is initialized to nil.
5865  *
5866  * The 'size' and 'n_items' values are stashed in the opaque
5867  * handle returned to the caller.
5868  *
5869  * This implementation interprets 'set of pointers' to mean 'array
5870  * of pointers' but note that nothing in the interface definition
5871  * precludes an implementation that uses, for example, a linked list.
5872  * However there should be a small efficiency gain from using an array
5873  * at lookup time.
5874  *
5875  * NOTE	As an optimization, we make our growable array allocations in
5876  *	powers of two (bytes), since that's how much kmem_alloc (currently)
5877  *	gives us anyway.  It should save us some free/realloc's ..
5878  *
5879  *	As a further optimization, we make the growable array start out
5880  *	with MIN_N_ITEMS in it.
5881  */
5882 
5883 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
5884 
/*
 * Initialize a soft state set: *state_p receives an opaque handle for
 * ddi_soft_state_zalloc/get/free/fini.  'size' is the per-item state
 * structure size; 'n_items' is a sizing hint for the initial pointer
 * array.  Returns 0 on success, EINVAL on bad arguments.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state	*ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two: fls == ffs means
		 * exactly one bit is set (already a power of two), in
		 * which case bitlog is decremented to reproduce n_items
		 * exactly rather than doubling it.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
5914 
5915 /*
5916  * Allocate a state structure of size 'size' to be associated
5917  * with item 'item'.
5918  *
5919  * In this implementation, the array is extended to
5920  * allow the requested offset, if needed.
5921  */
/*
 * Returns DDI_SUCCESS, or DDI_FAILURE for a NULL/bad handle, a
 * negative item, or an item that is already allocated.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 indicates a handle that was never initialized */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6014 
6015 /*
6016  * Fetch a pointer to the allocated soft state structure.
6017  *
6018  * This is designed to be cheap.
6019  *
6020  * There's an argument that there should be more checking for
6021  * nil pointers and out of bounds on the array.. but we do a lot
6022  * of that in the alloc/free routines.
6023  *
6024  * An array has the convenience that we don't need to lock read-access
6025  * to it c.f. a linked list.  However our "expanding array" strategy
6026  * means that we should hold a readers lock on the i_ddi_soft_state
6027  * structure.
6028  *
6029  * However, from a performance viewpoint, we need to do it without
6030  * any locks at all -- this also makes it a leaf routine.  The algorithm
6031  * is 'lock-free' because we only discard the pointer arrays at
6032  * ddi_soft_state_fini() time.
6033  */
6034 void *
6035 ddi_get_soft_state(void *state, int item)
6036 {
6037 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6038 
6039 	ASSERT((ss != NULL) && (item >= 0));
6040 
6041 	if (item < ss->n_items && ss->array != NULL)
6042 		return (ss->array[item]);
6043 	return (NULL);
6044 }
6045 
6046 /*
6047  * Free the state structure corresponding to 'item.'   Freeing an
6048  * element that has either gone or was never allocated is not
6049  * considered an error.  Note that we free the state structure, but
6050  * we don't shrink our pointer array, or discard 'dirty' arrays,
6051  * since even a few pointers don't really waste too much memory.
6052  *
6053  * Passing an item number that is out of bounds, or a null pointer will
6054  * provoke an error message.
6055  */
void
ddi_soft_state_free(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*element;
	static char		msg[] = "ddi_soft_state_free:";

	if (ss == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	/* size == 0 means the handle was never initialized */
	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* unlink under the lock; the actual free happens below */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* element was unlinked above, so it is safe to free lock-free */
	if (element)
		kmem_free(element, ss->size);
}
6090 
6091 /*
6092  * Free the entire set of pointers, and any
6093  * soft state structures contained therein.
6094  *
6095  * Note that we don't grab the ss->lock mutex, even though
6096  * we're inspecting the various fields of the data structure.
6097  *
6098  * There is an implicit assumption that this routine will
6099  * never run concurrently with any of the above on this
6100  * particular state structure i.e. by the time the driver
6101  * calls this routine, there should be no other threads
6102  * running in the driver.
6103  */
void
ddi_soft_state_fini(void **state_p)
{
	i_ddi_soft_state	*ss, *dirty;
	int			item;
	static char		msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL ||
	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 means the handle was never initialized */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	*state_p = NULL;
}
6144 
6145 #define	SS_N_ITEMS_PER_HASH	16
6146 #define	SS_MIN_HASH_SZ		16
6147 #define	SS_MAX_HASH_SZ		4096
6148 
6149 int
6150 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6151     int n_items)
6152 {
6153 	i_ddi_soft_state_bystr	*sss;
6154 	int			hash_sz;
6155 
6156 	ASSERT(state_p && size && n_items);
6157 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6158 		return (EINVAL);
6159 
6160 	/* current implementation is based on hash, convert n_items to hash */
6161 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6162 	if (hash_sz < SS_MIN_HASH_SZ)
6163 		hash_sz = SS_MIN_HASH_SZ;
6164 	else if (hash_sz > SS_MAX_HASH_SZ)
6165 		hash_sz = SS_MAX_HASH_SZ;
6166 
6167 	/* allocate soft_state pool */
6168 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6169 	sss->ss_size = size;
6170 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6171 	    hash_sz, mod_hash_null_valdtor);
6172 	*state_p = (ddi_soft_state_bystr *)sss;
6173 	return (0);
6174 }
6175 
6176 int
6177 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6178 {
6179 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6180 	void			*sso;
6181 	char			*dup_str;
6182 
6183 	ASSERT(sss && str && sss->ss_mod_hash);
6184 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6185 		return (DDI_FAILURE);
6186 	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6187 	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6188 	if (mod_hash_insert(sss->ss_mod_hash,
6189 	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6190 		return (DDI_SUCCESS);
6191 
6192 	/*
6193 	 * The only error from an strhash insert is caused by a duplicate key.
6194 	 * We refuse to tread on an existing elements, so free and fail.
6195 	 */
6196 	kmem_free(dup_str, strlen(dup_str) + 1);
6197 	kmem_free(sso, sss->ss_size);
6198 	return (DDI_FAILURE);
6199 }
6200 
6201 void *
6202 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6203 {
6204 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6205 	void			*sso;
6206 
6207 	ASSERT(sss && str && sss->ss_mod_hash);
6208 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6209 		return (NULL);
6210 
6211 	if (mod_hash_find(sss->ss_mod_hash,
6212 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6213 		return (sso);
6214 	return (NULL);
6215 }
6216 
6217 void
6218 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6219 {
6220 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6221 	void			*sso;
6222 
6223 	ASSERT(sss && str && sss->ss_mod_hash);
6224 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6225 		return;
6226 
6227 	(void) mod_hash_remove(sss->ss_mod_hash,
6228 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6229 	kmem_free(sso, sss->ss_size);
6230 }
6231 
6232 void
6233 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6234 {
6235 	i_ddi_soft_state_bystr	*sss;
6236 
6237 	ASSERT(state_p);
6238 	if (state_p == NULL)
6239 		return;
6240 
6241 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6242 	if (sss == NULL)
6243 		return;
6244 
6245 	ASSERT(sss->ss_mod_hash);
6246 	if (sss->ss_mod_hash) {
6247 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6248 		sss->ss_mod_hash = NULL;
6249 	}
6250 
6251 	kmem_free(sss, sizeof (*sss));
6252 	*state_p = NULL;
6253 }
6254 
6255 /*
6256  * The ddi_strid_* routines provide string-to-index management utilities.
6257  */
6258 /* allocate and initialize an strid set */
6259 int
6260 ddi_strid_init(ddi_strid **strid_p, int n_items)
6261 {
6262 	i_ddi_strid	*ss;
6263 	int		hash_sz;
6264 
6265 	if (strid_p == NULL)
6266 		return (DDI_FAILURE);
6267 
6268 	/* current implementation is based on hash, convert n_items to hash */
6269 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6270 	if (hash_sz < SS_MIN_HASH_SZ)
6271 		hash_sz = SS_MIN_HASH_SZ;
6272 	else if (hash_sz > SS_MAX_HASH_SZ)
6273 		hash_sz = SS_MAX_HASH_SZ;
6274 
6275 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6276 	ss->strid_chunksz = n_items;
6277 	ss->strid_spacesz = n_items;
6278 	ss->strid_space = id_space_create("strid", 1, n_items);
6279 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6280 	    mod_hash_null_valdtor);
6281 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6282 	    mod_hash_null_valdtor);
6283 	*strid_p = (ddi_strid *)ss;
6284 	return (DDI_SUCCESS);
6285 }
6286 
/*
 * allocate an id mapping within the specified set for str, return id;
 * returns 0 on bad arguments, id-space exhaustion, or a duplicate str.
 */
static id_t
i_ddi_strid_alloc(ddi_strid *strid, char *str)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	id_t		id;
	char		*s;

	ASSERT(ss && str);
	if ((ss == NULL) || (str == NULL))
		return (0);

	/*
	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
	 * range as compressed as possible.  This is important to minimize
	 * the amount of space used when the id is used as a ddi_soft_state
	 * index by the caller.
	 *
	 * If the id list is exhausted, increase the size of the list
	 * by the chunk size specified in ddi_strid_init and reattempt
	 * the allocation
	 */
	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
		id_space_extend(ss->strid_space, ss->strid_spacesz,
		    ss->strid_spacesz + ss->strid_chunksz);
		ss->strid_spacesz += ss->strid_chunksz;
		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
			return (0);
	}

	/*
	 * NOTE: since we create and destroy in unison we can save space by
	 * using bystr key as the byid value.  This means destroy must occur
	 * in (byid, bystr) order.
	 */
	s = i_ddi_strdup(str, KM_SLEEP);
	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
	    (mod_hash_val_t)(intptr_t)id) != 0) {
		/* duplicate str: back out the id allocation */
		ddi_strid_free(strid, id);
		return (0);
	}
	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
	    (mod_hash_val_t)s) != 0) {
		/* back out both the id and the bystr entry */
		ddi_strid_free(strid, id);
		return (0);
	}

	/* NOTE: s is freed on mod_hash_destroy by mod_hash_strval_dtor */
	return (id);
}
6337 
6338 /* allocate an id mapping within the specified set for str, return id */
6339 id_t
6340 ddi_strid_alloc(ddi_strid *strid, char *str)
6341 {
6342 	return (i_ddi_strid_alloc(strid, str));
6343 }
6344 
6345 /* return the id within the specified strid given the str */
6346 id_t
6347 ddi_strid_str2id(ddi_strid *strid, char *str)
6348 {
6349 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6350 	id_t		id = 0;
6351 	mod_hash_val_t	hv;
6352 
6353 	ASSERT(ss && str);
6354 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6355 	    (mod_hash_key_t)str, &hv) == 0))
6356 		id = (int)(intptr_t)hv;
6357 	return (id);
6358 }
6359 
6360 /* return str within the specified strid given the id */
6361 char *
6362 ddi_strid_id2str(ddi_strid *strid, id_t id)
6363 {
6364 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6365 	char		*str = NULL;
6366 	mod_hash_val_t	hv;
6367 
6368 	ASSERT(ss && id > 0);
6369 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6370 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6371 		str = (char *)hv;
6372 	return (str);
6373 }
6374 
/* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	char		*str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/* str is NULL when the id was never mapped; nothing more to undo */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6394 
6395 /* destroy the strid set */
6396 void
6397 ddi_strid_fini(ddi_strid **strid_p)
6398 {
6399 	i_ddi_strid	*ss;
6400 
6401 	ASSERT(strid_p);
6402 	if (strid_p == NULL)
6403 		return;
6404 
6405 	ss = (i_ddi_strid *)(*strid_p);
6406 	if (ss == NULL)
6407 		return;
6408 
6409 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6410 	if (ss->strid_byid)
6411 		mod_hash_destroy_hash(ss->strid_byid);
6412 	if (ss->strid_byid)
6413 		mod_hash_destroy_hash(ss->strid_bystr);
6414 	if (ss->strid_space)
6415 		id_space_destroy(ss->strid_space);
6416 	kmem_free(ss, sizeof (*ss));
6417 	*strid_p = NULL;
6418 }
6419 
6420 /*
6421  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6422  * Storage is double buffered to prevent updates during devi_addr use -
6423  * double buffering is adaquate for reliable ddi_deviname() consumption.
6424  * The double buffer is not freed until dev_info structure destruction
6425  * (by i_ddi_free_node).
6426  */
void
ddi_set_name_addr(dev_info_t *dip, char *name)
{
	char	*buf = DEVI(dip)->devi_addr_buf;
	char	*newaddr;

	/* lazily allocate the two MAXNAMELEN halves of the double buffer */
	if (buf == NULL) {
		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
		DEVI(dip)->devi_addr_buf = buf;
	}

	if (name) {
		ASSERT(strlen(name) < MAXNAMELEN);
		/* copy into whichever half devi_addr is NOT pointing at */
		newaddr = (DEVI(dip)->devi_addr == buf) ?
		    (buf + MAXNAMELEN) : buf;
		(void) strlcpy(newaddr, name, MAXNAMELEN);
	} else
		newaddr = NULL;

	/* publish the new address last, after the copy is complete */
	DEVI(dip)->devi_addr = newaddr;
}
6448 
6449 char *
6450 ddi_get_name_addr(dev_info_t *dip)
6451 {
6452 	return (DEVI(dip)->devi_addr);
6453 }
6454 
6455 void
6456 ddi_set_parent_data(dev_info_t *dip, void *pd)
6457 {
6458 	DEVI(dip)->devi_parent_data = pd;
6459 }
6460 
6461 void *
6462 ddi_get_parent_data(dev_info_t *dip)
6463 {
6464 	return (DEVI(dip)->devi_parent_data);
6465 }
6466 
6467 /*
6468  * ddi_name_to_major: returns the major number of a named module,
6469  * derived from the current driver alias binding.
6470  *
6471  * Caveat: drivers should avoid the use of this function, in particular
6472  * together with ddi_get_name/ddi_binding name, as per
6473  *	major = ddi_name_to_major(ddi_get_name(devi));
6474  * ddi_name_to_major() relies on the state of the device/alias binding,
6475  * which can and does change dynamically as aliases are administered
6476  * over time.  An attached device instance cannot rely on the major
6477  * number returned by ddi_name_to_major() to match its own major number.
6478  *
6479  * For driver use, ddi_driver_major() reliably returns the major number
6480  * for the module to which the device was bound at attach time over
6481  * the life of the instance.
6482  *	major = ddi_driver_major(dev_info_t *)
6483  */
major_t
ddi_name_to_major(char *name)
{
	/* resolve a driver name or alias to its current major number */
	return (mod_name_to_major(name));
}
6489 
6490 /*
6491  * ddi_major_to_name: Returns the module name bound to a major number.
6492  */
char *
ddi_major_to_name(major_t major)
{
	/* reverse mapping of ddi_name_to_major(); NULL if major unbound */
	return (mod_major_to_name(major));
}
6498 
6499 /*
6500  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6501  * pointed at by 'name.'  A devinfo node is named as a result of calling
6502  * ddi_initchild().
6503  *
6504  * Note: the driver must be held before calling this function!
6505  */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	/* the root node contributes the empty string */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		/* unbound nodes have no unit-address yet */
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED.  We currently have
		 * two situations where we are called in this state:
		 *   o  For framework processing of a path-oriented alias.
		 *   o  If a SCSA nexus driver calls ddi_devid_register()
		 *	from its tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	/* "/name" without a unit-address, "/name@addr" with one */
	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6543 
6544 /*
6545  * Spits out the name of device node, typically name@addr, for a given node,
6546  * using the driver name, not the nodename.
6547  *
6548  * Used by match_parent. Not to be used elsewhere.
6549  */
char *
i_ddi_parname(dev_info_t *dip, char *name)
{
	char *addrname;

	/* the root node contributes the empty string */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	/* an initialized node always has a (possibly empty) unit-address */
	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);

	/* "name" without a unit-address, "name@addr" with one */
	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
		(void) sprintf(name, "%s", ddi_binding_name(dip));
	else
		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
	return (name);
}
6568 
6569 static char *
6570 pathname_work(dev_info_t *dip, char *path)
6571 {
6572 	char *bp;
6573 
6574 	if (dip == ddi_root_node()) {
6575 		*path = '\0';
6576 		return (path);
6577 	}
6578 	(void) pathname_work(ddi_get_parent(dip), path);
6579 	bp = path + strlen(path);
6580 	(void) ddi_deviname(dip, bp);
6581 	return (path);
6582 }
6583 
/*
 * Build the full device path of 'dip' into caller-supplied 'path'
 * (assumed to be at least MAXPATHLEN bytes).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6589 
6590 char *
6591 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6592 {
6593 	if (dmdp->dip == NULL)
6594 		*path = '\0';
6595 	else {
6596 		(void) ddi_pathname(dmdp->dip, path);
6597 		if (dmdp->ddm_name) {
6598 			(void) strcat(path, ":");
6599 			(void) strcat(path, dmdp->ddm_name);
6600 		}
6601 	}
6602 	return (path);
6603 }
6604 
/*
 * Recursive helper for ddi_pathname_obp(): build the OBP path of 'dip'
 * into 'path'.  Returns 'path' on success, NULL if no ancestor carries
 * an "obp-path" property.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* recurse: the nearest ancestor with "obp-path" seeds the prefix */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): *(bp - 1) assumes the parent produced a non-empty
	 * path; an empty "obp-path" property value would read one byte
	 * before 'path' — confirm properties are never empty strings.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6641 
6642 /*
6643  * return the 'obp-path' based path for the given node, or NULL if the node
6644  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6645  * function can't be called from interrupt context (since we need to
6646  * lookup a string property).
6647  */
char *
ddi_pathname_obp(dev_info_t *dip, char *path)
{
	/* property lookup below may block, so no interrupt context */
	ASSERT(!servicing_interrupt());
	if (dip == NULL || path == NULL)
		return (NULL);

	/* split work into a separate function to aid debugging */
	return (pathname_work_obp(dip, path));
}
6658 
6659 int
6660 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6661 {
6662 	dev_info_t *pdip;
6663 	char *obp_path = NULL;
6664 	int rc = DDI_FAILURE;
6665 
6666 	if (dip == NULL)
6667 		return (DDI_FAILURE);
6668 
6669 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6670 
6671 	pdip = ddi_get_parent(dip);
6672 
6673 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6674 		(void) ddi_pathname(pdip, obp_path);
6675 	}
6676 
6677 	if (component) {
6678 		(void) strncat(obp_path, "/", MAXPATHLEN);
6679 		(void) strncat(obp_path, component, MAXPATHLEN);
6680 	}
6681 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6682 	    obp_path);
6683 
6684 	if (obp_path)
6685 		kmem_free(obp_path, MAXPATHLEN);
6686 
6687 	return (rc);
6688 }
6689 
6690 /*
6691  * Given a dev_t, return the pathname of the corresponding device in the
6692  * buffer pointed at by "path."  The buffer is assumed to be large enough
6693  * to hold the pathname of the device (MAXPATHLEN).
6694  *
6695  * The pathname of a device is the pathname of the devinfo node to which
6696  * the device "belongs," concatenated with the character ':' and the name
6697  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6698  * just the pathname of the devinfo node is returned without driving attach
6699  * of that node.  For a non-zero spec_type, an attach is performed and a
6700  * search of the minor list occurs.
6701  *
6702  * It is possible that the path associated with the dev_t is not
6703  * currently available in the devinfo tree.  In order to have a
6704  * dev_t, a device must have been discovered before, which means
6705  * that the path is always in the instance tree.  The one exception
6706  * to this is if the dev_t is associated with a pseudo driver, in
6707  * which case the device must exist on the pseudo branch of the
6708  * devinfo tree as a result of parsing .conf files.
6709  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int		circ;
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	/* reject devts whose major is outside the configured driver table */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		/* hold the node busy so the minor list is stable */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6764 
6765 /*
6766  * Given a major number and an instance, return the path.
6767  * This interface does NOT drive attach.
6768  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	/* reject bad major or "no instance" sentinel up front */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we built the path: keep searching */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6821 
6822 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6823 
6824 /*
6825  * Given the dip for a network interface return the ppa for that interface.
6826  *
6827  * In all cases except GLD v0 drivers, the ppa == instance.
6828  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6829  * So for these drivers when the attach routine calls gld_register(),
6830  * the GLD framework creates an integer property called "gld_driver_ppa"
6831  * that can be queried here.
6832  *
6833  * The only time this function is used is when a system is booting over nfs.
6834  * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
6836  */
6837 int
6838 i_ddi_devi_get_ppa(dev_info_t *dip)
6839 {
6840 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6841 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6842 	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
6843 }
6844 
6845 /*
6846  * i_ddi_devi_set_ppa() should only be called from gld_register()
6847  * and only for GLD v0 drivers
6848  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* record the attach-order ppa as a property for later lookup */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6854 
6855 
6856 /*
6857  * Private DDI Console bell functions.
6858  */
6859 void
6860 ddi_ring_console_bell(clock_t duration)
6861 {
6862 	if (ddi_console_bell_func != NULL)
6863 		(*ddi_console_bell_func)(duration);
6864 }
6865 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* register (or clear, with NULL) the console bell implementation */
	ddi_console_bell_func = bellfunc;
}
6871 
6872 int
6873 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6874     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6875 {
6876 	int (*funcp)() = ddi_dma_allochdl;
6877 	ddi_dma_attr_t dma_attr;
6878 	struct bus_ops *bop;
6879 
6880 	if (attr == (ddi_dma_attr_t *)0)
6881 		return (DDI_DMA_BADATTR);
6882 
6883 	dma_attr = *attr;
6884 
6885 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6886 	if (bop && bop->bus_dma_allochdl)
6887 		funcp = bop->bus_dma_allochdl;
6888 
6889 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6890 }
6891 
6892 void
6893 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
6894 {
6895 	ddi_dma_handle_t h = *handlep;
6896 	(void) ddi_dma_freehdl(HD, HD, h);
6897 }
6898 
/* callback list for waiters on failed ddi_dma_mem_alloc() requests */
static uintptr_t dma_mem_list_id = 0;
6900 
6901 
6902 int
6903 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6904     ddi_device_acc_attr_t *accattrp, uint_t flags,
6905     int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6906     size_t *real_length, ddi_acc_handle_t *handlep)
6907 {
6908 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6909 	dev_info_t *dip = hp->dmai_rdip;
6910 	ddi_acc_hdl_t *ap;
6911 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6912 	uint_t sleepflag, xfermodes;
6913 	int (*fp)(caddr_t);
6914 	int rval;
6915 
6916 	if (waitfp == DDI_DMA_SLEEP)
6917 		fp = (int (*)())KM_SLEEP;
6918 	else if (waitfp == DDI_DMA_DONTWAIT)
6919 		fp = (int (*)())KM_NOSLEEP;
6920 	else
6921 		fp = waitfp;
6922 	*handlep = impl_acc_hdl_alloc(fp, arg);
6923 	if (*handlep == NULL)
6924 		return (DDI_FAILURE);
6925 
6926 	/* check if the cache attributes are supported */
6927 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6928 		return (DDI_FAILURE);
6929 
6930 	/*
6931 	 * Transfer the meaningful bits to xfermodes.
6932 	 * Double-check if the 3rd party driver correctly sets the bits.
6933 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6934 	 */
6935 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6936 	if (xfermodes == 0) {
6937 		xfermodes = DDI_DMA_STREAMING;
6938 	}
6939 
6940 	/*
6941 	 * initialize the common elements of data access handle
6942 	 */
6943 	ap = impl_acc_hdl_get(*handlep);
6944 	ap->ah_vers = VERS_ACCHDL;
6945 	ap->ah_dip = dip;
6946 	ap->ah_offset = 0;
6947 	ap->ah_len = 0;
6948 	ap->ah_xfermodes = flags;
6949 	ap->ah_acc = *accattrp;
6950 
6951 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6952 	if (xfermodes == DDI_DMA_CONSISTENT) {
6953 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6954 		    flags, accattrp, kaddrp, NULL, ap);
6955 		*real_length = length;
6956 	} else {
6957 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6958 		    flags, accattrp, kaddrp, real_length, ap);
6959 	}
6960 	if (rval == DDI_SUCCESS) {
6961 		ap->ah_len = (off_t)(*real_length);
6962 		ap->ah_addr = *kaddrp;
6963 	} else {
6964 		impl_acc_hdl_free(*handlep);
6965 		*handlep = (ddi_acc_handle_t)NULL;
6966 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6967 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6968 		}
6969 		rval = DDI_FAILURE;
6970 	}
6971 	return (rval);
6972 }
6973 
6974 void
6975 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6976 {
6977 	ddi_acc_hdl_t *ap;
6978 
6979 	ap = impl_acc_hdl_get(*handlep);
6980 	ASSERT(ap);
6981 
6982 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
6983 
6984 	/*
6985 	 * free the handle
6986 	 */
6987 	impl_acc_hdl_free(*handlep);
6988 	*handlep = (ddi_acc_handle_t)NULL;
6989 
6990 	if (dma_mem_list_id != 0) {
6991 		ddi_run_callback(&dma_mem_list_id);
6992 	}
6993 }
6994 
/*
 * Bind the memory described by buf(9S) 'bp' to DMA handle 'handle',
 * building a ddi_dma_req and routing it through the parent nexus
 * bind function.  cookiep/ccountp may be NULL when the caller will
 * retrieve cookies via the iterator interfaces instead.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
    uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	/* substitute locals when the caller does not want these outputs */
	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the buffer by its page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* otherwise describe it by virtual address */
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow page list accompanies the vaddr */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the parent nexus bind routine cached on the node */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7058 
/*
 * Bind 'len' bytes at virtual address 'addr' (in address space 'as',
 * NULL meaning the kernel's) to DMA handle 'handle'.  cookiep/ccountp
 * may be NULL when the caller uses the cookie iterator interfaces.
 */
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
    caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
    caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();
	ddi_dma_cookie_t cookie;
	uint_t count;

	/* zero-length bindings are meaningless */
	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}

	/* substitute locals when the caller does not want these outputs */
	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* dispatch to the parent nexus bind routine cached on the node */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7096 
/*
 * Copy out the next DMA cookie of a bound handle and advance the
 * handle's implicit cookie iterator.  Calling it more times than
 * there are cookies is a caller bug and panics.
 */
void
ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	ddi_dma_cookie_t *cp;

	if (hp->dmai_curcookie >= hp->dmai_ncookies) {
		panic("ddi_dma_nextcookie() called too many times on handle %p",
		    hp);
	}

	cp = hp->dmai_cookie;
	ASSERT(cp);

	/* copy the current cookie out field by field ... */
	cookiep->dmac_notused = cp->dmac_notused;
	cookiep->dmac_type = cp->dmac_type;
	cookiep->dmac_address = cp->dmac_address;
	cookiep->dmac_size = cp->dmac_size;
	/* ... then advance both the pointer and the position counter */
	hp->dmai_cookie++;
	hp->dmai_curcookie++;
}
7118 
7119 int
7120 ddi_dma_ncookies(ddi_dma_handle_t handle)
7121 {
7122 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7123 
7124 	return (hp->dmai_ncookies);
7125 }
7126 
/*
 * Iterate over the cookies of a bound handle: pass NULL to get the
 * first cookie, then pass the previous return value to get the next.
 * Returns NULL when exhausted (or if 'iter' is not a cookie of this
 * handle, or the handle has no cookies).
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_iter(ddi_dma_handle_t handle, const ddi_dma_cookie_t *iter)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base, *end;

	if (hp->dmai_ncookies == 0) {
		return (NULL);
	}

	/* dmai_cookie points at the next cookie; back up to the first */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	end = base + hp->dmai_ncookies;
	if (iter == NULL) {
		return (base);
	}

	/* reject an iterator that does not lie within this cookie array */
	if ((uintptr_t)iter < (uintptr_t)base ||
	    (uintptr_t)iter >= (uintptr_t)end) {
		return (NULL);
	}

	iter++;
	if (iter == end) {
		return (NULL);
	}

	return (iter);
}
7155 
7156 const ddi_dma_cookie_t *
7157 ddi_dma_cookie_get(ddi_dma_handle_t handle, uint_t index)
7158 {
7159 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7160 	const ddi_dma_cookie_t *base;
7161 
7162 	if (index >= hp->dmai_ncookies) {
7163 		return (NULL);
7164 	}
7165 
7166 	base = hp->dmai_cookie - hp->dmai_curcookie;
7167 	return (base + index);
7168 }
7169 
/*
 * Return the single cookie of a handle the caller asserts has exactly
 * one; any other cookie count is a caller bug and panics.
 */
const ddi_dma_cookie_t *
ddi_dma_cookie_one(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	const ddi_dma_cookie_t *base;

	if (hp->dmai_ncookies != 1) {
		panic("ddi_dma_cookie_one() called with improper handle %p",
		    hp);
	}
	ASSERT3P(hp->dmai_cookie, !=, NULL);

	/* back up over any iteration the caller may already have done */
	base = hp->dmai_cookie - hp->dmai_curcookie;
	return (base);
}
7185 
7186 int
7187 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7188 {
7189 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7190 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7191 		return (DDI_FAILURE);
7192 	} else {
7193 		*nwinp = hp->dmai_nwin;
7194 		return (DDI_SUCCESS);
7195 	}
7196 }
7197 
/*
 * Activate DMA window 'win' of handle 'h', reporting its offset,
 * length and first cookie(s).  Routed through the parent nexus
 * bus_dma_win when one exists.  cookiep/ccountp may be NULL when the
 * caller uses the cookie iterator interfaces.
 */
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;
	ddi_dma_cookie_t cookie;
	uint_t count;

	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	/* substitute locals when the caller does not want these outputs */
	if (cookiep == NULL)
		cookiep = &cookie;

	if (ccountp == NULL)
		ccountp = &count;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
7219 
/*
 * Request 64-bit SBus transfers with the given burst sizes for 'h'
 * (forwarded to the nexus via the DDI_DMA_SET_SBUS64 mctl).
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7226 
/*
 * Default fault-check routine: report the handle's recorded fault state.
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7232 
7233 int
7234 ddi_check_dma_handle(ddi_dma_handle_t handle)
7235 {
7236 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7237 	int (*check)(ddi_dma_impl_t *);
7238 
7239 	if ((check = hp->dmai_fault_check) == NULL)
7240 		check = i_ddi_dma_fault_check;
7241 
7242 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7243 }
7244 
7245 void
7246 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7247 {
7248 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7249 	void (*notify)(ddi_dma_impl_t *);
7250 
7251 	if (!hp->dmai_fault) {
7252 		hp->dmai_fault = 1;
7253 		if ((notify = hp->dmai_fault_notify) != NULL)
7254 			(*notify)(hp);
7255 	}
7256 }
7257 
7258 void
7259 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7260 {
7261 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7262 	void (*notify)(ddi_dma_impl_t *);
7263 
7264 	if (hp->dmai_fault) {
7265 		hp->dmai_fault = 0;
7266 		if ((notify = hp->dmai_fault_notify) != NULL)
7267 			(*notify)(hp);
7268 	}
7269 }
7270 
7271 /*
7272  * register mapping routines.
7273  */
/*
 * Map register set 'rnumber' of 'dip' for kernel access, returning the
 * mapped address in *addrp and the access handle in *handle.  'offset'
 * and 'len' select a sub-range of the register set (0/0 for all of it).
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
    offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result: on failure release the handle so the
	 * caller sees a NULL handle and no resources are leaked
	 */
	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7319 
/*
 * Undo a ddi_regs_map_setup(): unmap the registers via the parent and
 * free the access handle.  *handlep is NULLed to prevent reuse.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* rebuild the original mapping request, now as an unmap */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7348 
/*
 * Zero 'bytecount' bytes of device memory through access handle
 * 'handle', advancing by 'dev_advcnt' elements (of 'dev_datasz' bytes
 * each) between stores.  'bytecount' must be a multiple of the element
 * size.
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
    ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		/* unsupported element size */
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7388 
/*
 * Copy 'bytecount' bytes between two device-memory regions through
 * their access handles.  Source and destination pointers each advance
 * by their own 'advcnt' elements (of 'dev_datasz' bytes) per transfer;
 * 'bytecount' must be a multiple of the element size.
 */
int
ddi_device_copy(
	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
	size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		/* unsupported element size */
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7454 
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/*
 * Endianness helpers: reverse the byte order of 16-, 32- and 64-bit
 * values.  Written as explicit mask-and-shift expressions so each
 * function is self-describing.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0x00ffU) << 8) | (value >> 8)));
}

uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    (value >> 24));
}

uint64_t
ddi_swap64(uint64_t value)
{
	return (((value & 0x00000000000000ffULL) << 56) |
	    ((value & 0x000000000000ff00ULL) << 40) |
	    ((value & 0x0000000000ff0000ULL) << 24) |
	    ((value & 0x00000000ff000000ULL) << 8) |
	    ((value & 0x000000ff00000000ULL) >> 8) |
	    ((value & 0x0000ff0000000000ULL) >> 24) |
	    ((value & 0x00ff000000000000ULL) >> 40) |
	    (value >> 56));
}
7484 
7485 /*
7486  * Convert a binding name to a driver name.
7487  * A binding name is the name used to determine the driver for a
7488  * device - it may be either an alias for the driver or the name
7489  * of the driver itself.
7490  */
7491 char *
7492 i_binding_to_drv_name(char *bname)
7493 {
7494 	major_t major_no;
7495 
7496 	ASSERT(bname != NULL);
7497 
7498 	if ((major_no = ddi_name_to_major(bname)) == -1)
7499 		return (NULL);
7500 	return (ddi_major_to_name(major_no));
7501 }
7502 
7503 /*
7504  * Search for minor name that has specified dev_t and spec_type.
7505  * If spec_type is zero then any dev_t match works.  Since we
7506  * are returning a pointer to the minor name string, we require the
7507  * caller to do the locking.
7508  */
/*
 * Return the name of the minor node of 'dip' matching (dev, spec_type);
 * spec_type 0 matches any spec type.  The returned pointer references
 * the node's minor list, so the caller must hold the node busy.
 */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	ASSERT(DEVI_BUSY_OWNED(dip));
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* only real/internal/default minors; dev and type must match */
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	/* no matching minor node */
	return (NULL);
}
7537 
7538 /*
7539  * Find the devt and spectype of the specified minor_name.
7540  * Return DDI_FAILURE if minor_name not found. Since we are
7541  * returning everything via arguments we can do the locking.
7542  */
7543 int
7544 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7545     dev_t *devtp, int *spectypep)
7546 {
7547 	int			circ;
7548 	struct ddi_minor_data	*dmdp;
7549 
7550 	/* deal with clone minor nodes */
7551 	if (dip == clone_dip) {
7552 		major_t	major;
7553 		/*
7554 		 * Make sure minor_name is a STREAMS driver.
7555 		 * We load the driver but don't attach to any instances.
7556 		 */
7557 
7558 		major = ddi_name_to_major(minor_name);
7559 		if (major == DDI_MAJOR_T_NONE)
7560 			return (DDI_FAILURE);
7561 
7562 		if (ddi_hold_driver(major) == NULL)
7563 			return (DDI_FAILURE);
7564 
7565 		if (STREAMSTAB(major) == NULL) {
7566 			ddi_rele_driver(major);
7567 			return (DDI_FAILURE);
7568 		}
7569 		ddi_rele_driver(major);
7570 
7571 		if (devtp)
7572 			*devtp = makedevice(clone_major, (minor_t)major);
7573 
7574 		if (spectypep)
7575 			*spectypep = S_IFCHR;
7576 
7577 		return (DDI_SUCCESS);
7578 	}
7579 
7580 	ndi_devi_enter(dip, &circ);
7581 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7582 		if (((dmdp->type != DDM_MINOR) &&
7583 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7584 		    (dmdp->type != DDM_DEFAULT)) ||
7585 		    strcmp(minor_name, dmdp->ddm_name))
7586 			continue;
7587 
7588 		if (devtp)
7589 			*devtp = dmdp->ddm_dev;
7590 
7591 		if (spectypep)
7592 			*spectypep = dmdp->ddm_spec_type;
7593 
7594 		ndi_devi_exit(dip, circ);
7595 		return (DDI_SUCCESS);
7596 	}
7597 	ndi_devi_exit(dip, circ);
7598 
7599 	return (DDI_FAILURE);
7600 }
7601 
/* Protects devid_gen_number, the DEVID_FAB generation counter */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;

#ifdef DEBUG

/* Tunables used to deliberately corrupt devids for testing */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* Set non-zero to enable devid layered-access debug output */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* Non-DEBUG kernels compile the debug output away entirely */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7623 
7624 
7625 #ifdef	DEBUG
7626 
7627 static void
7628 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7629 {
7630 	int i;
7631 
7632 	cmn_err(CE_CONT, "%s:\n", msg);
7633 	for (i = 0; i < ndevs; i++) {
7634 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7635 	}
7636 }
7637 
7638 static void
7639 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7640 {
7641 	int i;
7642 
7643 	cmn_err(CE_CONT, "%s:\n", msg);
7644 	for (i = 0; i < npaths; i++) {
7645 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7646 	}
7647 }
7648 
7649 static void
7650 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7651 {
7652 	int i;
7653 
7654 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7655 	for (i = 0; i < ndevs; i++) {
7656 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7657 	}
7658 }
7659 
7660 #endif	/* DEBUG */
7661 
7662 /*
7663  * Register device id into DDI framework.
7664  * Must be called when the driver is bound.
7665  */
7666 static int
7667 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7668 {
7669 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7670 	size_t		driver_len;
7671 	const char	*driver_name;
7672 	char		*devid_str;
7673 	major_t		major;
7674 
7675 	if ((dip == NULL) ||
7676 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7677 		return (DDI_FAILURE);
7678 
7679 	/* verify that the devid is valid */
7680 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7681 		return (DDI_FAILURE);
7682 
7683 	/* Updating driver name hint in devid */
7684 	driver_name = ddi_driver_name(dip);
7685 	driver_len = strlen(driver_name);
7686 	if (driver_len > DEVID_HINT_SIZE) {
7687 		/* Pick up last four characters of driver name */
7688 		driver_name += driver_len - DEVID_HINT_SIZE;
7689 		driver_len = DEVID_HINT_SIZE;
7690 	}
7691 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7692 	bcopy(driver_name, i_devid->did_driver, driver_len);
7693 
7694 #ifdef DEBUG
7695 	/* Corrupt the devid for testing. */
7696 	if (devid_register_corrupt)
7697 		i_devid->did_id[0] += devid_register_corrupt;
7698 	if (devid_register_corrupt_major &&
7699 	    (major == devid_register_corrupt_major))
7700 		i_devid->did_id[0] += 1;
7701 	if (devid_register_corrupt_hint)
7702 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7703 	if (devid_register_corrupt_hint_major &&
7704 	    (major == devid_register_corrupt_hint_major))
7705 		i_devid->did_driver[0] += 1;
7706 #endif /* DEBUG */
7707 
7708 	/* encode the devid as a string */
7709 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7710 		return (DDI_FAILURE);
7711 
7712 	/* add string as a string property */
7713 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7714 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7715 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7716 		    ddi_driver_name(dip), ddi_get_instance(dip));
7717 		ddi_devid_str_free(devid_str);
7718 		return (DDI_FAILURE);
7719 	}
7720 
7721 	/* keep pointer to devid string for interrupt context fma code */
7722 	if (DEVI(dip)->devi_devid_str)
7723 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7724 	DEVI(dip)->devi_devid_str = devid_str;
7725 	return (DDI_SUCCESS);
7726 }
7727 
7728 int
7729 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7730 {
7731 	int rval;
7732 
7733 	rval = i_ddi_devid_register(dip, devid);
7734 	if (rval == DDI_SUCCESS) {
7735 		/*
7736 		 * Register devid in devid-to-path cache
7737 		 */
7738 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7739 			mutex_enter(&DEVI(dip)->devi_lock);
7740 			DEVI(dip)->devi_flags |= DEVI_CACHED_DEVID;
7741 			mutex_exit(&DEVI(dip)->devi_lock);
7742 		} else if (ddi_get_name_addr(dip)) {
7743 			/*
7744 			 * We only expect cache_register DDI_FAILURE when we
7745 			 * can't form the full path because of NULL devi_addr.
7746 			 */
7747 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7748 			    ddi_driver_name(dip), ddi_get_instance(dip));
7749 		}
7750 	} else {
7751 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7752 		    ddi_driver_name(dip), ddi_get_instance(dip));
7753 	}
7754 	return (rval);
7755 }
7756 
7757 /*
7758  * Remove (unregister) device id from DDI framework.
7759  * Must be called when device is detached.
7760  */
7761 static void
7762 i_ddi_devid_unregister(dev_info_t *dip)
7763 {
7764 	if (DEVI(dip)->devi_devid_str) {
7765 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7766 		DEVI(dip)->devi_devid_str = NULL;
7767 	}
7768 
7769 	/* remove the devid property */
7770 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7771 }
7772 
/*
 * Public devid unregistration entry point: drop the cached-devid flag
 * and cache entry, then remove the devid from the DDI framework.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* clear the flag before removing the cache entry it describes */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_CACHED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7782 
7783 /*
7784  * Allocate and initialize a device id.
7785  */
7786 int
7787 ddi_devid_init(
7788 	dev_info_t	*dip,
7789 	ushort_t	devid_type,
7790 	ushort_t	nbytes,
7791 	void		*id,
7792 	ddi_devid_t	*ret_devid)
7793 {
7794 	impl_devid_t	*i_devid;
7795 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7796 	int		driver_len;
7797 	const char	*driver_name;
7798 
7799 	switch (devid_type) {
7800 	case DEVID_SCSI3_WWN:
7801 		/*FALLTHRU*/
7802 	case DEVID_SCSI_SERIAL:
7803 		/*FALLTHRU*/
7804 	case DEVID_ATA_SERIAL:
7805 		/*FALLTHRU*/
7806 	case DEVID_ENCAP:
7807 		if (nbytes == 0)
7808 			return (DDI_FAILURE);
7809 		if (id == NULL)
7810 			return (DDI_FAILURE);
7811 		break;
7812 	case DEVID_FAB:
7813 		if (nbytes != 0)
7814 			return (DDI_FAILURE);
7815 		if (id != NULL)
7816 			return (DDI_FAILURE);
7817 		nbytes = sizeof (int) +
7818 		    sizeof (struct timeval32) + sizeof (short);
7819 		sz += nbytes;
7820 		break;
7821 	default:
7822 		return (DDI_FAILURE);
7823 	}
7824 
7825 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7826 		return (DDI_FAILURE);
7827 
7828 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7829 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7830 	i_devid->did_rev_hi = DEVID_REV_MSB;
7831 	i_devid->did_rev_lo = DEVID_REV_LSB;
7832 	DEVID_FORMTYPE(i_devid, devid_type);
7833 	DEVID_FORMLEN(i_devid, nbytes);
7834 
7835 	/* Fill in driver name hint */
7836 	driver_name = ddi_driver_name(dip);
7837 	driver_len = strlen(driver_name);
7838 	if (driver_len > DEVID_HINT_SIZE) {
7839 		/* Pick up last four characters of driver name */
7840 		driver_name += driver_len - DEVID_HINT_SIZE;
7841 		driver_len = DEVID_HINT_SIZE;
7842 	}
7843 
7844 	bcopy(driver_name, i_devid->did_driver, driver_len);
7845 
7846 	/* Fill in id field */
7847 	if (devid_type == DEVID_FAB) {
7848 		char		*cp;
7849 		uint32_t	hostid;
7850 		struct timeval32 timestamp32;
7851 		int		i;
7852 		int		*ip;
7853 		short		gen;
7854 
7855 		/* increase the generation number */
7856 		mutex_enter(&devid_gen_mutex);
7857 		gen = devid_gen_number++;
7858 		mutex_exit(&devid_gen_mutex);
7859 
7860 		cp = i_devid->did_id;
7861 
7862 		/* Fill in host id (big-endian byte ordering) */
7863 		hostid = zone_get_hostid(NULL);
7864 		*cp++ = hibyte(hiword(hostid));
7865 		*cp++ = lobyte(hiword(hostid));
7866 		*cp++ = hibyte(loword(hostid));
7867 		*cp++ = lobyte(loword(hostid));
7868 
7869 		/*
7870 		 * Fill in timestamp (big-endian byte ordering)
7871 		 *
7872 		 * (Note that the format may have to be changed
7873 		 * before 2038 comes around, though it's arguably
7874 		 * unique enough as it is..)
7875 		 */
7876 		uniqtime32(&timestamp32);
7877 		ip = (int *)&timestamp32;
7878 		for (i = 0;
7879 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7880 			int	val;
7881 			val = *ip;
7882 			*cp++ = hibyte(hiword(val));
7883 			*cp++ = lobyte(hiword(val));
7884 			*cp++ = hibyte(loword(val));
7885 			*cp++ = lobyte(loword(val));
7886 		}
7887 
7888 		/* fill in the generation number */
7889 		*cp++ = hibyte(gen);
7890 		*cp++ = lobyte(gen);
7891 	} else
7892 		bcopy(id, i_devid->did_id, nbytes);
7893 
7894 	/* return device id */
7895 	*ret_devid = (ddi_devid_t)i_devid;
7896 	return (DDI_SUCCESS);
7897 }
7898 
/*
 * Return the device id registered on dip via *ret_devid, using a
 * wildcard (DDI_DEV_T_ANY) property lookup.
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7904 
7905 int
7906 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7907 {
7908 	char		*devidstr;
7909 
7910 	ASSERT(dev != DDI_DEV_T_NONE);
7911 
7912 	/* look up the property, devt specific first */
7913 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7914 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7915 		if ((dev == DDI_DEV_T_ANY) ||
7916 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7917 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7918 		    DDI_PROP_SUCCESS)) {
7919 			return (DDI_FAILURE);
7920 		}
7921 	}
7922 
7923 	/* convert to binary form */
7924 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7925 		ddi_prop_free(devidstr);
7926 		return (DDI_FAILURE);
7927 	}
7928 	ddi_prop_free(devidstr);
7929 	return (DDI_SUCCESS);
7930 }
7931 
7932 /*
7933  * Return a copy of the device id for dev_t
7934  */
7935 int
7936 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7937 {
7938 	dev_info_t	*dip;
7939 	int		rval;
7940 
7941 	/* get the dip */
7942 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7943 		return (DDI_FAILURE);
7944 
7945 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7946 
7947 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7948 	return (rval);
7949 }
7950 
7951 /*
7952  * Return a copy of the minor name for dev_t and spec_type
7953  */
7954 int
7955 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7956 {
7957 	char		*buf;
7958 	int		circ;
7959 	dev_info_t	*dip;
7960 	char		*nm;
7961 	int		rval;
7962 
7963 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7964 		*minor_name = NULL;
7965 		return (DDI_FAILURE);
7966 	}
7967 
7968 	/* Find the minor name and copy into max size buf */
7969 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7970 	ndi_devi_enter(dip, &circ);
7971 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7972 	if (nm)
7973 		(void) strcpy(buf, nm);
7974 	ndi_devi_exit(dip, circ);
7975 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7976 
7977 	if (nm) {
7978 		/* duplicate into min size buf for return result */
7979 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7980 		rval = DDI_SUCCESS;
7981 	} else {
7982 		*minor_name = NULL;
7983 		rval = DDI_FAILURE;
7984 	}
7985 
7986 	/* free max size buf and return */
7987 	kmem_free(buf, MAXNAMELEN);
7988 	return (rval);
7989 }
7990 
7991 int
7992 ddi_lyr_devid_to_devlist(
7993 	ddi_devid_t	devid,
7994 	char		*minor_name,
7995 	int		*retndevs,
7996 	dev_t		**retdevs)
7997 {
7998 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7999 
8000 	if (e_devid_cache_to_devt_list(devid, minor_name,
8001 	    retndevs, retdevs) == DDI_SUCCESS) {
8002 		ASSERT(*retndevs > 0);
8003 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8004 		    *retndevs, *retdevs);
8005 		return (DDI_SUCCESS);
8006 	}
8007 
8008 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8009 		return (DDI_FAILURE);
8010 	}
8011 
8012 	if (e_devid_cache_to_devt_list(devid, minor_name,
8013 	    retndevs, retdevs) == DDI_SUCCESS) {
8014 		ASSERT(*retndevs > 0);
8015 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8016 		    *retndevs, *retdevs);
8017 		return (DDI_SUCCESS);
8018 	}
8019 
8020 	return (DDI_FAILURE);
8021 }
8022 
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8028 
8029 /*
8030  * Note: This will need to be fixed if we ever allow processes to
8031  * have more than one data model per exec.
8032  */
8033 model_t
8034 ddi_mmap_get_model(void)
8035 {
8036 	return (get_udatamodel());
8037 }
8038 
/*
 * Normalize a data model value: mask it to the DDI_MODEL_MASK bits
 * and clear the DDI_MODEL_NATIVE flag bit.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8044 
8045 /*
8046  * ddi interfaces managing storage and retrieval of eventcookies.
8047  */
8048 
8049 /*
8050  * Invoke bus nexus driver's implementation of the
8051  * (*bus_remove_eventcall)() interface to remove a registered
8052  * callback handler for "event".
8053  */
8054 int
8055 ddi_remove_event_handler(ddi_callback_id_t id)
8056 {
8057 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8058 	dev_info_t *ddip;
8059 
8060 	ASSERT(cb);
8061 	if (!cb) {
8062 		return (DDI_FAILURE);
8063 	}
8064 
8065 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8066 	return (ndi_busop_remove_eventcall(ddip, id));
8067 }
8068 
8069 /*
8070  * Invoke bus nexus driver's implementation of the
8071  * (*bus_add_eventcall)() interface to register a callback handler
8072  * for "event".
8073  */
8074 int
8075 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8076     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8077     void *arg, ddi_callback_id_t *id)
8078 {
8079 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8080 }
8081 
8082 
8083 /*
8084  * Return a handle for event "name" by calling up the device tree
8085  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8086  * by a bus nexus or top of dev_info tree is reached.
8087  */
8088 int
8089 ddi_get_eventcookie(dev_info_t *dip, char *name,
8090     ddi_eventcookie_t *event_cookiep)
8091 {
8092 	return (ndi_busop_get_eventcookie(dip, dip,
8093 	    name, event_cookiep));
8094 }
8095 
8096 /*
8097  * This procedure is provided as the general callback function when
8098  * umem_lockmemory calls as_add_callback for long term memory locking.
8099  * When as_unmap, as_setprot, or as_free encounter segments which have
8100  * locked memory, this callback will be invoked.
8101  */
8102 void
8103 umem_lock_undo(struct as *as, void *arg, uint_t event)
8104 {
8105 	_NOTE(ARGUNUSED(as, event))
8106 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8107 
8108 	/*
8109 	 * Call the cleanup function.  Decrement the cookie reference
8110 	 * count, if it goes to zero, return the memory for the cookie.
8111 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8112 	 * called already.  It is the responsibility of the caller of
8113 	 * umem_lockmemory to handle the case of the cleanup routine
8114 	 * being called after a ddi_umem_unlock for the cookie
8115 	 * was called.
8116 	 */
8117 
8118 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8119 
8120 	/* remove the cookie if reference goes to zero */
8121 	if (atomic_dec_ulong_nv((ulong_t *)(&(cp->cook_refcnt))) == 0) {
8122 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8123 	}
8124 }
8125 
8126 /*
8127  * The following two Consolidation Private routines provide generic
8128  * interfaces to increase/decrease the amount of device-locked memory.
8129  *
8130  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8131  * must be called every time i_ddi_incr_locked_memory() is called.
8132  */
8133 int
8134 /* ARGSUSED */
8135 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8136 {
8137 	ASSERT(procp != NULL);
8138 	mutex_enter(&procp->p_lock);
8139 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8140 		mutex_exit(&procp->p_lock);
8141 		return (ENOMEM);
8142 	}
8143 	mutex_exit(&procp->p_lock);
8144 	return (0);
8145 }
8146 
8147 /*
8148  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8149  * must be called every time i_ddi_decr_locked_memory() is called.
8150  */
8151 /* ARGSUSED */
8152 void
8153 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8154 {
8155 	ASSERT(procp != NULL);
8156 	mutex_enter(&procp->p_lock);
8157 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8158 	mutex_exit(&procp->p_lock);
8159 }
8160 
8161 /*
8162  * The cookie->upd_max_lock_rctl flag is used to determine if we should
8163  * charge device locked memory to the max-locked-memory rctl.  Tracking
8164  * device locked memory causes the rctl locks to get hot under high-speed
8165  * I/O such as RDSv3 over IB.  If there is no max-locked-memory rctl limit,
8166  * we bypass charging the locked memory to the rctl altogether.  The cookie's
8167  * flag tells us if the rctl value should be updated when unlocking the memory,
8168  * in case the rctl gets changed after the memory was locked.  Any device
8169  * locked memory in that rare case will not be counted toward the rctl limit.
8170  *
8171  * When tracking the locked memory, the kproject_t parameter is always NULL
8172  * in the code paths:
8173  *	i_ddi_incr_locked_memory -> rctl_incr_locked_mem
8174  *	i_ddi_decr_locked_memory -> rctl_decr_locked_mem
8175  * Thus, we always use the tk_proj member to check the projp setting.
8176  */
8177 static void
8178 init_lockedmem_rctl_flag(struct ddi_umem_cookie *cookie)
8179 {
8180 	proc_t		*p;
8181 	kproject_t	*projp;
8182 	zone_t		*zonep;
8183 
8184 	ASSERT(cookie);
8185 	p = cookie->procp;
8186 	ASSERT(p);
8187 
8188 	zonep = p->p_zone;
8189 	projp = p->p_task->tk_proj;
8190 
8191 	ASSERT(zonep);
8192 	ASSERT(projp);
8193 
8194 	if (zonep->zone_locked_mem_ctl == UINT64_MAX &&
8195 	    projp->kpj_data.kpd_locked_mem_ctl == UINT64_MAX)
8196 		cookie->upd_max_lock_rctl = 0;
8197 	else
8198 		cookie->upd_max_lock_rctl = 1;
8199 }
8200 
8201 /*
8202  * This routine checks if the max-locked-memory resource ctl is
8203  * exceeded, if not increments it, grabs a hold on the project.
8204  * Returns 0 if successful otherwise returns error code
8205  */
8206 static int
8207 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8208 {
8209 	proc_t		*procp;
8210 	int		ret;
8211 
8212 	ASSERT(cookie);
8213 	if (cookie->upd_max_lock_rctl == 0)
8214 		return (0);
8215 
8216 	procp = cookie->procp;
8217 	ASSERT(procp);
8218 
8219 	if ((ret = i_ddi_incr_locked_memory(procp,
8220 	    cookie->size)) != 0) {
8221 		return (ret);
8222 	}
8223 	return (0);
8224 }
8225 
8226 /*
8227  * Decrements the max-locked-memory resource ctl and releases
8228  * the hold on the project that was acquired during umem_incr_devlockmem
8229  */
8230 static void
8231 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8232 {
8233 	proc_t		*proc;
8234 
8235 	if (cookie->upd_max_lock_rctl == 0)
8236 		return;
8237 
8238 	proc = (proc_t *)cookie->procp;
8239 	if (!proc)
8240 		return;
8241 
8242 	i_ddi_decr_locked_memory(proc, cookie->size);
8243 }
8244 
8245 /*
8246  * A consolidation private function which is essentially equivalent to
8247  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8248  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8249  * the ops_vector is valid.
8250  *
8251  * Lock the virtual address range in the current process and create a
8252  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8253  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8254  * to user space.
8255  *
8256  * Note: The resource control accounting currently uses a full charge model
8257  * in other words attempts to lock the same/overlapping areas of memory
8258  * will deduct the full size of the buffer from the projects running
8259  * counter for the device locked memory.
8260  *
8261  * addr, size should be PAGESIZE aligned
8262  *
8263  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8264  *	identifies whether the locked memory will be read or written or both
8265  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8266  * be maintained for an indefinitely long period (essentially permanent),
8267  * rather than for what would be required for a typical I/O completion.
8268  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8269  * if the memory pertains to a regular file which is mapped MAP_SHARED.
 * This is to prevent a deadlock if a file truncation is attempted
 * after the locking is done.
8272  *
8273  * Returns 0 on success
8274  *	EINVAL - for invalid parameters
8275  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8276  *	ENOMEM - is returned if the current request to lock memory exceeds
8277  *		*.max-locked-memory resource control value.
 *      EFAULT - memory pertains to a regular file mapped shared
 *		and DDI_UMEMLOCK_LONGTERM flag is set
8280  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8281  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
    struct umem_callback_ops *ops_vector,
    proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;
	/* decide whether locked memory is charged to the rctls */
	init_lockedmem_rctl_flag(p);

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8455 
8456 /*
8457  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8458  * the cookie.  Called from i_ddi_umem_unlock_thread.
8459  */
8460 
8461 static void
8462 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8463 {
8464 	uint_t	rc;
8465 
8466 	/*
8467 	 * There is no way to determine whether a callback to
8468 	 * umem_lock_undo was registered via as_add_callback.
8469 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8470 	 * a valid callback function structure.)  as_delete_callback
8471 	 * is called to delete a possible registered callback.  If the
8472 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8473 	 * indicates that there was a callback registered, and that is was
8474 	 * successfully deleted.  Thus, the cookie reference count
8475 	 * will never be decremented by umem_lock_undo.  Just return the
8476 	 * memory for the cookie, since both users of the cookie are done.
8477 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8478 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8479 	 * indicates that callback processing is taking place and, and
8480 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8481 	 * the cookie reference count when it is complete.
8482 	 *
8483 	 * This needs to be done before as_pageunlock so that the
8484 	 * persistence of as is guaranteed because of the locked pages.
8485 	 *
8486 	 */
8487 	rc = as_delete_callback(p->asp, p);
8488 
8489 
8490 	/*
8491 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8492 	 * after relvm is called so use p->asp.
8493 	 */
8494 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8495 
8496 	/*
8497 	 * Now that we have unlocked the memory decrement the
8498 	 * *.max-locked-memory rctl
8499 	 */
8500 	umem_decr_devlockmem(p);
8501 
8502 	if (rc == AS_CALLBACK_DELETED) {
8503 		/* umem_lock_undo will not happen, return the cookie memory */
8504 		ASSERT(p->cook_refcnt == 2);
8505 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8506 	} else {
8507 		/*
8508 		 * umem_undo_lock may happen if as_delete_callback returned
8509 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8510 		 * reference count, atomically, and return the cookie
8511 		 * memory if the reference count goes to zero.  The only
8512 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8513 		 * case, just return the cookie memory.
8514 		 */
8515 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8516 		    (atomic_dec_ulong_nv((ulong_t *)(&(p->cook_refcnt)))
8517 		    == 0)) {
8518 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8519 		}
8520 	}
8521 }
8522 
8523 /*
8524  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8525  *
8526  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8527  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8528  * via calls to ddi_umem_unlock.
8529  */
8530 
8531 static void
8532 i_ddi_umem_unlock_thread(void)
8533 {
8534 	struct ddi_umem_cookie	*ret_cookie;
8535 	callb_cpr_t	cprinfo;
8536 
8537 	/* process the ddi_umem_unlock list */
8538 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8539 	    callb_generic_cpr, "unlock_thread");
8540 	for (;;) {
8541 		mutex_enter(&ddi_umem_unlock_mutex);
8542 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8543 			ret_cookie = ddi_umem_unlock_head;
8544 			/* take if off the list */
8545 			if ((ddi_umem_unlock_head =
8546 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8547 				ddi_umem_unlock_tail = NULL;
8548 			}
8549 			mutex_exit(&ddi_umem_unlock_mutex);
8550 			/* unlock the pages in this cookie */
8551 			(void) i_ddi_umem_unlock(ret_cookie);
8552 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8553 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8554 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8555 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8556 			mutex_exit(&ddi_umem_unlock_mutex);
8557 		}
8558 	}
8559 	/* ddi_umem_unlock_thread does not exit */
8560 	/* NOTREACHED */
8561 }
8562 
8563 /*
8564  * Start the thread that will process the ddi_umem_unlock list if it is
8565  * not already started (i_ddi_umem_unlock_thread).
8566  */
8567 static void
8568 i_ddi_umem_unlock_thread_start(void)
8569 {
8570 	mutex_enter(&ddi_umem_unlock_mutex);
8571 	if (ddi_umem_unlock_thread == NULL) {
8572 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8573 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8574 		    TS_RUN, minclsyspri);
8575 	}
8576 	mutex_exit(&ddi_umem_unlock_mutex);
8577 }
8578 
8579 /*
8580  * Lock the virtual address range in the current process and create a
8581  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8582  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8583  * to user space.
8584  *
8585  * Note: The resource control accounting currently uses a full charge model
8586  * in other words attempts to lock the same/overlapping areas of memory
8587  * will deduct the full size of the buffer from the projects running
8588  * counter for the device locked memory. This applies to umem_lockmemory too.
8589  *
8590  * addr, size should be PAGESIZE aligned
8591  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8592  *	identifies whether the locked memory will be read or written or both
8593  *
8594  * Returns 0 on success
8595  *	EINVAL - for invalid parameters
8596  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8597  *	ENOMEM - is returned if the current request to lock memory exceeds
8598  *		*.max-locked-memory resource control value.
8599  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8600  */
8601 int
8602 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8603 {
8604 	int	error;
8605 	struct ddi_umem_cookie *p;
8606 
8607 	*cookie = NULL;		/* in case of any error return */
8608 
8609 	/* These are the only two valid flags */
8610 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8611 		return (EINVAL);
8612 	}
8613 
8614 	/* At least one of the two flags (or both) must be set */
8615 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8616 		return (EINVAL);
8617 	}
8618 
8619 	/* addr and len must be page-aligned */
8620 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8621 		return (EINVAL);
8622 	}
8623 
8624 	if ((len & PAGEOFFSET) != 0) {
8625 		return (EINVAL);
8626 	}
8627 
8628 	/*
8629 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8630 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8631 	 */
8632 	if (ddi_umem_unlock_thread == NULL)
8633 		i_ddi_umem_unlock_thread_start();
8634 
8635 	/* Allocate memory for the cookie */
8636 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8637 
8638 	/* Convert the flags to seg_rw type */
8639 	if (flags & DDI_UMEMLOCK_WRITE) {
8640 		p->s_flags = S_WRITE;
8641 	} else {
8642 		p->s_flags = S_READ;
8643 	}
8644 
8645 	/* Store curproc in cookie for later iosetup/unlock */
8646 	p->procp = (void *)curproc;
8647 
8648 	/*
8649 	 * Store the struct as pointer in cookie for later use by
8650 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8651 	 * is called after relvm is called.
8652 	 */
8653 	p->asp = curproc->p_as;
8654 	/*
8655 	 * The size field is needed for lockmem accounting.
8656 	 */
8657 	p->size = len;
8658 	init_lockedmem_rctl_flag(p);
8659 
8660 	if (umem_incr_devlockmem(p) != 0) {
8661 		/*
8662 		 * The requested memory cannot be locked
8663 		 */
8664 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8665 		*cookie = (ddi_umem_cookie_t)NULL;
8666 		return (ENOMEM);
8667 	}
8668 
8669 	/* Lock the pages corresponding to addr, len in memory */
8670 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8671 	    addr, len, p->s_flags);
8672 	if (error != 0) {
8673 		umem_decr_devlockmem(p);
8674 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8675 		*cookie = (ddi_umem_cookie_t)NULL;
8676 		return (error);
8677 	}
8678 
8679 	/* Initialize the fields in the ddi_umem_cookie */
8680 	p->cvaddr = addr;
8681 	p->type = UMEM_LOCKED;
8682 	p->cook_refcnt = 1;
8683 
8684 	*cookie = (ddi_umem_cookie_t)p;
8685 	return (error);
8686 }
8687 
8688 /*
8689  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8690  * unlocked by i_ddi_umem_unlock_thread.
8691  */
8692 
8693 void
8694 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8695 {
8696 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8697 
8698 	ASSERT(p->type == UMEM_LOCKED);
8699 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8700 	ASSERT(ddi_umem_unlock_thread != NULL);
8701 
8702 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8703 	/*
8704 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8705 	 * if it's called in the interrupt context. Otherwise, unlock pages
8706 	 * immediately.
8707 	 */
8708 	if (servicing_interrupt()) {
8709 		/* queue the unlock request and notify the thread */
8710 		mutex_enter(&ddi_umem_unlock_mutex);
8711 		if (ddi_umem_unlock_head == NULL) {
8712 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8713 			cv_broadcast(&ddi_umem_unlock_cv);
8714 		} else {
8715 			ddi_umem_unlock_tail->unl_forw = p;
8716 			ddi_umem_unlock_tail = p;
8717 		}
8718 		mutex_exit(&ddi_umem_unlock_mutex);
8719 	} else {
8720 		/* unlock the pages right away */
8721 		(void) i_ddi_umem_unlock(p);
8722 	}
8723 }
8724 
8725 /*
8726  * Create a buf structure from a ddi_umem_cookie
8727  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8728  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8729  * off, len - identifies the portion of the memory represented by the cookie
8730  *		that the buf points to.
8731  *	NOTE: off, len need to follow the alignment/size restrictions of the
8732  *		device (dev) that this buf will be passed to. Some devices
8733  *		will accept unrestricted alignment/size, whereas others (such as
8734  *		st) require some block-size alignment/size. It is the caller's
8735  *		responsibility to ensure that the alignment/size restrictions
8736  *		are met (we cannot assert as we do not know the restrictions)
8737  *
8738  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8739  *		the flags used in ddi_umem_lock
8740  *
8741  * The following three arguments are used to initialize fields in the
8742  * buf structure and are uninterpreted by this routine.
8743  *
8744  * dev
8745  * blkno
8746  * iodone
8747  *
8748  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8749  *
8750  * Returns a buf structure pointer on success (to be freed by freerbuf)
8751  *	NULL on any parameter error or memory alloc failure
8752  *
8753  */
8754 struct buf *
8755 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8756     int direction, dev_t dev, daddr_t blkno,
8757     int (*iodone)(struct buf *), int sleepflag)
8758 {
8759 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8760 	struct buf *bp;
8761 
8762 	/*
8763 	 * check for valid cookie offset, len
8764 	 */
8765 	if ((off + len) > p->size) {
8766 		return (NULL);
8767 	}
8768 
8769 	if (len > p->size) {
8770 		return (NULL);
8771 	}
8772 
8773 	/* direction has to be one of B_READ or B_WRITE */
8774 	if ((direction != B_READ) && (direction != B_WRITE)) {
8775 		return (NULL);
8776 	}
8777 
8778 	/* These are the only two valid sleepflags */
8779 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8780 		return (NULL);
8781 	}
8782 
8783 	/*
8784 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8785 	 */
8786 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8787 		return (NULL);
8788 	}
8789 
8790 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8791 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8792 	    (p->procp == NULL) : (p->procp != NULL));
8793 
8794 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8795 	if (bp == NULL) {
8796 		return (NULL);
8797 	}
8798 	bioinit(bp);
8799 
8800 	bp->b_flags = B_BUSY | B_PHYS | direction;
8801 	bp->b_edev = dev;
8802 	bp->b_lblkno = blkno;
8803 	bp->b_iodone = iodone;
8804 	bp->b_bcount = len;
8805 	bp->b_proc = (proc_t *)p->procp;
8806 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8807 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8808 	if (p->pparray != NULL) {
8809 		bp->b_flags |= B_SHADOW;
8810 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8811 		bp->b_shadow = p->pparray + btop(off);
8812 	}
8813 	return (bp);
8814 }
8815 
8816 /*
8817  * Fault-handling and related routines
8818  */
8819 
8820 ddi_devstate_t
8821 ddi_get_devstate(dev_info_t *dip)
8822 {
8823 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8824 		return (DDI_DEVSTATE_OFFLINE);
8825 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8826 		return (DDI_DEVSTATE_DOWN);
8827 	else if (DEVI_IS_BUS_QUIESCED(dip))
8828 		return (DDI_DEVSTATE_QUIESCED);
8829 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8830 		return (DDI_DEVSTATE_DEGRADED);
8831 	else
8832 		return (DDI_DEVSTATE_UP);
8833 }
8834 
8835 void
8836 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8837     ddi_fault_location_t location, const char *message)
8838 {
8839 	struct ddi_fault_event_data fd;
8840 	ddi_eventcookie_t ec;
8841 
8842 	/*
8843 	 * Assemble all the information into a fault-event-data structure
8844 	 */
8845 	fd.f_dip = dip;
8846 	fd.f_impact = impact;
8847 	fd.f_location = location;
8848 	fd.f_message = message;
8849 	fd.f_oldstate = ddi_get_devstate(dip);
8850 
8851 	/*
8852 	 * Get eventcookie from defining parent.
8853 	 */
8854 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8855 	    DDI_SUCCESS)
8856 		return;
8857 
8858 	(void) ndi_post_event(dip, dip, ec, &fd);
8859 }
8860 
8861 char *
8862 i_ddi_devi_class(dev_info_t *dip)
8863 {
8864 	return (DEVI(dip)->devi_device_class);
8865 }
8866 
8867 int
8868 i_ddi_set_devi_class(dev_info_t *dip, const char *devi_class, int flag)
8869 {
8870 	struct dev_info *devi = DEVI(dip);
8871 
8872 	mutex_enter(&devi->devi_lock);
8873 
8874 	if (devi->devi_device_class)
8875 		kmem_free(devi->devi_device_class,
8876 		    strlen(devi->devi_device_class) + 1);
8877 
8878 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8879 	    != NULL) {
8880 		mutex_exit(&devi->devi_lock);
8881 		return (DDI_SUCCESS);
8882 	}
8883 
8884 	mutex_exit(&devi->devi_lock);
8885 
8886 	return (DDI_FAILURE);
8887 }
8888 
8889 
8890 /*
8891  * Task Queues DDI interfaces.
8892  */
8893 
8894 /* ARGSUSED */
8895 ddi_taskq_t *
8896 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8897     pri_t pri, uint_t cflags)
8898 {
8899 	char full_name[TASKQ_NAMELEN];
8900 	const char *tq_name;
8901 	int nodeid = 0;
8902 
8903 	if (dip == NULL)
8904 		tq_name = name;
8905 	else {
8906 		nodeid = ddi_get_instance(dip);
8907 
8908 		if (name == NULL)
8909 			name = "tq";
8910 
8911 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8912 		    ddi_driver_name(dip), name);
8913 
8914 		tq_name = full_name;
8915 	}
8916 
8917 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8918 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8919 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8920 }
8921 
/*
 * ddi_taskq_destroy - destroy a task queue created by ddi_taskq_create.
 * Thin wrapper around taskq_destroy().
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8927 
8928 int
8929 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8930     void *arg, uint_t dflags)
8931 {
8932 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8933 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8934 
8935 	return (id != TASKQID_INVALID ? DDI_SUCCESS : DDI_FAILURE);
8936 }
8937 
/*
 * ddi_taskq_wait - wait for pending tasks on the queue.
 * Thin wrapper around taskq_wait().
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8943 
/*
 * ddi_taskq_suspend - suspend execution of tasks on the queue.
 * Thin wrapper around taskq_suspend().
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8949 
/*
 * ddi_taskq_suspended - report whether the queue is suspended.
 * Thin wrapper around taskq_suspended().
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8955 
/*
 * ddi_taskq_resume - resume a previously suspended task queue.
 * Thin wrapper around taskq_resume().
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8961 
8962 int
8963 ddi_parse(
8964 	const char	*ifname,
8965 	char		*alnum,
8966 	uint_t		*nump)
8967 {
8968 	const char	*p;
8969 	int		l;
8970 	ulong_t		num;
8971 	boolean_t	nonum = B_TRUE;
8972 	char		c;
8973 
8974 	l = strlen(ifname);
8975 	for (p = ifname + l; p != ifname; l--) {
8976 		c = *--p;
8977 		if (!isdigit(c)) {
8978 			(void) strlcpy(alnum, ifname, l + 1);
8979 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8980 				return (DDI_FAILURE);
8981 			break;
8982 		}
8983 		nonum = B_FALSE;
8984 	}
8985 	if (l == 0 || nonum)
8986 		return (DDI_FAILURE);
8987 
8988 	*nump = num;
8989 	return (DDI_SUCCESS);
8990 }
8991 
8992 /*
8993  * Default initialization function for drivers that don't need to quiesce.
8994  */
8995 /* ARGSUSED */
8996 int
8997 ddi_quiesce_not_needed(dev_info_t *dip)
8998 {
8999 	return (DDI_SUCCESS);
9000 }
9001 
9002 /*
9003  * Initialization function for drivers that should implement quiesce()
9004  * but haven't yet.
9005  */
9006 /* ARGSUSED */
9007 int
9008 ddi_quiesce_not_supported(dev_info_t *dip)
9009 {
9010 	return (DDI_FAILURE);
9011 }
9012 
9013 char *
9014 ddi_strdup(const char *str, int flag)
9015 {
9016 	int	n;
9017 	char	*ptr;
9018 
9019 	ASSERT(str != NULL);
9020 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9021 
9022 	n = strlen(str);
9023 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9024 		return (NULL);
9025 	bcopy(str, ptr, n + 1);
9026 	return (ptr);
9027 }
9028 
/*
 * strdup - duplicate a string with a sleeping allocation.
 * Convenience wrapper around ddi_strdup(str, KM_SLEEP).
 */
char *
strdup(const char *str)
{
	return (ddi_strdup(str, KM_SLEEP));
}
9034 
/*
 * strfree - free a string allocated by strdup/ddi_strdup.
 * The length is recomputed from the string itself.
 */
void
strfree(char *str)
{
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9041 
9042 /*
9043  * Generic DDI callback interfaces.
9044  */
9045 
/*
 * ddi_cb_register - register the (single) generic DDI callback for a
 * devinfo node.
 *
 * Returns DDI_SUCCESS on success, DDI_EINVAL on bad arguments,
 * DDI_EALREADY if a callback is already registered on this dip, or
 * DDI_FAILURE if called from interrupt context.  On success the opaque
 * handle for later ddi_cb_unregister is returned through ret_hdlp.
 */
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters (runtime check for non-DEBUG kernels) */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration; only one callback per dip */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* The handle is the address of the devinfo's callback pointer */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
9087 
/*
 * ddi_cb_unregister - remove a callback registered by ddi_cb_register.
 *
 * Returns DDI_SUCCESS, DDI_EINVAL on a bad handle, or DDI_FAILURE if
 * called from interrupt context.
 */
int
ddi_cb_unregister(ddi_cb_handle_t hdl)
{
	ddi_cb_t	*cbp;
	dev_info_t	*dip;

	ASSERT(hdl != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters */
	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
	    ((dip = cbp->cb_dip) == NULL))
		return (DDI_EINVAL);

	/* If removing an IRM callback, notify IRM */
	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_FALSE);

	/*
	 * Destroy the callback.
	 * NOTE(review): devi_cb_p is cleared after the free and no lock is
	 * held here — presumably callers serialize register/unregister;
	 * confirm before relying on concurrent use.
	 */
	kmem_free(cbp, sizeof (ddi_cb_t));
	DEVI(dip)->devi_cb_p = NULL;

	return (DDI_SUCCESS);
}
9116 
9117 /*
9118  * Platform independent DR routines
9119  */
9120 
9121 static int
9122 ndi2errno(int n)
9123 {
9124 	int err = 0;
9125 
9126 	switch (n) {
9127 		case NDI_NOMEM:
9128 			err = ENOMEM;
9129 			break;
9130 		case NDI_BUSY:
9131 			err = EBUSY;
9132 			break;
9133 		case NDI_FAULT:
9134 			err = EFAULT;
9135 			break;
9136 		case NDI_FAILURE:
9137 			err = EIO;
9138 			break;
9139 		case NDI_SUCCESS:
9140 			break;
9141 		case NDI_BADHANDLE:
9142 		default:
9143 			err = EINVAL;
9144 			break;
9145 	}
9146 	return (err);
9147 }
9148 
9149 /*
9150  * Prom tree node list
9151  */
9152 struct ptnode {
9153 	pnode_t		nodeid;
9154 	struct ptnode	*next;
9155 };
9156 
9157 /*
9158  * Prom tree walk arg
9159  */
9160 struct pta {
9161 	dev_info_t	*pdip;
9162 	devi_branch_t	*bp;
9163 	uint_t		flags;
9164 	dev_info_t	*fdip;
9165 	struct ptnode	*head;
9166 };
9167 
/*
 * visit_node - depth-first walk of the PROM tree rooted at nodeid.
 *
 * Each node accepted by the branch-selection callback
 * (ap->bp->create.prom_branch_select) is appended to the tail of the
 * ap->head list, preserving discovery order.  When DEVI_BRANCH_CHILD is
 * set only the node itself is considered; otherwise the walk recurses
 * into the node's children.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* find the tail of the list, then append the new entry */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* recurse over all children of this node */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
9199 
9200 /*
9201  * NOTE: The caller of this function must check for device contracts
9202  * or LDI callbacks against this dip before setting the dip offline.
9203  */
9204 static int
9205 set_infant_dip_offline(dev_info_t *dip, void *arg)
9206 {
9207 	char	*path = (char *)arg;
9208 
9209 	ASSERT(dip);
9210 	ASSERT(arg);
9211 
9212 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9213 		(void) ddi_pathname(dip, path);
9214 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9215 		    "node: %s", path);
9216 		return (DDI_FAILURE);
9217 	}
9218 
9219 	mutex_enter(&(DEVI(dip)->devi_lock));
9220 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
9221 		DEVI_SET_DEVICE_OFFLINE(dip);
9222 	mutex_exit(&(DEVI(dip)->devi_lock));
9223 
9224 	return (DDI_SUCCESS);
9225 }
9226 
typedef struct result {
	char	*path;		/* scratch buffer for ddi_pathname */
	int	result;		/* overall DDI_SUCCESS/DDI_FAILURE status */
} result_t;
9231 
9232 static int
9233 dip_set_offline(dev_info_t *dip, void *arg)
9234 {
9235 	int end;
9236 	result_t *resp = (result_t *)arg;
9237 
9238 	ASSERT(dip);
9239 	ASSERT(resp);
9240 
9241 	/*
9242 	 * We stop the walk if e_ddi_offline_notify() returns
9243 	 * failure, because this implies that one or more consumers
9244 	 * (either LDI or contract based) has blocked the offline.
9245 	 * So there is no point in conitnuing the walk
9246 	 */
9247 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9248 		resp->result = DDI_FAILURE;
9249 		return (DDI_WALK_TERMINATE);
9250 	}
9251 
9252 	/*
9253 	 * If set_infant_dip_offline() returns failure, it implies
9254 	 * that we failed to set a particular dip offline. This
9255 	 * does not imply that the offline as a whole should fail.
9256 	 * We want to do the best we can, so we continue the walk.
9257 	 */
9258 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9259 		end = DDI_SUCCESS;
9260 	else
9261 		end = DDI_FAILURE;
9262 
9263 	e_ddi_offline_finalize(dip, end);
9264 
9265 	return (DDI_WALK_CONTINUE);
9266 }
9267 
9268 /*
9269  * The call to e_ddi_offline_notify() exists for the
9270  * unlikely error case that a branch we are trying to
9271  * create already exists and has device contracts or LDI
9272  * event callbacks against it.
9273  *
9274  * We allow create to succeed for such branches only if
9275  * no constraints block the offline.
9276  */
9277 static int
9278 branch_set_offline(dev_info_t *dip, char *path)
9279 {
9280 	int		circ;
9281 	int		end;
9282 	result_t	res;
9283 
9284 
9285 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9286 		return (DDI_FAILURE);
9287 	}
9288 
9289 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9290 		end = DDI_SUCCESS;
9291 	else
9292 		end = DDI_FAILURE;
9293 
9294 	e_ddi_offline_finalize(dip, end);
9295 
9296 	if (end == DDI_FAILURE)
9297 		return (DDI_FAILURE);
9298 
9299 	res.result = DDI_SUCCESS;
9300 	res.path = path;
9301 
9302 	ndi_devi_enter(dip, &circ);
9303 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9304 	ndi_devi_exit(dip, circ);
9305 
9306 	return (res.result);
9307 }
9308 
/*ARGSUSED*/
/*
 * create_prom_branch - prom_tree_access callback that creates devinfo
 * branches for the PROM nodes selected by the client's
 * prom_branch_select callback (collected via visit_node).
 *
 * Returns 0 on success, EINVAL for a bad parent nodeid, ENODEV if no
 * PROM node was selected, EIO if any branch could not be created or
 * set offline, or the first configure error encountered.
 */
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Collect the selected PROM nodes under the parent */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	/* Consume the list, creating one branch per selected node */
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9416 
9417 static int
9418 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
9419 {
9420 	int			rv, circ, len;
9421 	int			i, flags, ret;
9422 	dev_info_t		*dip;
9423 	char			*nbuf;
9424 	char			*path;
9425 	static const char	*noname = "<none>";
9426 
9427 	ASSERT(pdip);
9428 	ASSERT(DEVI_BUSY_OWNED(pdip));
9429 
9430 	flags = 0;
9431 
9432 	/*
9433 	 * Creating the root of a branch ?
9434 	 */
9435 	if (rdipp) {
9436 		*rdipp = NULL;
9437 		flags = DEVI_BRANCH_ROOT;
9438 	}
9439 
9440 	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
9441 	rv = bp->create.sid_branch_create(dip, bp->arg, flags);
9442 
9443 	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);
9444 
9445 	if (rv == DDI_WALK_ERROR) {
9446 		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
9447 		    " properties on devinfo node %p",  (void *)dip);
9448 		goto fail;
9449 	}
9450 
9451 	len = OBP_MAXDRVNAME;
9452 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
9453 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
9454 	    != DDI_PROP_SUCCESS) {
9455 		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
9456 		    "no name property", (void *)dip);
9457 		goto fail;
9458 	}
9459 
9460 	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
9461 	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
9462 		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
9463 		    " for devinfo node %p", nbuf, (void *)dip);
9464 		goto fail;
9465 	}
9466 
9467 	kmem_free(nbuf, OBP_MAXDRVNAME);
9468 
9469 	/*
9470 	 * Ignore bind failures just like boot does
9471 	 */
9472 	(void) ndi_devi_bind_driver(dip, 0);
9473 
9474 	switch (rv) {
9475 	case DDI_WALK_CONTINUE:
9476 	case DDI_WALK_PRUNESIB:
9477 		ndi_devi_enter(dip, &circ);
9478 
9479 		i = DDI_WALK_CONTINUE;
9480 		for (; i == DDI_WALK_CONTINUE; ) {
9481 			i = sid_node_create(dip, bp, NULL);
9482 		}
9483 
9484 		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
9485 		if (i == DDI_WALK_ERROR)
9486 			rv = i;
9487 		/*
9488 		 * If PRUNESIB stop creating siblings
9489 		 * of dip's child. Subsequent walk behavior
9490 		 * is determined by rv returned by dip.
9491 		 */
9492 
9493 		ndi_devi_exit(dip, circ);
9494 		break;
9495 	case DDI_WALK_TERMINATE:
9496 		/*
9497 		 * Don't create children and ask our parent
9498 		 * to not create siblings either.
9499 		 */
9500 		rv = DDI_WALK_PRUNESIB;
9501 		break;
9502 	case DDI_WALK_PRUNECHILD:
9503 		/*
9504 		 * Don't create children, but ask parent to continue
9505 		 * with siblings.
9506 		 */
9507 		rv = DDI_WALK_CONTINUE;
9508 		break;
9509 	default:
9510 		ASSERT(0);
9511 		break;
9512 	}
9513 
9514 	if (rdipp)
9515 		*rdipp = dip;
9516 
9517 	/*
9518 	 * Set device offline - only the "configure" op should cause an attach.
9519 	 * Note that it is safe to set the dip offline without checking
9520 	 * for either device contract or layered driver (LDI) based constraints
9521 	 * since there cannot be any contracts or LDI opens of this device.
9522 	 * This is because this node is a newly created dip with the parent busy
9523 	 * held, so no other thread can come in and attach this dip. A dip that
9524 	 * has never been attached cannot have contracts since by definition
9525 	 * a device contract (an agreement between a process and a device minor
9526 	 * node) can only be created against a device that has minor nodes
9527 	 * i.e is attached. Similarly an LDI open will only succeed if the
9528 	 * dip is attached. We assert below that the dip is not attached.
9529 	 */
9530 	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
9531 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9532 	ret = set_infant_dip_offline(dip, path);
9533 	ASSERT(ret == DDI_SUCCESS);
9534 	kmem_free(path, MAXPATHLEN);
9535 
9536 	return (rv);
9537 fail:
9538 	(void) ndi_devi_free(dip);
9539 	kmem_free(nbuf, OBP_MAXDRVNAME);
9540 	return (DDI_WALK_ERROR);
9541 }
9542 
/*
 * create_sid_branch - repeatedly create SID branch roots under pdip
 * until sid_node_create stops the walk (PRUNESIB) or reports an error.
 *
 * Returns 0 on success, EIO on a walk error, or the first configure
 * error encountered.
 */
static int
create_sid_branch(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		/* Hold the branch while the parent is still busy held */
		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9586 
9587 int
9588 e_ddi_branch_create(
9589 	dev_info_t	*pdip,
9590 	devi_branch_t	*bp,
9591 	dev_info_t	**dipp,
9592 	uint_t		flags)
9593 {
9594 	int prom_devi, sid_devi, error;
9595 
9596 	if (pdip == NULL || bp == NULL || bp->type == 0)
9597 		return (EINVAL);
9598 
9599 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9600 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9601 
9602 	if (prom_devi && bp->create.prom_branch_select == NULL)
9603 		return (EINVAL);
9604 	else if (sid_devi && bp->create.sid_branch_create == NULL)
9605 		return (EINVAL);
9606 	else if (!prom_devi && !sid_devi)
9607 		return (EINVAL);
9608 
9609 	if (flags & DEVI_BRANCH_EVENT)
9610 		return (EINVAL);
9611 
9612 	if (prom_devi) {
9613 		struct pta pta = {0};
9614 
9615 		pta.pdip = pdip;
9616 		pta.bp = bp;
9617 		pta.flags = flags;
9618 
9619 		error = prom_tree_access(create_prom_branch, &pta, NULL);
9620 
9621 		if (dipp)
9622 			*dipp = pta.fdip;
9623 		else if (pta.fdip)
9624 			ndi_rele_devi(pta.fdip);
9625 	} else {
9626 		error = create_sid_branch(pdip, bp, dipp, flags);
9627 	}
9628 
9629 	return (error);
9630 }
9631 
/*
 * e_ddi_branch_configure - bind, initialize and online a previously
 * created (and held) branch rooted at rdip.
 *
 * Returns 0 on success or an errno.  On failure, if dipp is non-NULL
 * the (held) root dip is returned through it.
 */
int
e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
{
	int		rv;
	char		*devnm;
	dev_info_t	*pdip;

	if (dipp)
		*dipp = NULL;

	/*
	 * NOTE(review): the DEVI_BRANCH_EVENT test is redundant — any
	 * non-zero flags already fails the preceding flags != 0 check.
	 */
	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
		return (EINVAL);

	pdip = ddi_get_parent(rdip);

	ndi_hold_devi(pdip);

	/* The branch must have been held by e_ddi_branch_hold */
	if (!e_ddi_branch_held(rdip)) {
		ndi_rele_devi(pdip);
		cmn_err(CE_WARN, "e_ddi_branch_configure: "
		    "dip(%p) not held", (void *)rdip);
		return (EINVAL);
	}

	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
		/*
		 * First attempt to bind a driver. If we fail, return
		 * success (On some platforms, dips for some device
		 * types (CPUs) may not have a driver)
		 */
		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
			ndi_rele_devi(pdip);
			return (0);
		}

		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
			rv = NDI_FAILURE;
			goto out;
		}
	}

	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);

	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);

	(void) ddi_deviname(rdip, devnm);

	/* skip the leading '/' produced by ddi_deviname */
	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
		/* release hold from ndi_devi_config_one() */
		ndi_rele_devi(rdip);
	}

	kmem_free(devnm, MAXNAMELEN + 1);
out:
	if (rv != NDI_SUCCESS && dipp && rdip) {
		ndi_hold_devi(rdip);
		*dipp = rdip;
	}
	ndi_rele_devi(pdip);
	return (ndi2errno(rv));
}
9694 
9695 void
9696 e_ddi_branch_hold(dev_info_t *rdip)
9697 {
9698 	if (e_ddi_branch_held(rdip)) {
9699 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9700 		return;
9701 	}
9702 
9703 	mutex_enter(&DEVI(rdip)->devi_lock);
9704 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9705 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9706 		DEVI(rdip)->devi_ref++;
9707 	}
9708 	ASSERT(DEVI(rdip)->devi_ref > 0);
9709 	mutex_exit(&DEVI(rdip)->devi_lock);
9710 }
9711 
9712 int
9713 e_ddi_branch_held(dev_info_t *rdip)
9714 {
9715 	int rv = 0;
9716 
9717 	mutex_enter(&DEVI(rdip)->devi_lock);
9718 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9719 	    DEVI(rdip)->devi_ref > 0) {
9720 		rv = 1;
9721 	}
9722 	mutex_exit(&DEVI(rdip)->devi_lock);
9723 
9724 	return (rv);
9725 }
9726 
9727 void
9728 e_ddi_branch_rele(dev_info_t *rdip)
9729 {
9730 	mutex_enter(&DEVI(rdip)->devi_lock);
9731 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9732 	DEVI(rdip)->devi_ref--;
9733 	mutex_exit(&DEVI(rdip)->devi_lock);
9734 }
9735 
9736 int
9737 e_ddi_branch_unconfigure(
9738 	dev_info_t *rdip,
9739 	dev_info_t **dipp,
9740 	uint_t flags)
9741 {
9742 	int	circ, rv;
9743 	int	destroy;
9744 	char	*devnm;
9745 	uint_t	nflags;
9746 	dev_info_t *pdip;
9747 
9748 	if (dipp)
9749 		*dipp = NULL;
9750 
9751 	if (rdip == NULL)
9752 		return (EINVAL);
9753 
9754 	pdip = ddi_get_parent(rdip);
9755 
9756 	ASSERT(pdip);
9757 
9758 	/*
9759 	 * Check if caller holds pdip busy - can cause deadlocks during
9760 	 * devfs_clean()
9761 	 */
9762 	if (DEVI_BUSY_OWNED(pdip)) {
9763 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
9764 		    " devinfo node(%p) is busy held", (void *)pdip);
9765 		return (EINVAL);
9766 	}
9767 
9768 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
9769 
9770 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9771 
9772 	ndi_devi_enter(pdip, &circ);
9773 	(void) ddi_deviname(rdip, devnm);
9774 	ndi_devi_exit(pdip, circ);
9775 
9776 	/*
9777 	 * ddi_deviname() returns a component name with / prepended.
9778 	 */
9779 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
9780 
9781 	ndi_devi_enter(pdip, &circ);
9782 
9783 	/*
9784 	 * Recreate device name as it may have changed state (init/uninit)
9785 	 * when parent busy lock was dropped for devfs_clean()
9786 	 */
9787 	(void) ddi_deviname(rdip, devnm);
9788 
9789 	if (!e_ddi_branch_held(rdip)) {
9790 		kmem_free(devnm, MAXNAMELEN + 1);
9791 		ndi_devi_exit(pdip, circ);
9792 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
9793 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
9794 		return (EINVAL);
9795 	}
9796 
9797 	/*
9798 	 * Release hold on the branch. This is ok since we are holding the
9799 	 * parent busy. If rdip is not removed, we must do a hold on the
9800 	 * branch before returning.
9801 	 */
9802 	e_ddi_branch_rele(rdip);
9803 
9804 	nflags = NDI_DEVI_OFFLINE;
9805 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
9806 		nflags |= NDI_DEVI_REMOVE;
9807 		destroy = 1;
9808 	} else {
9809 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
9810 	}
9811 
9812 	if (flags & DEVI_BRANCH_EVENT)
9813 		nflags |= NDI_POST_EVENT;
9814 
9815 	if (i_ddi_devi_attached(pdip) &&
9816 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
9817 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
9818 	} else {
9819 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
9820 		if (rv == NDI_SUCCESS) {
9821 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
9822 			rv = ndi_devi_offline(rdip, nflags);
9823 		}
9824 	}
9825 
9826 	if (!destroy || rv != NDI_SUCCESS) {
9827 		/* The dip still exists, so do a hold */
9828 		e_ddi_branch_hold(rdip);
9829 	}
9830 out:
9831 	kmem_free(devnm, MAXNAMELEN + 1);
9832 	ndi_devi_exit(pdip, circ);
9833 	return (ndi2errno(rv));
9834 }
9835 
9836 int
9837 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
9838 {
9839 	return (e_ddi_branch_unconfigure(rdip, dipp,
9840 	    flag|DEVI_BRANCH_DESTROY));
9841 }
9842 
9843 /*
9844  * Number of chains for hash table
9845  */
9846 #define	NUMCHAINS	17
9847 
9848 /*
9849  * Devinfo busy arg
9850  */
struct devi_busy {
	int dv_total;		/* sum of dv_node vnode v_count values seen */
	int s_total;		/* sum of specfs snode open counts seen */
	mod_hash_t *dv_hash;	/* dip -> accumulated dv_node v_count */
	mod_hash_t *s_hash;	/* dip -> accumulated snode open count */
	int (*callback)(dev_info_t *, void *, uint_t);	/* per-dip callback */
	void *arg;		/* caller-supplied argument for callback */
};
9859 
9860 static int
9861 visit_dip(dev_info_t *dip, void *arg)
9862 {
9863 	uintptr_t sbusy, dvbusy, ref;
9864 	struct devi_busy *bsp = arg;
9865 
9866 	ASSERT(bsp->callback);
9867 
9868 	/*
9869 	 * A dip cannot be busy if its reference count is 0
9870 	 */
9871 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
9872 		return (bsp->callback(dip, bsp->arg, 0));
9873 	}
9874 
9875 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
9876 		dvbusy = 0;
9877 
9878 	/*
9879 	 * To catch device opens currently maintained on specfs common snodes.
9880 	 */
9881 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9882 		sbusy = 0;
9883 
9884 #ifdef	DEBUG
9885 	if (ref < sbusy || ref < dvbusy) {
9886 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
9887 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
9888 	}
9889 #endif
9890 
9891 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
9892 
9893 	return (bsp->callback(dip, bsp->arg, dvbusy));
9894 }
9895 
9896 static int
9897 visit_snode(struct snode *sp, void *arg)
9898 {
9899 	uintptr_t sbusy;
9900 	dev_info_t *dip;
9901 	int count;
9902 	struct devi_busy *bsp = arg;
9903 
9904 	ASSERT(sp);
9905 
9906 	/*
9907 	 * The stable lock is held. This prevents
9908 	 * the snode and its associated dip from
9909 	 * going away.
9910 	 */
9911 	dip = NULL;
9912 	count = spec_devi_open_count(sp, &dip);
9913 
9914 	if (count <= 0)
9915 		return (DDI_WALK_CONTINUE);
9916 
9917 	ASSERT(dip);
9918 
9919 	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
9920 		sbusy = count;
9921 	else
9922 		sbusy += count;
9923 
9924 	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
9925 		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
9926 		    "sbusy = %lu", "e_ddi_branch_referenced",
9927 		    (void *)dip, sbusy);
9928 	}
9929 
9930 	bsp->s_total += count;
9931 
9932 	return (DDI_WALK_CONTINUE);
9933 }
9934 
9935 static void
9936 visit_dvnode(struct dv_node *dv, void *arg)
9937 {
9938 	uintptr_t dvbusy;
9939 	uint_t count;
9940 	struct vnode *vp;
9941 	struct devi_busy *bsp = arg;
9942 
9943 	ASSERT(dv && dv->dv_devi);
9944 
9945 	vp = DVTOV(dv);
9946 
9947 	mutex_enter(&vp->v_lock);
9948 	count = vp->v_count;
9949 	mutex_exit(&vp->v_lock);
9950 
9951 	if (!count)
9952 		return;
9953 
9954 	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
9955 	    (mod_hash_val_t *)&dvbusy))
9956 		dvbusy = count;
9957 	else
9958 		dvbusy += count;
9959 
9960 	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
9961 	    (mod_hash_val_t)dvbusy)) {
9962 		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
9963 		    "dvbusy=%lu", "e_ddi_branch_referenced",
9964 		    (void *)dv->dv_devi, dvbusy);
9965 	}
9966 
9967 	bsp->dv_total += count;
9968 }
9969 
9970 /*
9971  * Returns reference count on success or -1 on failure.
9972  */
9973 int
9974 e_ddi_branch_referenced(
9975 	dev_info_t *rdip,
9976 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
9977 	void *arg)
9978 {
9979 	int circ;
9980 	char *path;
9981 	dev_info_t *pdip;
9982 	struct devi_busy bsa = {0};
9983 
9984 	ASSERT(rdip);
9985 
9986 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9987 
9988 	ndi_hold_devi(rdip);
9989 
9990 	pdip = ddi_get_parent(rdip);
9991 
9992 	ASSERT(pdip);
9993 
9994 	/*
9995 	 * Check if caller holds pdip busy - can cause deadlocks during
9996 	 * devfs_walk()
9997 	 */
9998 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
9999 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10000 		    "devinfo branch(%p) not held or parent busy held",
10001 		    (void *)rdip);
10002 		ndi_rele_devi(rdip);
10003 		kmem_free(path, MAXPATHLEN);
10004 		return (-1);
10005 	}
10006 
10007 	ndi_devi_enter(pdip, &circ);
10008 	(void) ddi_pathname(rdip, path);
10009 	ndi_devi_exit(pdip, circ);
10010 
10011 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10012 	    mod_hash_null_valdtor, sizeof (struct dev_info));
10013 
10014 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10015 	    mod_hash_null_valdtor, sizeof (struct snode));
10016 
10017 	if (devfs_walk(path, visit_dvnode, &bsa)) {
10018 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10019 		    "devfs walk failed for: %s", path);
10020 		kmem_free(path, MAXPATHLEN);
10021 		bsa.s_total = bsa.dv_total = -1;
10022 		goto out;
10023 	}
10024 
10025 	kmem_free(path, MAXPATHLEN);
10026 
10027 	/*
10028 	 * Walk the snode table to detect device opens, which are currently
10029 	 * maintained on specfs common snodes.
10030 	 */
10031 	spec_snode_walk(visit_snode, &bsa);
10032 
10033 	if (callback == NULL)
10034 		goto out;
10035 
10036 	bsa.callback = callback;
10037 	bsa.arg = arg;
10038 
10039 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10040 		ndi_devi_enter(rdip, &circ);
10041 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10042 		ndi_devi_exit(rdip, circ);
10043 	}
10044 
10045 out:
10046 	ndi_rele_devi(rdip);
10047 	mod_hash_destroy_ptrhash(bsa.s_hash);
10048 	mod_hash_destroy_ptrhash(bsa.dv_hash);
10049 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10050 }
10051