xref: /illumos-gate/usr/src/uts/common/os/sunddi.c (revision fb2a9bae0030340ad72b9c26ba1ffee2ee3cafec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_timer.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 #include <sys/zone.h>
86 #include <sys/clock_impl.h>
87 #include <sys/ddi.h>
88 #include <sys/modhash.h>
89 #include <sys/sunldi_impl.h>
90 #include <sys/fs/dv_node.h>
91 #include <sys/fs/snode.h>
92 
extern	pri_t	minclsyspri;

/*
 * Resource-control handles used for locked-memory accounting; defined and
 * initialized elsewhere in the kernel (presumably project/zone rctls -
 * confirm against the rctl registration code).
 */
extern	rctl_hndl_t rc_project_locked_mem;
extern	rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
static int sunddi_debug = 0;		/* debug tracing knob, DEBUG kernels only */
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

/* Starts the worker thread that drains the unlock FIFO below. */
static	void	i_ddi_umem_unlock_thread_start(void);

static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
static	kthread_t	*ddi_umem_unlock_thread; /* worker draining the list */
/*
 * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 */
static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
156 int
157 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
158 {
159 	return (i_ddi_apply_range(dp, rdip, rp));
160 }
161 
162 int
163 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
164     off_t len)
165 {
166 	ddi_map_req_t mr;
167 #if defined(__x86)
168 	struct {
169 		int	bus;
170 		int	addr;
171 		int	size;
172 	} reg, *reglist;
173 	uint_t	length;
174 	int	rc;
175 
176 	/*
177 	 * get the 'registers' or the 'reg' property.
178 	 * We look up the reg property as an array of
179 	 * int's.
180 	 */
181 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
182 	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
183 	if (rc != DDI_PROP_SUCCESS)
184 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
185 		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
186 	if (rc == DDI_PROP_SUCCESS) {
187 		/*
188 		 * point to the required entry.
189 		 */
190 		reg = reglist[rnumber];
191 		reg.addr += offset;
192 		if (len != 0)
193 			reg.size = len;
194 		/*
195 		 * make a new property containing ONLY the required tuple.
196 		 */
197 		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
198 		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
199 		    != DDI_PROP_SUCCESS) {
200 			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
201 			    "property", DEVI(dip)->devi_name,
202 			    DEVI(dip)->devi_instance, chosen_reg);
203 		}
204 		/*
205 		 * free the memory allocated by
206 		 * ddi_prop_lookup_int_array ().
207 		 */
208 		ddi_prop_free((void *)reglist);
209 	}
210 #endif
211 	mr.map_op = DDI_MO_MAP_LOCKED;
212 	mr.map_type = DDI_MT_RNUMBER;
213 	mr.map_obj.rnumber = rnumber;
214 	mr.map_prot = PROT_READ | PROT_WRITE;
215 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
216 	mr.map_handlep = NULL;
217 	mr.map_vers = DDI_MAP_VERSION;
218 
219 	/*
220 	 * Call my parent to map in my regs.
221 	 */
222 
223 	return (ddi_map(dip, &mr, offset, len, kaddrp));
224 }
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
251 int
252 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
253 	off_t offset, off_t len, caddr_t *vaddrp)
254 {
255 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
256 }
257 
258 /*
259  * nullbusmap:	The/DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
281 struct regspec *
282 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
283 {
284 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
285 }
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uintptr_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
408 int
409 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
410 {
411 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
412 	    val_p));
413 }
414 
415 int
416 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
417 {
418 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
419 	    val_p));
420 }
421 
422 int
423 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
424 {
425 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
426 	    val_p));
427 }
428 
429 int
430 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
431 {
432 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
433 	    val_p));
434 }
435 
436 
437 /*
438  * We need to separate the old interfaces from the new ones and leave them
439  * in here for a while. Previous versions of the OS defined the new interfaces
440  * to the old interfaces. This way we can fix things up so that we can
441  * eventually remove these interfaces.
442  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
443  * or earlier will actually have a reference to ddi_peekc in the binary.
444  */
445 #ifdef _ILP32
446 int
447 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
448 {
449 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
450 	    val_p));
451 }
452 
453 int
454 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
455 {
456 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
457 	    val_p));
458 }
459 
460 int
461 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
462 {
463 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
464 	    val_p));
465 }
466 
467 int
468 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
469 {
470 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
471 	    val_p));
472 }
473 #endif /* _ILP32 */
474 
475 int
476 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
477 {
478 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
479 }
480 
481 int
482 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
483 {
484 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
485 }
486 
487 int
488 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
489 {
490 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
491 }
492 
493 int
494 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
495 {
496 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
497 }
498 
499 /*
500  * We need to separate the old interfaces from the new ones and leave them
501  * in here for a while. Previous versions of the OS defined the new interfaces
502  * to the old interfaces. This way we can fix things up so that we can
503  * eventually remove these interfaces.
504  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
505  * or earlier will actually have a reference to ddi_pokec in the binary.
506  */
507 #ifdef _ILP32
508 int
509 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
510 {
511 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
512 }
513 
514 int
515 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
516 {
517 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
518 }
519 
520 int
521 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
522 {
523 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
524 }
525 
526 int
527 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
528 {
529 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
530 }
531 #endif /* _ILP32 */
532 
533 /*
534  * ddi_peekpokeio() is used primarily by the mem drivers for moving
535  * data to and from uio structures via peek and poke.  Note that we
536  * use "internal" routines ddi_peek and ddi_poke to make this go
537  * slightly faster, avoiding the call overhead ..
538  */
/*
 * Move 'len' bytes between device address 'addr' and the caller's uio,
 * using cautious peek/poke accesses of at most 'xfersize' bytes each.
 * Used primarily by the mem drivers.  Returns DDI_SUCCESS, or DDI_FAILURE
 * on the first failed device access or uio transfer.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;	/* word-transfer staging buffer */
	int8_t w8;		/* byte-transfer staging buffer */
	size_t sz;		/* bytes moved this iteration */
	int o;

	/* Clamp the unit of transfer to the native word size. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * If either the remaining length or the current address is
		 * odd, fall back to a single byte transfer this iteration.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest transfer <= xfersize for which
			 * both 'addr' and 'len' are suitably aligned; the
			 * fallthroughs step down 8 -> 4 -> 2 bytes.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Read: device -> staging buffer -> uio. */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* Write: uio -> staging buffer -> device. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
614 
615 /*
616  * These routines are used by drivers that do layered ioctls
617  * On sparc, they're implemented in assembler to avoid spilling
618  * register windows in the common (copyin) case ..
619  */
620 #if !defined(__sparc)
621 int
622 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
623 {
624 	if (flags & FKIOCTL)
625 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
626 	return (copyin(buf, kernbuf, size));
627 }
628 
629 int
630 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
631 {
632 	if (flags & FKIOCTL)
633 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
634 	return (copyout(buf, kernbuf, size));
635 }
636 #endif	/* !__sparc */
637 
638 /*
639  * Conversions in nexus pagesize units.  We don't duplicate the
640  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
641  * routines anyway.
642  */
643 unsigned long
644 ddi_btop(dev_info_t *dip, unsigned long bytes)
645 {
646 	unsigned long pages;
647 
648 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
649 	return (pages);
650 }
651 
652 unsigned long
653 ddi_btopr(dev_info_t *dip, unsigned long bytes)
654 {
655 	unsigned long pages;
656 
657 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
658 	return (pages);
659 }
660 
661 unsigned long
662 ddi_ptob(dev_info_t *dip, unsigned long pages)
663 {
664 	unsigned long bytes;
665 
666 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
667 	return (bytes);
668 }
669 
670 unsigned int
671 ddi_enter_critical(void)
672 {
673 	return ((uint_t)spl7());
674 }
675 
void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the spl saved by the matching ddi_enter_critical(). */
	splx((int)spl);
}
681 
682 /*
683  * Nexus ctlops punter
684  */
685 
686 #if !defined(__sparc)
687 /*
688  * Request bus_ctl parent to handle a bus_ctl request
689  *
690  * (The sparc version is in sparc_ddi.s)
691  */
692 int
693 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
694 {
695 	int (*fp)();
696 
697 	if (!d || !r)
698 		return (DDI_FAILURE);
699 
700 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
701 		return (DDI_FAILURE);
702 
703 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
704 	return ((*fp)(d, r, op, a, v));
705 }
706 
707 #endif
708 
709 /*
710  * DMA/DVMA setup
711  */
712 
/*
 * Default DMA limits used when a caller supplies none (sparc only; the
 * x86 table below additionally carries the extended version-1 fields).
 * The dlim_version initializer previously read "(uint_t)0x86<<24+0",
 * which - because '+' binds tighter than '<<' - parses as 0x86 << (24+0);
 * the value is unchanged but parenthesized here to remove the precedence
 * trap.
 */
#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	((uint_t)0x86 << 24) + 0, /* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
739 
/*
 * Legacy DMA setup: hand the fully-built request in *dmareqp to the
 * appropriate bus_dma_map routine and return a DMA handle via handlep.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	/* Stack copy so the caller's limits structure is never modified. */
	auto ddi_dma_lim_t dma_lim;

	/* Substitute platform defaults when the caller supplied no limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* On x86 the caller must provide explicit limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
772 
773 int
774 ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
775     uint_t flags, int (*waitfp)(), caddr_t arg,
776     ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
777 {
778 	int (*funcp)() = ddi_dma_map;
779 	ddi_dma_lim_t dma_lim;
780 	struct ddi_dma_req dmareq;
781 	struct bus_ops *bop;
782 
783 	if (len == 0) {
784 		return (DDI_DMA_NOMAPPING);
785 	}
786 	if (limits == (ddi_dma_lim_t *)0) {
787 		dma_lim = standard_limits;
788 	} else {
789 		dma_lim = *limits;
790 	}
791 	dmareq.dmar_limits = &dma_lim;
792 	dmareq.dmar_flags = flags;
793 	dmareq.dmar_fp = waitfp;
794 	dmareq.dmar_arg = arg;
795 	dmareq.dmar_object.dmao_size = len;
796 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
797 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
798 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
799 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
800 
801 	/*
802 	 * Handle the case that the requester is both a leaf
803 	 * and a nexus driver simultaneously by calling the
804 	 * requester's bus_dma_map function directly instead
805 	 * of ddi_dma_map.
806 	 */
807 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
808 	if (bop && bop->bus_dma_map)
809 		funcp = bop->bus_dma_map;
810 
811 	return ((*funcp)(dip, dip, &dmareq, handlep));
812 }
813 
/*
 * Build a DMA request describing the data of buf(9S) structure 'bp' and
 * hand it to the appropriate bus_dma_map routine; the resulting handle is
 * returned through handlep.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Copy the caller's limits, or fall back to platform defaults. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: describe the buffer by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* Otherwise describe it by its virtual address. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* Shadow page list accompanies the vaddr. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
878 
879 #if !defined(__sparc)
880 /*
881  * Request bus_dma_ctl parent to fiddle with a dma request.
882  *
883  * (The sparc version is in sparc_subr.s)
884  */
885 int
886 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
887     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
888     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
889 {
890 	int (*fp)();
891 
892 	if (dip != ddi_root_node())
893 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
894 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
895 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
896 }
897 #endif
898 
899 /*
900  * For all DMA control functions, call the DMA control
901  * routine and return status.
902  *
903  * Just plain assume that the parent is to be called.
904  * If a nexus driver or a thread outside the framework
905  * of a nexus driver or a leaf driver calls these functions,
906  * it is up to them to deal with the fact that the parent's
907  * bus_dma_ctl function will be the first one called.
908  */
909 
910 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
911 
912 int
913 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
914 {
915 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
916 }
917 
918 int
919 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
920 {
921 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
922 }
923 
924 int
925 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
926 {
927 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
928 	    (off_t *)c, 0, (caddr_t *)o, 0));
929 }
930 
931 int
932 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
933 {
934 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
935 	    l, (caddr_t *)c, 0));
936 }
937 
938 int
939 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
940 {
941 	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
942 		return (DDI_FAILURE);
943 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
944 }
945 
946 int
947 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
948     ddi_dma_win_t *nwin)
949 {
950 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
951 	    (caddr_t *)nwin, 0));
952 }
953 
/*
 * Step to the DMA segment following 'seg' within window 'win'; the next
 * segment is returned through nseg.  The window doubles as the DMA handle
 * for the mctl call.
 */
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}
962 
963 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
964 /*
965  * This routine is Obsolete and should be removed from ALL architectures
966  * in a future release of Solaris.
967  *
968  * It is deliberately NOT ported to amd64; please fix the code that
969  * depends on this routine to use ddi_dma_nextcookie(9F).
970  *
971  * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
972  * is a side effect to some other cleanup), we're still not going to support
973  * this interface on x64.
974  */
/*
 * Obsolete: convert DMA segment 'seg' to a cookie, returning its offset
 * and length through o and l.  The segment doubles as the DMA handle for
 * the mctl call.  See the block comment above: use ddi_dma_nextcookie(9F)
 * in new code.
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
984 #endif	/* (__i386 && !__amd64) || __sparc */
985 
986 #if !defined(__sparc)
987 
988 /*
989  * The SPARC versions of these routines are done in assembler to
990  * save register windows, so they're in sparc_subr.s.
991  */
992 
993 int
994 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
995 	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
996 {
997 	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
998 	    ddi_dma_handle_t *);
999 
1000 	if (dip != ddi_root_node())
1001 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
1002 
1003 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_map;
1004 	return ((*funcp)(dip, rdip, dmareqp, handlep));
1005 }
1006 
1007 int
1008 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1009     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1010 {
1011 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
1012 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
1013 
1014 	if (dip != ddi_root_node())
1015 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1016 
1017 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
1018 	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
1019 }
1020 
/* Release a DMA handle previously obtained via ddi_dma_allochdl(). */
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): this walks via devi_bus_dma_allochdl rather than a
	 * freehdl-specific cached field, unlike the sibling wrappers.  That
	 * is correct only if the alloc and free ops always cache to the
	 * same ancestor - confirm against ndi_impldefs.h before changing.
	 */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(dip, rdip, handlep));
}
1032 
1033 int
1034 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1035     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1036     ddi_dma_cookie_t *cp, uint_t *ccountp)
1037 {
1038 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1039 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
1040 
1041 	if (dip != ddi_root_node())
1042 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
1043 
1044 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
1045 	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
1046 }
1047 
1048 int
1049 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1050     ddi_dma_handle_t handle)
1051 {
1052 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1053 
1054 	if (dip != ddi_root_node())
1055 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1056 
1057 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
1058 	return ((*funcp)(dip, rdip, handle));
1059 }
1060 
1061 
1062 int
1063 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1064     ddi_dma_handle_t handle, off_t off, size_t len,
1065     uint_t cache_flags)
1066 {
1067 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1068 	    off_t, size_t, uint_t);
1069 
1070 	if (dip != ddi_root_node())
1071 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1072 
1073 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
1074 	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
1075 }
1076 
1077 int
1078 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1079     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1080     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1081 {
1082 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1083 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1084 
1085 	if (dip != ddi_root_node())
1086 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1087 
1088 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
1089 	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
1090 	    cookiep, ccountp));
1091 }
1092 
1093 int
1094 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1095 {
1096 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1097 	dev_info_t *dip, *rdip;
1098 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1099 	    size_t, uint_t);
1100 
1101 	/*
1102 	 * the DMA nexus driver will set DMP_NOSYNC if the
1103 	 * platform does not require any sync operation. For
1104 	 * example if the memory is uncached or consistent
1105 	 * and without any I/O write buffers involved.
1106 	 */
1107 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1108 		return (DDI_SUCCESS);
1109 
1110 	dip = rdip = hp->dmai_rdip;
1111 	if (dip != ddi_root_node())
1112 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1113 	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
1114 	return ((*funcp)(dip, rdip, h, o, l, whom));
1115 }
1116 
/*
 * ddi_dma_unbind_handle: Unbind the resources bound to a DMA handle,
 * deriving both dip and rdip from the handle itself.  Note the
 * asymmetry: the target dip is the cached unbindhdl implementor, but
 * the function pointer comes from the requester's cached
 * devi_bus_dma_unbindfunc -- presumably precomputed at bind time;
 * confirm against ddi_dma_buf/addr_bind_handle.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
1130 
1131 #endif	/* !__sparc */
1132 
/*
 * ddi_dma_free: Release the DMA resources associated with handle h by
 * issuing the DDI_DMA_FREE command through ddi_dma_mctl.  The HD
 * arguments appear to derive dip/rdip from the handle -- see the HD
 * macro definition earlier in this file to confirm.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}
1138 
1139 int
1140 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1141 {
1142 	ddi_dma_lim_t defalt;
1143 	size_t size = len;
1144 
1145 	if (!limp) {
1146 		defalt = standard_limits;
1147 		limp = &defalt;
1148 	}
1149 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1150 	    iopbp, NULL, NULL));
1151 }
1152 
/*
 * ddi_iopb_free: Free an I/O parameter block previously allocated via
 * ddi_iopb_alloc.
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}
1158 
1159 int
1160 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1161 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1162 {
1163 	ddi_dma_lim_t defalt;
1164 	size_t size = length;
1165 
1166 	if (!limits) {
1167 		defalt = standard_limits;
1168 		limits = &defalt;
1169 	}
1170 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1171 	    1, 0, kaddrp, real_length, NULL));
1172 }
1173 
/*
 * ddi_mem_free: Free DMA-able memory previously allocated via
 * ddi_mem_alloc.
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}
1179 
1180 /*
1181  * DMA attributes, alignment, burst sizes, and transfer minimums
1182  */
1183 int
1184 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1185 {
1186 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1187 
1188 	if (attrp == NULL)
1189 		return (DDI_FAILURE);
1190 	*attrp = dimp->dmai_attr;
1191 	return (DDI_SUCCESS);
1192 }
1193 
1194 int
1195 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1196 {
1197 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1198 
1199 	if (!dimp)
1200 		return (0);
1201 	else
1202 		return (dimp->dmai_burstsizes);
1203 }
1204 
/*
 * ddi_dma_devalign: Report the device alignment and minimum effective
 * transfer size for a DMA handle.  Alignment is derived from the
 * lowest set bit of the handle's burst-size bitmap; for SBus 64-bit
 * handles the upper 16 bits of dmai_burstsizes are preferred when
 * nonzero (presumably the 64-bit burst encoding -- confirm against the
 * SBus nexus).  Returns DDI_FAILURE on any NULL argument.
 */
int
ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
{
	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;

	if (!dimp || !alignment || !mineffect)
		return (DDI_FAILURE);
	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
	} else {
		/* prefer 64-bit burst sizes (upper bits) when present */
		if (dimp->dmai_burstsizes & 0xff0000) {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
		} else {
			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
		}
	}
	*mineffect = dimp->dmai_minxfer;
	return (DDI_SUCCESS);
}
1224 
1225 int
1226 ddi_iomin(dev_info_t *a, int i, int stream)
1227 {
1228 	int r;
1229 
1230 	/*
1231 	 * Make sure that the initial value is sane
1232 	 */
1233 	if (i & (i - 1))
1234 		return (0);
1235 	if (i == 0)
1236 		i = (stream) ? 4 : 1;
1237 
1238 	r = ddi_ctlops(a, a,
1239 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1240 	if (r != DDI_SUCCESS || (i & (i - 1)))
1241 		return (0);
1242 	return (i);
1243 }
1244 
1245 /*
1246  * Given two DMA attribute structures, apply the attributes
1247  * of one to the other, following the rules of attributes
1248  * and the wishes of the caller.
1249  *
1250  * The rules of DMA attribute structures are that you cannot
1251  * make things *less* restrictive as you apply one set
1252  * of attributes to another.
1253  *
1254  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* address window can only shrink: raise the low, lower the high */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* keep only the burst sizes both attribute sets support */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/*
	 * unsigned compare -- presumably so a negative (unlimited)
	 * sgllen loses to any positive limit; confirm against
	 * ddi_dma_attr(9S) sgllen semantics.
	 */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
1278 
1279 /*
1280  * mmap/segmap interface:
1281  */
1282 
1283 /*
1284  * ddi_segmap:		setup the default segment driver. Calls the drivers
1285  *			XXmmap routine to validate the range to be mapped.
1286  *			Return ENXIO of the range is not valid.  Create
1287  *			a seg_dev segment that contains all of the
1288  *			necessary information and will reference the
1289  *			default segment driver routines. It returns zero
1290  *			on success or non-zero on failure.
1291  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* spec_segmap is the specfs default segment-driver setup routine */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate entirely to specfs; see block comment above. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1302 
1303 /*
1304  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1305  *			drivers. Allows each successive parent to resolve
1306  *			address translations and add its mappings to the
1307  *			mapping list supplied in the page structure. It
1308  *			returns zero on success	or non-zero on failure.
1309  */
1310 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* dip serves as both the requesting and the target node here */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1317 
1318 /*
1319  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1320  *	Invokes platform specific DDI to determine whether attributes specified
1321  *	in attr(9s) are	valid for the region of memory that will be made
1322  *	available for direct access to user process via the mmap(2) system call.
1323  */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.  The handle is temporary: it exists only for
	 * the duration of the ddi_map call below.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE validates the mapping without establishing it.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): ah_hat_flags is copied out even when ddi_map
	 * failed; callers must check the return value before using it.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1386 
1387 
1388 /*
1389  * Property functions:	 See also, ddipropdefs.h.
1390  *
1391  * These functions are the framework for the property functions,
1392  * i.e. they support software defined properties.  All implementation
1393  * specific property handling (i.e.: self-identifying devices and
1394  * PROM defined properties are handled in the implementation specific
1395  * functions (defined in ddi_implfuncs.h).
1396  */
1397 
1398 /*
1399  * nopropop:	Shouldn't be called, right?
1400  */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* stub prop_op entry point: always reports the prop as absent */
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1408 
1409 #ifdef	DDI_PROP_DEBUG
1410 int ddi_prop_debug_flag = 0;
1411 
1412 int
1413 ddi_prop_debug(int enable)
1414 {
1415 	int prev = ddi_prop_debug_flag;
1416 
1417 	if ((enable != 0) || (prev != 0))
1418 		printf("ddi_prop_debug: debugging %s\n",
1419 		    enable ? "enabled" : "disabled");
1420 	ddi_prop_debug_flag = enable;
1421 	return (prev);
1422 }
1423 
1424 #endif	/* DDI_PROP_DEBUG */
1425 
1426 /*
1427  * Search a property list for a match, if found return pointer
1428  * to matching prop struct, else return NULL.
1429  */
1430 
1431 ddi_prop_t *
1432 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1433 {
1434 	ddi_prop_t	*propp;
1435 
1436 	/*
1437 	 * find the property in child's devinfo:
1438 	 * Search order defined by this search function is first matching
1439 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1440 	 * dev == propp->prop_dev, name == propp->name, and the correct
1441 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1442 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1443 	 */
1444 	if (dev == DDI_DEV_T_NONE)
1445 		dev = DDI_DEV_T_ANY;
1446 
1447 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1448 
1449 		if (!DDI_STRSAME(propp->prop_name, name))
1450 			continue;
1451 
1452 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1453 			continue;
1454 
1455 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1456 			continue;
1457 
1458 		return (propp);
1459 	}
1460 
1461 	return ((ddi_prop_t *)0);
1462 }
1463 
1464 /*
1465  * Search for property within devnames structures
1466  */
/*
 * i_ddi_search_global_prop: Look up a driver-global property in the
 * devnames entry for dev's major number.  Matching is by name, by
 * dev_t (skipped for rootnex-global properties or LDI any-dev
 * lookups), and by data type.  Returns the matching ddi_prop_t (the
 * list remains owned by the devnames entry) or NULL.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* no global property list for this driver */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * dev must match unless the lookup is rootnex-global or
		 * an LDI any-dev lookup.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1514 
1515 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1516 
1517 /*
1518  * ddi_prop_search_global:
1519  *	Search the global property list within devnames
1520  *	for the named property.  Return the encoded value.
1521  */
1522 static int
1523 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1524     void *valuep, uint_t *lengthp)
1525 {
1526 	ddi_prop_t	*propp;
1527 	caddr_t		buffer;
1528 
1529 	propp =  i_ddi_search_global_prop(dev, name, flags);
1530 
1531 	/* Property NOT found, bail */
1532 	if (propp == (ddi_prop_t *)0)
1533 		return (DDI_PROP_NOT_FOUND);
1534 
1535 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1536 		return (DDI_PROP_UNDEFINED);
1537 
1538 	if ((buffer = kmem_alloc(propp->prop_len,
1539 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1540 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1541 		return (DDI_PROP_NO_MEMORY);
1542 	}
1543 
1544 	/*
1545 	 * Return the encoded data
1546 	 */
1547 	*(caddr_t *)valuep = buffer;
1548 	*lengthp = propp->prop_len;
1549 	bcopy(propp->prop_val, buffer, propp->prop_len);
1550 
1551 	return (DDI_PROP_SUCCESS);
1552 }
1553 
1554 /*
1555  * ddi_prop_search_common:	Lookup and return the encoded value
1556  */
/*
 * ddi_prop_search_common: Core software-property lookup.  Searches the
 * property lists of dip (driver, system, driver-global, then boot/hw)
 * under devi_lock, then iterates up the devinfo tree via each parent's
 * bus_prop_op until the property is found, DDI_PROP_DONTPASS stops the
 * ascent, or the root is reached.  prop_op selects length-only,
 * caller-buffer, or provider-allocated return semantics; for the
 * allocating + CANSLEEP case the buffer is preallocated outside the
 * lock and the search retried, since the property may change size
 * while the lock is dropped.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* KM_SLEEP buffer, alloc'd unlocked */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* callers buffer (length i) must be big enough */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend one level and search again */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1784 
1785 
1786 /*
1787  * ddi_prop_op: The basic property operator for drivers.
1788  *
1789  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1790  *
1791  *	prop_op			valuep
1792  *	------			------
1793  *
1794  *	PROP_LEN		<unused>
1795  *
1796  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1797  *
1798  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1799  *				address of allocated buffer, if successful)
1800  */
1801 int
1802 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1803     char *name, caddr_t valuep, int *lengthp)
1804 {
1805 	int	i;
1806 
1807 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1808 
1809 	/*
1810 	 * If this was originally an LDI prop lookup then we bail here.
1811 	 * The reason is that the LDI property lookup interfaces first call
1812 	 * a drivers prop_op() entry point to allow it to override
1813 	 * properties.  But if we've made it here, then the driver hasn't
1814 	 * overriden any properties.  We don't want to continue with the
1815 	 * property search here because we don't have any type inforamtion.
1816 	 * When we return failure, the LDI interfaces will then proceed to
1817 	 * call the typed property interfaces to look up the property.
1818 	 */
1819 	if (mod_flags & DDI_PROP_DYNAMIC)
1820 		return (DDI_PROP_NOT_FOUND);
1821 
1822 	/*
1823 	 * check for pre-typed property consumer asking for typed property:
1824 	 * see e_ddi_getprop_int64.
1825 	 */
1826 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1827 		mod_flags |= DDI_PROP_TYPE_INT64;
1828 	mod_flags |= DDI_PROP_TYPE_ANY;
1829 
1830 	i = ddi_prop_search_common(dev, dip, prop_op,
1831 	    mod_flags, name, valuep, (uint_t *)lengthp);
1832 	if (i == DDI_PROP_FOUND_1275)
1833 		return (DDI_PROP_SUCCESS);
1834 	return (i);
1835 }
1836 
1837 /*
1838  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1839  * maintain size in number of blksize blocks.  Provides a dynamic property
1840  * implementation for size oriented properties based on nblocks64 and blksize
1841  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1842  * is too large.  This interface should not be used with a nblocks64 that
1843  * represents the driver's idea of how to represent unknown, if nblocks is
1844  * unknown use ddi_prop_op.
1845  */
1846 int
1847 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1848     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1849     uint64_t nblocks64, uint_t blksize)
1850 {
1851 	uint64_t size64;
1852 	int	blkshift;
1853 
1854 	/* convert block size to shift value */
1855 	ASSERT(BIT_ONLYONESET(blksize));
1856 	blkshift = highbit(blksize) - 1;
1857 
1858 	/*
1859 	 * There is no point in supporting nblocks64 values that don't have
1860 	 * an accurate uint64_t byte count representation.
1861 	 */
1862 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1863 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1864 		    name, valuep, lengthp));
1865 
1866 	size64 = nblocks64 << blkshift;
1867 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1868 	    name, valuep, lengthp, size64, blksize));
1869 }
1870 
1871 /*
1872  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1873  */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* convenience wrapper: fixed DEV_BSIZE block size */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1881 
1882 /*
1883  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1884  * maintain size in bytes. Provides a of dynamic property implementation for
1885  * size oriented properties based on size64 value and blksize passed in by the
1886  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1887  * should not be used with a size64 that represents the driver's idea of how
1888  * to represent unknown, if size is unknown use ddi_prop_op.
1889  *
1890  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1891  * integers. While the most likely interface to request them ([bc]devi_size)
1892  * is declared int (signed) there is no enforcement of this, which means we
1893  * can't enforce limitations here without risking regression.
1894  */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * Dispatch on property name: 64-bit "Size"/"Nblocks" are always
	 * served; legacy 32-bit "size"/"nblocks"/"blksize" only when the
	 * value fits in a uint32_t, else fall through to ddi_prop_op.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1985 
1986 /*
1987  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1988  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* convenience wrapper: fixed DEV_BSIZE block size */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1996 
1997 /*
1998  * Variable length props...
1999  */
2000 
2001 /*
2002  * ddi_getlongprop:	Get variable length property len+val into a buffer
2003  *		allocated by property provider via kmem_alloc. Requester
2004  *		is responsible for freeing returned property via kmem_free.
2005  *
2006  *	Arguments:
2007  *
2008  *	dev_t:	Input:	dev_t of property.
2009  *	dip:	Input:	dev_info_t pointer of child.
2010  *	flags:	Input:	Possible flag modifiers are:
2011  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
2012  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
2013  *	name:	Input:	name of property.
2014  *	valuep:	Output:	Addr of callers buffer pointer.
2015  *	lengthp:Output:	*lengthp will contain prop length on exit.
2016  *
2017  *	Possible Returns:
2018  *
2019  *		DDI_PROP_SUCCESS:	Prop found and returned.
2020  *		DDI_PROP_NOT_FOUND:	Prop not found
2021  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
2022  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
2023  */
2024 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* provider allocates; caller must kmem_free (see comment above) */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
2032 
2033 /*
2034  *
2035  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2036  *				buffer. (no memory allocation by provider).
2037  *
2038  *	dev_t:	Input:	dev_t of property.
2039  *	dip:	Input:	dev_info_t pointer of child.
2040  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2041  *	name:	Input:	name of property
2042  *	valuep:	Input:	ptr to callers buffer.
2043  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2044  *			actual length of property on exit.
2045  *
2046  *	Possible returns:
2047  *
2048  *		DDI_PROP_SUCCESS	Prop found and returned
2049  *		DDI_PROP_NOT_FOUND	Prop not found
2050  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2051  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2052  *					no value returned, but actual prop
2053  *					length returned in *lengthp
2054  *
2055  */
2056 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* caller supplies the buffer; see the contract comment above */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2064 
2065 /*
2066  * Integer/boolean sized props.
2067  *
2068  * Call is value only... returns found boolean or int sized prop value or
2069  * defvalue if prop not found or is wrong length or is explicitly undefined.
2070  * Only flag is DDI_PROP_DONTPASS...
2071  *
2072  * By convention, this interface returns boolean (0) sized properties
2073  * as value (int)1.
2074  *
2075  * This never returns an error, if property not found or specifically
2076  * undefined, the input `defvalue' is returned.
2077  */
2078 
2079 int
2080 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2081 {
2082 	int	propvalue = defvalue;
2083 	int	proplength = sizeof (int);
2084 	int	error;
2085 
2086 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2087 	    flags, name, (caddr_t)&propvalue, &proplength);
2088 
2089 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2090 		propvalue = 1;
2091 
2092 	return (propvalue);
2093 }
2094 
2095 /*
2096  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2097  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2098  */
2099 
2100 int
2101 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2102 {
2103 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2104 }
2105 
2106 /*
2107  * Allocate a struct prop_driver_data, along with 'size' bytes
2108  * for decoded property data.  This structure is freed by
2109  * calling ddi_prop_free(9F).
2110  */
2111 static void *
2112 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2113 {
2114 	struct prop_driver_data *pdd;
2115 
2116 	/*
2117 	 * Allocate a structure with enough memory to store the decoded data.
2118 	 */
2119 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2120 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2121 	pdd->pdd_prop_free = prop_free;
2122 
2123 	/*
2124 	 * Return a pointer to the location to put the decoded data.
2125 	 */
2126 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2127 }
2128 
2129 /*
2130  * Allocated the memory needed to store the encoded data in the property
2131  * handle.
2132  */
2133 static int
2134 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2135 {
2136 	/*
2137 	 * If size is zero, then set data to NULL and size to 0.  This
2138 	 * is a boolean property.
2139 	 */
2140 	if (size == 0) {
2141 		ph->ph_size = 0;
2142 		ph->ph_data = NULL;
2143 		ph->ph_cur_pos = NULL;
2144 		ph->ph_save_pos = NULL;
2145 	} else {
2146 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2147 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2148 			if (ph->ph_data == NULL)
2149 				return (DDI_PROP_NO_MEMORY);
2150 		} else
2151 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2152 		ph->ph_size = size;
2153 		ph->ph_cur_pos = ph->ph_data;
2154 		ph->ph_save_pos = ph->ph_data;
2155 	}
2156 	return (DDI_PROP_SUCCESS);
2157 }
2158 
2159 /*
2160  * Free the space allocated by the lookup routines.  Each lookup routine
2161  * returns a pointer to the decoded data to the driver.  The driver then
2162  * passes this pointer back to us.  This data actually lives in a struct
2163  * prop_driver_data.  We use negative indexing to find the beginning of
2164  * the structure and then free the entire structure using the size and
2165  * the free routine stored in the structure.
2166  */
2167 void
2168 ddi_prop_free(void *datap)
2169 {
2170 	struct prop_driver_data *pdd;
2171 
2172 	/*
2173 	 * Get the structure
2174 	 */
2175 	pdd = (struct prop_driver_data *)
2176 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
2177 	/*
2178 	 * Call the free routine to free it
2179 	 */
2180 	(*pdd->pdd_prop_free)(pdd);
2181 }
2182 
2183 /*
2184  * Free the data associated with an array of ints,
2185  * allocated with ddi_prop_decode_alloc().
2186  */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded integer data. */
	kmem_free(pdd, pdd->pdd_size);
}
2192 
2193 /*
2194  * Free a single string property or a single string contained within
2195  * the argv style return value of an array of strings.
2196  */
2197 static void
2198 ddi_prop_free_string(struct prop_driver_data *pdd)
2199 {
2200 	kmem_free(pdd, pdd->pdd_size);
2201 
2202 }
2203 
2204 /*
2205  * Free an array of strings.
2206  */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header, the pointer array and the strings. */
	kmem_free(pdd, pdd->pdd_size);
}
2212 
2213 /*
2214  * Free the data associated with an array of bytes.
2215  */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded byte data. */
	kmem_free(pdd, pdd->pdd_size);
}
2221 
2222 /*
2223  * Reset the current location pointer in the property handle to the
2224  * beginning of the data.
2225  */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both the current and the saved position to the start. */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
2232 
/*
 * Save the current location pointer in the property handle so that it
 * can later be restored with ddi_prop_restore_pos().
 */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	/* Remember the current position so it can be restored later. */
	ph->ph_save_pos = ph->ph_cur_pos;
}
2242 
/*
 * Restore the current location pointer to the previously saved position.
 */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* Rewind the current position to the previously saved one. */
	ph->ph_cur_pos = ph->ph_save_pos;
}
2251 
2252 /*
2253  * Property encode/decode functions
2254  */
2255 
2256 /*
2257  * Decode a single integer property
2258  */
2259 static int
2260 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2261 {
2262 	int	i;
2263 	int	tmp;
2264 
2265 	/*
2266 	 * If there is nothing to decode return an error
2267 	 */
2268 	if (ph->ph_size == 0)
2269 		return (DDI_PROP_END_OF_DATA);
2270 
2271 	/*
2272 	 * Decode the property as a single integer and return it
2273 	 * in data if we were able to decode it.
2274 	 */
2275 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2276 	if (i < DDI_PROP_RESULT_OK) {
2277 		switch (i) {
2278 		case DDI_PROP_RESULT_EOF:
2279 			return (DDI_PROP_END_OF_DATA);
2280 
2281 		case DDI_PROP_RESULT_ERROR:
2282 			return (DDI_PROP_CANNOT_DECODE);
2283 		}
2284 	}
2285 
2286 	*(int *)data = tmp;
2287 	*nelements = 1;
2288 	return (DDI_PROP_SUCCESS);
2289 }
2290 
2291 /*
2292  * Decode a single 64 bit integer property
2293  */
2294 static int
2295 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2296 {
2297 	int	i;
2298 	int64_t	tmp;
2299 
2300 	/*
2301 	 * If there is nothing to decode return an error
2302 	 */
2303 	if (ph->ph_size == 0)
2304 		return (DDI_PROP_END_OF_DATA);
2305 
2306 	/*
2307 	 * Decode the property as a single integer and return it
2308 	 * in data if we were able to decode it.
2309 	 */
2310 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2311 	if (i < DDI_PROP_RESULT_OK) {
2312 		switch (i) {
2313 		case DDI_PROP_RESULT_EOF:
2314 			return (DDI_PROP_END_OF_DATA);
2315 
2316 		case DDI_PROP_RESULT_ERROR:
2317 			return (DDI_PROP_CANNOT_DECODE);
2318 		}
2319 	}
2320 
2321 	*(int64_t *)data = tmp;
2322 	*nelements = 1;
2323 	return (DDI_PROP_SUCCESS);
2324 }
2325 
2326 /*
2327  * Decode an array of integers property
2328  */
2329 static int
2330 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2331 {
2332 	int	i;
2333 	int	cnt = 0;
2334 	int	*tmp;
2335 	int	*intp;
2336 	int	n;
2337 
2338 	/*
2339 	 * Figure out how many array elements there are by going through the
2340 	 * data without decoding it first and counting.
2341 	 */
2342 	for (;;) {
2343 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2344 		if (i < 0)
2345 			break;
2346 		cnt++;
2347 	}
2348 
2349 	/*
2350 	 * If there are no elements return an error
2351 	 */
2352 	if (cnt == 0)
2353 		return (DDI_PROP_END_OF_DATA);
2354 
2355 	/*
2356 	 * If we cannot skip through the data, we cannot decode it
2357 	 */
2358 	if (i == DDI_PROP_RESULT_ERROR)
2359 		return (DDI_PROP_CANNOT_DECODE);
2360 
2361 	/*
2362 	 * Reset the data pointer to the beginning of the encoded data
2363 	 */
2364 	ddi_prop_reset_pos(ph);
2365 
2366 	/*
2367 	 * Allocated memory to store the decoded value in.
2368 	 */
2369 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2370 	    ddi_prop_free_ints);
2371 
2372 	/*
2373 	 * Decode each element and place it in the space we just allocated
2374 	 */
2375 	tmp = intp;
2376 	for (n = 0; n < cnt; n++, tmp++) {
2377 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2378 		if (i < DDI_PROP_RESULT_OK) {
2379 			/*
2380 			 * Free the space we just allocated
2381 			 * and return an error.
2382 			 */
2383 			ddi_prop_free(intp);
2384 			switch (i) {
2385 			case DDI_PROP_RESULT_EOF:
2386 				return (DDI_PROP_END_OF_DATA);
2387 
2388 			case DDI_PROP_RESULT_ERROR:
2389 				return (DDI_PROP_CANNOT_DECODE);
2390 			}
2391 		}
2392 	}
2393 
2394 	*nelements = cnt;
2395 	*(int **)data = intp;
2396 
2397 	return (DDI_PROP_SUCCESS);
2398 }
2399 
2400 /*
2401  * Decode a 64 bit integer array property
2402  */
2403 static int
2404 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2405 {
2406 	int	i;
2407 	int	n;
2408 	int	cnt = 0;
2409 	int64_t	*tmp;
2410 	int64_t	*intp;
2411 
2412 	/*
2413 	 * Count the number of array elements by going
2414 	 * through the data without decoding it.
2415 	 */
2416 	for (;;) {
2417 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2418 		if (i < 0)
2419 			break;
2420 		cnt++;
2421 	}
2422 
2423 	/*
2424 	 * If there are no elements return an error
2425 	 */
2426 	if (cnt == 0)
2427 		return (DDI_PROP_END_OF_DATA);
2428 
2429 	/*
2430 	 * If we cannot skip through the data, we cannot decode it
2431 	 */
2432 	if (i == DDI_PROP_RESULT_ERROR)
2433 		return (DDI_PROP_CANNOT_DECODE);
2434 
2435 	/*
2436 	 * Reset the data pointer to the beginning of the encoded data
2437 	 */
2438 	ddi_prop_reset_pos(ph);
2439 
2440 	/*
2441 	 * Allocate memory to store the decoded value.
2442 	 */
2443 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2444 	    ddi_prop_free_ints);
2445 
2446 	/*
2447 	 * Decode each element and place it in the space allocated
2448 	 */
2449 	tmp = intp;
2450 	for (n = 0; n < cnt; n++, tmp++) {
2451 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2452 		if (i < DDI_PROP_RESULT_OK) {
2453 			/*
2454 			 * Free the space we just allocated
2455 			 * and return an error.
2456 			 */
2457 			ddi_prop_free(intp);
2458 			switch (i) {
2459 			case DDI_PROP_RESULT_EOF:
2460 				return (DDI_PROP_END_OF_DATA);
2461 
2462 			case DDI_PROP_RESULT_ERROR:
2463 				return (DDI_PROP_CANNOT_DECODE);
2464 			}
2465 		}
2466 	}
2467 
2468 	*nelements = cnt;
2469 	*(int64_t **)data = intp;
2470 
2471 	return (DDI_PROP_SUCCESS);
2472 }
2473 
2474 /*
2475  * Encode an array of integers property (Can be one element)
2476  */
2477 int
2478 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2479 {
2480 	int	i;
2481 	int	*tmp;
2482 	int	cnt;
2483 	int	size;
2484 
2485 	/*
2486 	 * If there is no data, we cannot do anything
2487 	 */
2488 	if (nelements == 0)
2489 		return (DDI_PROP_CANNOT_ENCODE);
2490 
2491 	/*
2492 	 * Get the size of an encoded int.
2493 	 */
2494 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2495 
2496 	if (size < DDI_PROP_RESULT_OK) {
2497 		switch (size) {
2498 		case DDI_PROP_RESULT_EOF:
2499 			return (DDI_PROP_END_OF_DATA);
2500 
2501 		case DDI_PROP_RESULT_ERROR:
2502 			return (DDI_PROP_CANNOT_ENCODE);
2503 		}
2504 	}
2505 
2506 	/*
2507 	 * Allocate space in the handle to store the encoded int.
2508 	 */
2509 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2510 	    DDI_PROP_SUCCESS)
2511 		return (DDI_PROP_NO_MEMORY);
2512 
2513 	/*
2514 	 * Encode the array of ints.
2515 	 */
2516 	tmp = (int *)data;
2517 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2518 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2519 		if (i < DDI_PROP_RESULT_OK) {
2520 			switch (i) {
2521 			case DDI_PROP_RESULT_EOF:
2522 				return (DDI_PROP_END_OF_DATA);
2523 
2524 			case DDI_PROP_RESULT_ERROR:
2525 				return (DDI_PROP_CANNOT_ENCODE);
2526 			}
2527 		}
2528 	}
2529 
2530 	return (DDI_PROP_SUCCESS);
2531 }
2532 
2533 
2534 /*
2535  * Encode a 64 bit integer array property
2536  */
2537 int
2538 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2539 {
2540 	int i;
2541 	int cnt;
2542 	int size;
2543 	int64_t *tmp;
2544 
2545 	/*
2546 	 * If there is no data, we cannot do anything
2547 	 */
2548 	if (nelements == 0)
2549 		return (DDI_PROP_CANNOT_ENCODE);
2550 
2551 	/*
2552 	 * Get the size of an encoded 64 bit int.
2553 	 */
2554 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2555 
2556 	if (size < DDI_PROP_RESULT_OK) {
2557 		switch (size) {
2558 		case DDI_PROP_RESULT_EOF:
2559 			return (DDI_PROP_END_OF_DATA);
2560 
2561 		case DDI_PROP_RESULT_ERROR:
2562 			return (DDI_PROP_CANNOT_ENCODE);
2563 		}
2564 	}
2565 
2566 	/*
2567 	 * Allocate space in the handle to store the encoded int.
2568 	 */
2569 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2570 	    DDI_PROP_SUCCESS)
2571 		return (DDI_PROP_NO_MEMORY);
2572 
2573 	/*
2574 	 * Encode the array of ints.
2575 	 */
2576 	tmp = (int64_t *)data;
2577 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2578 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2579 		if (i < DDI_PROP_RESULT_OK) {
2580 			switch (i) {
2581 			case DDI_PROP_RESULT_EOF:
2582 				return (DDI_PROP_END_OF_DATA);
2583 
2584 			case DDI_PROP_RESULT_ERROR:
2585 				return (DDI_PROP_CANNOT_ENCODE);
2586 			}
2587 		}
2588 	}
2589 
2590 	return (DDI_PROP_SUCCESS);
2591 }
2592 
2593 /*
2594  * Decode a single string property
2595  */
2596 static int
2597 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2598 {
2599 	char		*tmp;
2600 	char		*str;
2601 	int		i;
2602 	int		size;
2603 
2604 	/*
2605 	 * If there is nothing to decode return an error
2606 	 */
2607 	if (ph->ph_size == 0)
2608 		return (DDI_PROP_END_OF_DATA);
2609 
2610 	/*
2611 	 * Get the decoded size of the encoded string.
2612 	 */
2613 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2614 	if (size < DDI_PROP_RESULT_OK) {
2615 		switch (size) {
2616 		case DDI_PROP_RESULT_EOF:
2617 			return (DDI_PROP_END_OF_DATA);
2618 
2619 		case DDI_PROP_RESULT_ERROR:
2620 			return (DDI_PROP_CANNOT_DECODE);
2621 		}
2622 	}
2623 
2624 	/*
2625 	 * Allocated memory to store the decoded value in.
2626 	 */
2627 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2628 
2629 	ddi_prop_reset_pos(ph);
2630 
2631 	/*
2632 	 * Decode the str and place it in the space we just allocated
2633 	 */
2634 	tmp = str;
2635 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2636 	if (i < DDI_PROP_RESULT_OK) {
2637 		/*
2638 		 * Free the space we just allocated
2639 		 * and return an error.
2640 		 */
2641 		ddi_prop_free(str);
2642 		switch (i) {
2643 		case DDI_PROP_RESULT_EOF:
2644 			return (DDI_PROP_END_OF_DATA);
2645 
2646 		case DDI_PROP_RESULT_ERROR:
2647 			return (DDI_PROP_CANNOT_DECODE);
2648 		}
2649 	}
2650 
2651 	*(char **)data = str;
2652 	*nelements = 1;
2653 
2654 	return (DDI_PROP_SUCCESS);
2655 }
2656 
2657 /*
2658  * Decode an array of strings.
2659  */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int		cnt = 0;
	char		**strs;
	char		**tmp;
	char		*ptr;
	int		i;
	int		n;
	int		size;
	size_t		nbytes;

	/*
	 * Three passes over the encoded data: (1) count the strings,
	 * (2) size each string to lay out an argv-style pointer array
	 * followed by the string bodies in one allocation, and
	 * (3) decode each string into place.  The result handed back
	 * through *data is a NULL-terminated char ** that the caller
	 * releases with ddi_prop_free(9F).
	 */

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * cnt string pointers, one terminating NULL pointer, and the
	 * decoded bytes of every string.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 * (Nothing has been allocated yet, so an early return
		 * here leaks nothing.)
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2786 
2787 /*
2788  * Encode a string.
2789  */
2790 int
2791 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2792 {
2793 	char		**tmp;
2794 	int		size;
2795 	int		i;
2796 
2797 	/*
2798 	 * If there is no data, we cannot do anything
2799 	 */
2800 	if (nelements == 0)
2801 		return (DDI_PROP_CANNOT_ENCODE);
2802 
2803 	/*
2804 	 * Get the size of the encoded string.
2805 	 */
2806 	tmp = (char **)data;
2807 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2808 	if (size < DDI_PROP_RESULT_OK) {
2809 		switch (size) {
2810 		case DDI_PROP_RESULT_EOF:
2811 			return (DDI_PROP_END_OF_DATA);
2812 
2813 		case DDI_PROP_RESULT_ERROR:
2814 			return (DDI_PROP_CANNOT_ENCODE);
2815 		}
2816 	}
2817 
2818 	/*
2819 	 * Allocate space in the handle to store the encoded string.
2820 	 */
2821 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2822 		return (DDI_PROP_NO_MEMORY);
2823 
2824 	ddi_prop_reset_pos(ph);
2825 
2826 	/*
2827 	 * Encode the string.
2828 	 */
2829 	tmp = (char **)data;
2830 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2831 	if (i < DDI_PROP_RESULT_OK) {
2832 		switch (i) {
2833 		case DDI_PROP_RESULT_EOF:
2834 			return (DDI_PROP_END_OF_DATA);
2835 
2836 		case DDI_PROP_RESULT_ERROR:
2837 			return (DDI_PROP_CANNOT_ENCODE);
2838 		}
2839 	}
2840 
2841 	return (DDI_PROP_SUCCESS);
2842 }
2843 
2844 
2845 /*
2846  * Encode an array of strings.
2847  */
2848 int
2849 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2850 {
2851 	int		cnt = 0;
2852 	char		**tmp;
2853 	int		size;
2854 	uint_t		total_size;
2855 	int		i;
2856 
2857 	/*
2858 	 * If there is no data, we cannot do anything
2859 	 */
2860 	if (nelements == 0)
2861 		return (DDI_PROP_CANNOT_ENCODE);
2862 
2863 	/*
2864 	 * Get the total size required to encode all the strings.
2865 	 */
2866 	total_size = 0;
2867 	tmp = (char **)data;
2868 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2869 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2870 		if (size < DDI_PROP_RESULT_OK) {
2871 			switch (size) {
2872 			case DDI_PROP_RESULT_EOF:
2873 				return (DDI_PROP_END_OF_DATA);
2874 
2875 			case DDI_PROP_RESULT_ERROR:
2876 				return (DDI_PROP_CANNOT_ENCODE);
2877 			}
2878 		}
2879 		total_size += (uint_t)size;
2880 	}
2881 
2882 	/*
2883 	 * Allocate space in the handle to store the encoded strings.
2884 	 */
2885 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2886 		return (DDI_PROP_NO_MEMORY);
2887 
2888 	ddi_prop_reset_pos(ph);
2889 
2890 	/*
2891 	 * Encode the array of strings.
2892 	 */
2893 	tmp = (char **)data;
2894 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2895 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2896 		if (i < DDI_PROP_RESULT_OK) {
2897 			switch (i) {
2898 			case DDI_PROP_RESULT_EOF:
2899 				return (DDI_PROP_END_OF_DATA);
2900 
2901 			case DDI_PROP_RESULT_ERROR:
2902 				return (DDI_PROP_CANNOT_ENCODE);
2903 			}
2904 		}
2905 	}
2906 
2907 	return (DDI_PROP_SUCCESS);
2908 }
2909 
2910 
2911 /*
2912  * Decode an array of bytes.
2913  */
2914 static int
2915 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2916 {
2917 	uchar_t		*tmp;
2918 	int		nbytes;
2919 	int		i;
2920 
2921 	/*
2922 	 * If there are no elements return an error
2923 	 */
2924 	if (ph->ph_size == 0)
2925 		return (DDI_PROP_END_OF_DATA);
2926 
2927 	/*
2928 	 * Get the size of the encoded array of bytes.
2929 	 */
2930 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2931 	    data, ph->ph_size);
2932 	if (nbytes < DDI_PROP_RESULT_OK) {
2933 		switch (nbytes) {
2934 		case DDI_PROP_RESULT_EOF:
2935 			return (DDI_PROP_END_OF_DATA);
2936 
2937 		case DDI_PROP_RESULT_ERROR:
2938 			return (DDI_PROP_CANNOT_DECODE);
2939 		}
2940 	}
2941 
2942 	/*
2943 	 * Allocated memory to store the decoded value in.
2944 	 */
2945 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2946 
2947 	/*
2948 	 * Decode each element and place it in the space we just allocated
2949 	 */
2950 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2951 	if (i < DDI_PROP_RESULT_OK) {
2952 		/*
2953 		 * Free the space we just allocated
2954 		 * and return an error
2955 		 */
2956 		ddi_prop_free(tmp);
2957 		switch (i) {
2958 		case DDI_PROP_RESULT_EOF:
2959 			return (DDI_PROP_END_OF_DATA);
2960 
2961 		case DDI_PROP_RESULT_ERROR:
2962 			return (DDI_PROP_CANNOT_DECODE);
2963 		}
2964 	}
2965 
2966 	*(uchar_t **)data = tmp;
2967 	*nelements = nbytes;
2968 
2969 	return (DDI_PROP_SUCCESS);
2970 }
2971 
2972 /*
2973  * Encode an array of bytes.
2974  */
2975 int
2976 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2977 {
2978 	int		size;
2979 	int		i;
2980 
2981 	/*
2982 	 * If there are no elements, then this is a boolean property,
2983 	 * so just create a property handle with no data and return.
2984 	 */
2985 	if (nelements == 0) {
2986 		(void) ddi_prop_encode_alloc(ph, 0);
2987 		return (DDI_PROP_SUCCESS);
2988 	}
2989 
2990 	/*
2991 	 * Get the size of the encoded array of bytes.
2992 	 */
2993 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2994 	    nelements);
2995 	if (size < DDI_PROP_RESULT_OK) {
2996 		switch (size) {
2997 		case DDI_PROP_RESULT_EOF:
2998 			return (DDI_PROP_END_OF_DATA);
2999 
3000 		case DDI_PROP_RESULT_ERROR:
3001 			return (DDI_PROP_CANNOT_DECODE);
3002 		}
3003 	}
3004 
3005 	/*
3006 	 * Allocate space in the handle to store the encoded bytes.
3007 	 */
3008 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
3009 		return (DDI_PROP_NO_MEMORY);
3010 
3011 	/*
3012 	 * Encode the array of bytes.
3013 	 */
3014 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3015 	    nelements);
3016 	if (i < DDI_PROP_RESULT_OK) {
3017 		switch (i) {
3018 		case DDI_PROP_RESULT_EOF:
3019 			return (DDI_PROP_END_OF_DATA);
3020 
3021 		case DDI_PROP_RESULT_ERROR:
3022 			return (DDI_PROP_CANNOT_ENCODE);
3023 		}
3024 	}
3025 
3026 	return (DDI_PROP_SUCCESS);
3027 }
3028 
3029 /*
3030  * OBP 1275 integer, string and byte operators.
3031  *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
3049  *
3050  * DDI_PROP_CMD_GET_ESIZE:
3051  *
3052  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3053  *	DDI_PROP_RESULT_EOF:		end of data
3054  *	> 0:				the encoded size
3055  *
3056  * DDI_PROP_CMD_GET_DSIZE:
3057  *
3058  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3059  *	DDI_PROP_RESULT_EOF:		end of data
3060  *	> 0:				the decoded size
3061  */
3062 
3063 /*
3064  * OBP 1275 integer operator
3065  *
3066  * OBP properties are a byte stream of data, so integers may not be
3067  * properly aligned.  Therefore we need to copy them one byte at a time.
3068  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/*
			 * PROM data may hold a short (< 4 byte) final
			 * integer; clamp the read size to what remains.
			 *
			 * NOTE(review): the (int *) casts below scale the
			 * ph_size offset by sizeof (int); the intent was
			 * presumably byte arithmetic -- confirm before
			 * relying on this bound check.
			 */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			/* Same scaled-pointer caveat applies here. */
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the buffer: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3182 
3183 /*
3184  * 64 bit integer operator.
3185  *
3186  * This is an extension, defined by Sun, to the 1275 integer
3187  * operator.  This routine handles the encoding/decoding of
3188  * 64 bit integer properties.
3189  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit ints are never decoded from PROM data. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): the (int64_t *) casts scale the
			 * ph_size offset by sizeof (int64_t); presumably
			 * byte arithmetic was intended -- confirm before
			 * relying on this bound check.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 * (The PH_FROM_PROM case here is unreachable: the check
		 * above already returned for PROM-sourced handles.)
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the buffer: report EOF. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3297 
3298 /*
3299  * OBP 1275 string operator.
3300  *
3301  * OBP strings are NULL terminated.
3302  */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.  The caller is expected to
		 * have sized 'data' via DDI_PROP_CMD_GET_DSIZE.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy bytes up to and including the NULL terminator. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* Length includes the terminating NULL. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Return the string length plus one for the NULL
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Scan forward past the next NULL terminator, if any. */
		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 *
		 * NOTE: like DDI_PROP_CMD_SKIP, this command advances
		 * ph_cur_pos past the measured string as a side effect.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3446 
3447 /*
3448  * OBP 1275 byte operator
3449  *
3450  * Caller must specify the number of bytes to get.  OBP encodes bytes
3451  * as a byte so there is a 1-to-1 translation.
3452  */
3453 int
3454 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3455 	uint_t nelements)
3456 {
3457 	switch (cmd) {
3458 	case DDI_PROP_CMD_DECODE:
3459 		/*
3460 		 * Check that there is encoded data
3461 		 */
3462 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3463 		    ph->ph_size < nelements ||
3464 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3465 		    ph->ph_size - nelements)))
3466 			return (DDI_PROP_RESULT_ERROR);
3467 
3468 		/*
3469 		 * Copy out the bytes
3470 		 */
3471 		bcopy(ph->ph_cur_pos, data, nelements);
3472 
3473 		/*
3474 		 * Move the current location
3475 		 */
3476 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3477 		return (DDI_PROP_RESULT_OK);
3478 
3479 	case DDI_PROP_CMD_ENCODE:
3480 		/*
3481 		 * Check that there is room to encode the data
3482 		 */
3483 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3484 		    ph->ph_size < nelements ||
3485 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3486 		    ph->ph_size - nelements)))
3487 			return (DDI_PROP_RESULT_ERROR);
3488 
3489 		/*
3490 		 * Copy in the bytes
3491 		 */
3492 		bcopy(data, ph->ph_cur_pos, nelements);
3493 
3494 		/*
3495 		 * Move the current location to the start of the next bit of
3496 		 * space where we can store encoded data.
3497 		 */
3498 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3499 		return (DDI_PROP_RESULT_OK);
3500 
3501 	case DDI_PROP_CMD_SKIP:
3502 		/*
3503 		 * Check that there is encoded data
3504 		 */
3505 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3506 		    ph->ph_size < nelements)
3507 			return (DDI_PROP_RESULT_ERROR);
3508 
3509 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3510 		    ph->ph_size - nelements))
3511 			return (DDI_PROP_RESULT_EOF);
3512 
3513 		/*
3514 		 * Move the current location
3515 		 */
3516 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3517 		return (DDI_PROP_RESULT_OK);
3518 
3519 	case DDI_PROP_CMD_GET_ESIZE:
3520 		/*
3521 		 * The size in bytes of the encoded size is the
3522 		 * same as the decoded size provided by the caller.
3523 		 */
3524 		return (nelements);
3525 
3526 	case DDI_PROP_CMD_GET_DSIZE:
3527 		/*
3528 		 * Just return the number of bytes specified by the caller.
3529 		 */
3530 		return (nelements);
3531 
3532 	default:
3533 #ifdef DEBUG
3534 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3535 		/*NOTREACHED*/
3536 #else
3537 		return (DDI_PROP_RESULT_ERROR);
3538 #endif	/* DEBUG */
3539 	}
3540 }
3541 
3542 /*
3543  * Used for properties that come from the OBP, hardware configuration files,
3544  * or that are created by calls to ddi_prop_update(9F).
3545  */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* 32-bit integer encode/decode */
	ddi_prop_1275_string,	/* NULL-terminated string encode/decode */
	ddi_prop_1275_bytes,	/* raw byte array encode/decode */
	ddi_prop_int64_op	/* 64-bit integer encode/decode */
};
3552 
3553 
3554 /*
3555  * Interface to create/modify a managed property on child's behalf...
3556  * Flags interpreted are:
3557  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3558  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3559  *
3560  * Use same dev_t when modifying or undefining a property.
3561  * Search for properties with DDI_DEV_T_ANY to match first named
3562  * property on the list.
3563  *
3564  * Properties are stored LIFO and subsequently will match the first
3565  * `matching' instance.
3566  */
3567 
3568 /*
3569  * ddi_prop_add:	Add a software defined property
3570  */
3571 
3572 /*
3573  * define to get a new ddi_prop_t.
3574  * km_flags are KM_SLEEP or KM_NOSLEEP.
3575  */
3576 
/* Allocate a zero-filled ddi_prop_t; returns NULL on KM_NOSLEEP failure. */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3579 
/*
 * Allocate a new ddi_prop_t, copy in the name and (optionally) the value,
 * and link it LIFO onto the dip's driver, system, or hardware property
 * list as selected by flags.  Returns DDI_PROP_SUCCESS, or
 * DDI_PROP_INVAL_ARG / DDI_PROP_NO_MEMORY on failure.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/* Callers that can block get sleeping allocations. */
	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select the target list: system, hardware, or (default) driver. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		/* Roll back the ddi_prop_t allocation on failure. */
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				/* Roll back name and ddi_prop_t allocations */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3678 
3679 
3680 /*
3681  * ddi_prop_change:	Modify a software managed property value
3682  *
3683  *			Set new length and value if found.
3684  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3685  *			input name is the NULL string.
3686  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3687  *
3688  *			Note: an undef can be modified to be a define,
3689  *			(you can't go the other way.)
3690  */
3691 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (Allocating before taking devi_lock avoids sleeping allocation
	 * while holding the mutex.)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found: replace the old value with the preallocated
		 * buffer and free the previous value, if any.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		/* A modify turns a prior explicit undefine into a define. */
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: discard the preallocated buffer and create anew. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3761 
3762 /*
3763  * Common update routine used to update and encode a property.	Creates
3764  * a property handle, calls the property encode routine, figures out if
3765  * the property already exists and updates if it does.	Otherwise it
3766  * creates if it does not exist.
3767  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		/* Release any partially-encoded buffer before failing. */
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3847 
3848 
3849 /*
3850  * ddi_prop_create:	Define a managed property:
3851  *			See above for details.
3852  */
3853 
3854 int
3855 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3856     char *name, caddr_t value, int length)
3857 {
3858 	if (!(flag & DDI_PROP_CANSLEEP)) {
3859 		flag |= DDI_PROP_DONTSLEEP;
3860 #ifdef DDI_PROP_DEBUG
3861 		if (length != 0)
3862 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3863 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3864 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3865 #endif /* DDI_PROP_DEBUG */
3866 	}
3867 	flag &= ~DDI_PROP_SYSTEM_DEF;
3868 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3869 	return (ddi_prop_update_common(dev, dip, flag, name,
3870 	    value, length, ddi_prop_fm_encode_bytes));
3871 }
3872 
3873 int
3874 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3875     char *name, caddr_t value, int length)
3876 {
3877 	if (!(flag & DDI_PROP_CANSLEEP))
3878 		flag |= DDI_PROP_DONTSLEEP;
3879 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3880 	return (ddi_prop_update_common(dev, dip, flag,
3881 	    name, value, length, ddi_prop_fm_encode_bytes));
3882 }
3883 
3884 int
3885 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3886     char *name, caddr_t value, int length)
3887 {
3888 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3889 
3890 	/*
3891 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3892 	 * return error.
3893 	 */
3894 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3895 		return (DDI_PROP_INVAL_ARG);
3896 
3897 	if (!(flag & DDI_PROP_CANSLEEP))
3898 		flag |= DDI_PROP_DONTSLEEP;
3899 	flag &= ~DDI_PROP_SYSTEM_DEF;
3900 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3901 		return (DDI_PROP_NOT_FOUND);
3902 
3903 	return (ddi_prop_update_common(dev, dip,
3904 	    (flag | DDI_PROP_TYPE_BYTE), name,
3905 	    value, length, ddi_prop_fm_encode_bytes));
3906 }
3907 
3908 int
3909 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3910     char *name, caddr_t value, int length)
3911 {
3912 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3913 
3914 	/*
3915 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3916 	 * return error.
3917 	 */
3918 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3919 		return (DDI_PROP_INVAL_ARG);
3920 
3921 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3922 		return (DDI_PROP_NOT_FOUND);
3923 
3924 	if (!(flag & DDI_PROP_CANSLEEP))
3925 		flag |= DDI_PROP_DONTSLEEP;
3926 	return (ddi_prop_update_common(dev, dip,
3927 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3928 	    name, value, length, ddi_prop_fm_encode_bytes));
3929 }
3930 
3931 
3932 /*
3933  * Common lookup routine used to lookup and decode a property.
3934  * Creates a property handle, searches for the raw encoded data,
3935  * fills in the handle, and calls the property decode functions
3936  * passed in.
3937  *
3938  * This routine is not static because ddi_bus_prop_op() which lives in
3939  * ddi_impl.c calls it.  No driver should be calling this routine.
3940  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Unlike the old interfaces, lookups sleep unless told not to. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	/* PH_FROM_PROM tells decoders the data came from the PROM. */
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
4003 
4004 /*
4005  * Lookup and return an array of composite properties.  The driver must
4006  * provide the decode routine.
4007  */
4008 int
4009 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4010     uint_t flags, char *name, void *data, uint_t *nelements,
4011     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4012 {
4013 	return (ddi_prop_lookup_common(match_dev, dip,
4014 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
4015 	    data, nelements, prop_decoder));
4016 }
4017 
4018 /*
4019  * Return 1 if a property exists (no type checking done).
4020  * Return 0 if it does not exist.
4021  */
4022 int
4023 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4024 {
4025 	int	i;
4026 	uint_t	x = 0;
4027 
4028 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4029 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4030 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4031 }
4032 
4033 
4034 /*
4035  * Update an array of composite properties.  The driver must
4036  * provide the encode routine.
4037  */
4038 int
4039 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4040     char *name, void *data, uint_t nelements,
4041     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4042 {
4043 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4044 	    name, data, nelements, prop_create));
4045 }
4046 
4047 /*
4048  * Get a single integer or boolean property and return it.
4049  * If the property does not exists, or cannot be decoded,
4050  * then return the defvalue passed in.
4051  *
4052  * This routine always succeeds.
4053  */
4054 int
4055 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4056     char *name, int defvalue)
4057 {
4058 	int	data;
4059 	uint_t	nelements;
4060 	int	rval;
4061 
4062 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4063 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4064 #ifdef DEBUG
4065 		if (dip != NULL) {
4066 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4067 			    " 0x%x (prop = %s, node = %s%d)", flags,
4068 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4069 		}
4070 #endif /* DEBUG */
4071 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4072 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4073 	}
4074 
4075 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4076 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4077 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4078 		if (rval == DDI_PROP_END_OF_DATA)
4079 			data = 1;
4080 		else
4081 			data = defvalue;
4082 	}
4083 	return (data);
4084 }
4085 
4086 /*
4087  * Get a single 64 bit integer or boolean property and return it.
4088  * If the property does not exists, or cannot be decoded,
4089  * then return the defvalue passed in.
4090  *
4091  * This routine always succeeds.
4092  */
4093 int64_t
4094 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4095     char *name, int64_t defvalue)
4096 {
4097 	int64_t	data;
4098 	uint_t	nelements;
4099 	int	rval;
4100 
4101 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4102 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4103 #ifdef DEBUG
4104 		if (dip != NULL) {
4105 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4106 			    " 0x%x (prop = %s, node = %s%d)", flags,
4107 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4108 		}
4109 #endif /* DEBUG */
4110 		return (DDI_PROP_INVAL_ARG);
4111 	}
4112 
4113 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4114 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4115 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4116 	    != DDI_PROP_SUCCESS) {
4117 		if (rval == DDI_PROP_END_OF_DATA)
4118 			data = 1;
4119 		else
4120 			data = defvalue;
4121 	}
4122 	return (data);
4123 }
4124 
4125 /*
4126  * Get an array of integer property
4127  */
4128 int
4129 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4130     char *name, int **data, uint_t *nelements)
4131 {
4132 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4133 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4134 #ifdef DEBUG
4135 		if (dip != NULL) {
4136 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4137 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4138 			    flags, name, ddi_driver_name(dip),
4139 			    ddi_get_instance(dip));
4140 		}
4141 #endif /* DEBUG */
4142 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4143 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4144 	}
4145 
4146 	return (ddi_prop_lookup_common(match_dev, dip,
4147 	    (flags | DDI_PROP_TYPE_INT), name, data,
4148 	    nelements, ddi_prop_fm_decode_ints));
4149 }
4150 
4151 /*
4152  * Get an array of 64 bit integer properties
4153  */
4154 int
4155 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4156     char *name, int64_t **data, uint_t *nelements)
4157 {
4158 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4159 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4160 #ifdef DEBUG
4161 		if (dip != NULL) {
4162 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4163 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4164 			    flags, name, ddi_driver_name(dip),
4165 			    ddi_get_instance(dip));
4166 		}
4167 #endif /* DEBUG */
4168 		return (DDI_PROP_INVAL_ARG);
4169 	}
4170 
4171 	return (ddi_prop_lookup_common(match_dev, dip,
4172 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4173 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4174 }
4175 
4176 /*
4177  * Update a single integer property.  If the property exists on the drivers
4178  * property list it updates, else it creates it.
4179  */
4180 int
4181 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4182     char *name, int data)
4183 {
4184 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4185 	    name, &data, 1, ddi_prop_fm_encode_ints));
4186 }
4187 
4188 /*
4189  * Update a single 64 bit integer property.
4190  * Update the driver property list if it exists, else create it.
4191  */
4192 int
4193 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4194     char *name, int64_t data)
4195 {
4196 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4197 	    name, &data, 1, ddi_prop_fm_encode_int64));
4198 }
4199 
4200 int
4201 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4202     char *name, int data)
4203 {
4204 	return (ddi_prop_update_common(match_dev, dip,
4205 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4206 	    name, &data, 1, ddi_prop_fm_encode_ints));
4207 }
4208 
4209 int
4210 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4211     char *name, int64_t data)
4212 {
4213 	return (ddi_prop_update_common(match_dev, dip,
4214 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4215 	    name, &data, 1, ddi_prop_fm_encode_int64));
4216 }
4217 
4218 /*
4219  * Update an array of integer property.  If the property exists on the drivers
4220  * property list it updates, else it creates it.
4221  */
4222 int
4223 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4224     char *name, int *data, uint_t nelements)
4225 {
4226 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4227 	    name, data, nelements, ddi_prop_fm_encode_ints));
4228 }
4229 
4230 /*
4231  * Update an array of 64 bit integer properties.
4232  * Update the driver property list if it exists, else create it.
4233  */
4234 int
4235 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4236     char *name, int64_t *data, uint_t nelements)
4237 {
4238 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4239 	    name, data, nelements, ddi_prop_fm_encode_int64));
4240 }
4241 
4242 int
4243 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4244     char *name, int64_t *data, uint_t nelements)
4245 {
4246 	return (ddi_prop_update_common(match_dev, dip,
4247 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4248 	    name, data, nelements, ddi_prop_fm_encode_int64));
4249 }
4250 
4251 int
4252 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4253     char *name, int *data, uint_t nelements)
4254 {
4255 	return (ddi_prop_update_common(match_dev, dip,
4256 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4257 	    name, data, nelements, ddi_prop_fm_encode_ints));
4258 }
4259 
4260 /*
4261  * Get a single string property.
4262  */
4263 int
4264 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4265     char *name, char **data)
4266 {
4267 	uint_t x;
4268 
4269 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4270 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4271 #ifdef DEBUG
4272 		if (dip != NULL) {
4273 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4274 			    "(prop = %s, node = %s%d); invalid bits ignored",
4275 			    "ddi_prop_lookup_string", flags, name,
4276 			    ddi_driver_name(dip), ddi_get_instance(dip));
4277 		}
4278 #endif /* DEBUG */
4279 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4280 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4281 	}
4282 
4283 	return (ddi_prop_lookup_common(match_dev, dip,
4284 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4285 	    &x, ddi_prop_fm_decode_string));
4286 }
4287 
4288 /*
4289  * Get an array of strings property.
4290  */
4291 int
4292 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4293     char *name, char ***data, uint_t *nelements)
4294 {
4295 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4296 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4297 #ifdef DEBUG
4298 		if (dip != NULL) {
4299 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4300 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4301 			    flags, name, ddi_driver_name(dip),
4302 			    ddi_get_instance(dip));
4303 		}
4304 #endif /* DEBUG */
4305 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4306 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4307 	}
4308 
4309 	return (ddi_prop_lookup_common(match_dev, dip,
4310 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4311 	    nelements, ddi_prop_fm_decode_strings));
4312 }
4313 
4314 /*
4315  * Update a single string property.
4316  */
4317 int
4318 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4319     char *name, char *data)
4320 {
4321 	return (ddi_prop_update_common(match_dev, dip,
4322 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4323 	    ddi_prop_fm_encode_string));
4324 }
4325 
/*
 * e_ddi_prop_update_string: system-defined variant of
 * ddi_prop_update_string (property goes on the system property list).
 */
int
e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
    char *name, char *data)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, &data, 1, ddi_prop_fm_encode_string));
}
4334 
4335 
4336 /*
4337  * Update an array of strings property.
4338  */
4339 int
4340 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4341     char *name, char **data, uint_t nelements)
4342 {
4343 	return (ddi_prop_update_common(match_dev, dip,
4344 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4345 	    ddi_prop_fm_encode_strings));
4346 }
4347 
/*
 * e_ddi_prop_update_string_array: system-defined variant of
 * ddi_prop_update_string_array (property goes on the system list).
 */
int
e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
    char *name, char **data, uint_t nelements)
{
	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
	    name, data, nelements,
	    ddi_prop_fm_encode_strings));
}
4357 
4358 
4359 /*
4360  * Get an array of bytes property.
4361  */
4362 int
4363 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4364     char *name, uchar_t **data, uint_t *nelements)
4365 {
4366 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4367 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4368 #ifdef DEBUG
4369 		if (dip != NULL) {
4370 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4371 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4372 			    flags, name, ddi_driver_name(dip),
4373 			    ddi_get_instance(dip));
4374 		}
4375 #endif /* DEBUG */
4376 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4377 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4378 	}
4379 
4380 	return (ddi_prop_lookup_common(match_dev, dip,
4381 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4382 	    nelements, ddi_prop_fm_decode_bytes));
4383 }
4384 
4385 /*
4386  * Update an array of bytes property.
4387  */
4388 int
4389 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4390     char *name, uchar_t *data, uint_t nelements)
4391 {
4392 	if (nelements == 0)
4393 		return (DDI_PROP_INVAL_ARG);
4394 
4395 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4396 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4397 }
4398 
4399 
/*
 * e_ddi_prop_update_byte_array: system-defined variant of
 * ddi_prop_update_byte_array (property goes on the system list).
 */
int
e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
    char *name, uchar_t *data, uint_t nelements)
{
	/* A byte property must have at least one element. */
	if (nelements == 0)
		return (DDI_PROP_INVAL_ARG);

	return (ddi_prop_update_common(match_dev, dip,
	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
	    name, data, nelements, ddi_prop_fm_encode_bytes));
}
4411 
4412 
4413 /*
4414  * ddi_prop_remove_common:	Undefine a managed property:
4415  *			Input dev_t must match dev_t when defined.
4416  *			Returns DDI_PROP_NOT_FOUND, possibly.
4417  *			DDI_PROP_INVAL_ARG is also possible if dev is
4418  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4419  */
4420 int
4421 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4422 {
4423 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4424 	ddi_prop_t	*propp;
4425 	ddi_prop_t	*lastpropp = NULL;
4426 
4427 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4428 	    (strlen(name) == 0)) {
4429 		return (DDI_PROP_INVAL_ARG);
4430 	}
4431 
4432 	if (flag & DDI_PROP_SYSTEM_DEF)
4433 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4434 	else if (flag & DDI_PROP_HW_DEF)
4435 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4436 
4437 	mutex_enter(&(DEVI(dip)->devi_lock));
4438 
4439 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4440 		if (DDI_STRSAME(propp->prop_name, name) &&
4441 		    (dev == propp->prop_dev)) {
4442 			/*
4443 			 * Unlink this propp allowing for it to
4444 			 * be first in the list:
4445 			 */
4446 
4447 			if (lastpropp == NULL)
4448 				*list_head = propp->prop_next;
4449 			else
4450 				lastpropp->prop_next = propp->prop_next;
4451 
4452 			mutex_exit(&(DEVI(dip)->devi_lock));
4453 
4454 			/*
4455 			 * Free memory and return...
4456 			 */
4457 			kmem_free(propp->prop_name,
4458 			    strlen(propp->prop_name) + 1);
4459 			if (propp->prop_len != 0)
4460 				kmem_free(propp->prop_val, propp->prop_len);
4461 			kmem_free(propp, sizeof (ddi_prop_t));
4462 			return (DDI_PROP_SUCCESS);
4463 		}
4464 		lastpropp = propp;
4465 	}
4466 	mutex_exit(&(DEVI(dip)->devi_lock));
4467 	return (DDI_PROP_NOT_FOUND);
4468 }
4469 
/*
 * ddi_prop_remove: remove a driver-defined property (flag 0 selects the
 * driver property list in ddi_prop_remove_common).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4475 
/*
 * e_ddi_prop_remove: remove a system-defined property.
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4481 
4482 /*
4483  * e_ddi_prop_list_delete: remove a list of properties
4484  *	Note that the caller needs to provide the required protection
4485  *	(eg. devi_lock if these properties are still attached to a devi)
4486  */
4487 void
4488 e_ddi_prop_list_delete(ddi_prop_t *props)
4489 {
4490 	i_ddi_prop_list_delete(props);
4491 }
4492 
4493 /*
4494  * ddi_prop_remove_all_common:
4495  *	Used before unloading a driver to remove
4496  *	all properties. (undefines all dev_t's props.)
4497  *	Also removes `explicitly undefined' props.
4498  *	No errors possible.
4499  */
4500 void
4501 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4502 {
4503 	ddi_prop_t	**list_head;
4504 
4505 	mutex_enter(&(DEVI(dip)->devi_lock));
4506 	if (flag & DDI_PROP_SYSTEM_DEF) {
4507 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4508 	} else if (flag & DDI_PROP_HW_DEF) {
4509 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4510 	} else {
4511 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4512 	}
4513 	i_ddi_prop_list_delete(*list_head);
4514 	*list_head = NULL;
4515 	mutex_exit(&(DEVI(dip)->devi_lock));
4516 }
4517 
4518 
4519 /*
4520  * ddi_prop_remove_all:		Remove all driver prop definitions.
4521  */
4522 
4523 void
4524 ddi_prop_remove_all(dev_info_t *dip)
4525 {
4526 	i_ddi_prop_dyn_driver_set(dip, NULL);
4527 	ddi_prop_remove_all_common(dip, 0);
4528 }
4529 
4530 /*
4531  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4532  */
4533 
4534 void
4535 e_ddi_prop_remove_all(dev_info_t *dip)
4536 {
4537 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4538 }
4539 
4540 
4541 /*
4542  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4543  *			searches which match this property return
4544  *			the error code DDI_PROP_UNDEFINED.
4545  *
4546  *			Use ddi_prop_remove to negate effect of
4547  *			ddi_prop_undefine
4548  *
4549  *			See above for error returns.
4550  */
4551 
4552 int
4553 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4554 {
4555 	if (!(flag & DDI_PROP_CANSLEEP))
4556 		flag |= DDI_PROP_DONTSLEEP;
4557 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4558 	return (ddi_prop_update_common(dev, dip, flag,
4559 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4560 }
4561 
/*
 * e_ddi_prop_undefine: system-defined variant of ddi_prop_undefine.
 */
int
e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
{
	/* Default to non-sleeping allocation unless caller allows sleep. */
	if (!(flag & DDI_PROP_CANSLEEP))
		flag |= DDI_PROP_DONTSLEEP;
	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
	return (ddi_prop_update_common(dev, dip, flag,
	    name, NULL, 0, ddi_prop_fm_encode_bytes));
}
4572 
4573 /*
4574  * Support for gathering dynamic properties in devinfo snapshot.
4575  */
4576 void
4577 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4578 {
4579 	DEVI(dip)->devi_prop_dyn_driver = dp;
4580 }
4581 
/* Return the driver dynamic-property descriptor of the devinfo node. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}
4587 
/* Attach a parent dynamic-property descriptor to the devinfo node. */
void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}
4593 
/* Return the parent dynamic-property descriptor of the devinfo node. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}
4599 
/*
 * Invalidate cached snapshot state when a node with dynamic properties
 * changes.  Both arguments must be non-NULL for anything to happen.
 */
void
i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	/* for now we invalidate the entire cached snapshot */
	if (dip && dp)
		i_ddi_di_cache_invalidate();
}
4607 
/*
 * Invalidate cached devinfo snapshot state after a property change.
 * All arguments are currently unused (hence ARGSUSED).
 */
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4615 
4616 
4617 /*
4618  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4619  *
4620  * if input dip != child_dip, then call is on behalf of child
4621  * to search PROM, do it via ddi_prop_search_common() and ascend only
4622  * if allowed.
4623  *
4624  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4625  * to search for PROM defined props only.
4626  *
4627  * Note that the PROM search is done only if the requested dev
4628  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4629  * have no associated dev, thus are automatically associated with
4630  * DDI_DEV_T_NONE.
4631  *
4632  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4633  *
4634  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4635  * that the property resides in the prom.
4636  */
4637 int
4638 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4639     ddi_prop_op_t prop_op, int mod_flags,
4640     char *name, caddr_t valuep, int *lengthp)
4641 {
4642 	int	len;
4643 	caddr_t buffer;
4644 
4645 	/*
4646 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4647 	 * look in caller's PROM if it's a self identifying device...
4648 	 *
4649 	 * Note that this is very similar to ddi_prop_op, but we
4650 	 * search the PROM instead of the s/w defined properties,
4651 	 * and we are called on by the parent driver to do this for
4652 	 * the child.
4653 	 */
4654 
4655 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4656 	    ndi_dev_is_prom_node(ch_dip) &&
4657 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4658 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4659 		if (len == -1) {
4660 			return (DDI_PROP_NOT_FOUND);
4661 		}
4662 
4663 		/*
4664 		 * If exists only request, we're done
4665 		 */
4666 		if (prop_op == PROP_EXISTS) {
4667 			return (DDI_PROP_FOUND_1275);
4668 		}
4669 
4670 		/*
4671 		 * If length only request or prop length == 0, get out
4672 		 */
4673 		if ((prop_op == PROP_LEN) || (len == 0)) {
4674 			*lengthp = len;
4675 			return (DDI_PROP_FOUND_1275);
4676 		}
4677 
4678 		/*
4679 		 * Allocate buffer if required... (either way `buffer'
4680 		 * is receiving address).
4681 		 */
4682 
4683 		switch (prop_op) {
4684 
4685 		case PROP_LEN_AND_VAL_ALLOC:
4686 
4687 			buffer = kmem_alloc((size_t)len,
4688 			    mod_flags & DDI_PROP_CANSLEEP ?
4689 			    KM_SLEEP : KM_NOSLEEP);
4690 			if (buffer == NULL) {
4691 				return (DDI_PROP_NO_MEMORY);
4692 			}
4693 			*(caddr_t *)valuep = buffer;
4694 			break;
4695 
4696 		case PROP_LEN_AND_VAL_BUF:
4697 
4698 			if (len > (*lengthp)) {
4699 				*lengthp = len;
4700 				return (DDI_PROP_BUF_TOO_SMALL);
4701 			}
4702 
4703 			buffer = valuep;
4704 			break;
4705 
4706 		default:
4707 			break;
4708 		}
4709 
4710 		/*
4711 		 * Call the PROM function to do the copy.
4712 		 */
4713 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4714 		    name, buffer);
4715 
4716 		*lengthp = len; /* return the actual length to the caller */
4717 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4718 		return (DDI_PROP_FOUND_1275);
4719 	}
4720 
4721 	return (DDI_PROP_NOT_FOUND);
4722 }
4723 
4724 /*
4725  * The ddi_bus_prop_op default bus nexus prop op function.
4726  *
4727  * Code to search hardware layer (PROM), if it exists,
4728  * on behalf of child, then, if appropriate, ascend and check
4729  * my own software defined properties...
4730  */
4731 int
4732 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4733     ddi_prop_op_t prop_op, int mod_flags,
4734     char *name, caddr_t valuep, int *lengthp)
4735 {
4736 	int	error;
4737 
4738 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4739 	    name, valuep, lengthp);
4740 
4741 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4742 	    error == DDI_PROP_BUF_TOO_SMALL)
4743 		return (error);
4744 
4745 	if (error == DDI_PROP_NO_MEMORY) {
4746 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4747 		return (DDI_PROP_NO_MEMORY);
4748 	}
4749 
4750 	/*
4751 	 * Check the 'options' node as a last resort
4752 	 */
4753 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4754 		return (DDI_PROP_NOT_FOUND);
4755 
4756 	if (ch_dip == ddi_root_node())	{
4757 		/*
4758 		 * As a last resort, when we've reached
4759 		 * the top and still haven't found the
4760 		 * property, see if the desired property
4761 		 * is attached to the options node.
4762 		 *
4763 		 * The options dip is attached right after boot.
4764 		 */
4765 		ASSERT(options_dip != NULL);
4766 		/*
4767 		 * Force the "don't pass" flag to *just* see
4768 		 * what the options node has to offer.
4769 		 */
4770 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4771 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4772 		    (uint_t *)lengthp));
4773 	}
4774 
4775 	/*
4776 	 * Otherwise, continue search with parent's s/w defined properties...
4777 	 * NOTE: Using `dip' in following call increments the level.
4778 	 */
4779 
4780 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4781 	    name, valuep, (uint_t *)lengthp));
4782 }
4783 
4784 /*
4785  * External property functions used by other parts of the kernel...
4786  */
4787 
4788 /*
4789  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4790  */
4791 
4792 int
4793 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4794     caddr_t valuep, int *lengthp)
4795 {
4796 	_NOTE(ARGUNUSED(type))
4797 	dev_info_t *devi;
4798 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4799 	int error;
4800 
4801 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4802 		return (DDI_PROP_NOT_FOUND);
4803 
4804 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4805 	ddi_release_devi(devi);
4806 	return (error);
4807 }
4808 
4809 /*
4810  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4811  */
4812 
4813 int
4814 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4815     caddr_t valuep, int *lengthp)
4816 {
4817 	_NOTE(ARGUNUSED(type))
4818 	dev_info_t *devi;
4819 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4820 	int error;
4821 
4822 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4823 		return (DDI_PROP_NOT_FOUND);
4824 
4825 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4826 	ddi_release_devi(devi);
4827 	return (error);
4828 }
4829 
4830 /*
4831  * e_ddi_getprop:	See comments for ddi_getprop.
4832  */
4833 int
4834 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4835 {
4836 	_NOTE(ARGUNUSED(type))
4837 	dev_info_t *devi;
4838 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4839 	int	propvalue = defvalue;
4840 	int	proplength = sizeof (int);
4841 	int	error;
4842 
4843 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4844 		return (defvalue);
4845 
4846 	error = cdev_prop_op(dev, devi, prop_op,
4847 	    flags, name, (caddr_t)&propvalue, &proplength);
4848 	ddi_release_devi(devi);
4849 
4850 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4851 		propvalue = 1;
4852 
4853 	return (propvalue);
4854 }
4855 
4856 /*
4857  * e_ddi_getprop_int64:
4858  *
4859  * This is a typed interfaces, but predates typed properties. With the
4860  * introduction of typed properties the framework tries to ensure
4861  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4862  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4863  * typed interface invokes legacy (non-typed) interfaces:
4864  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4865  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4866  * this type of lookup as a single operation we invoke the legacy
4867  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4868  * framework ddi_prop_op(9F) implementation is expected to check for
4869  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4870  * (currently TYPE_INT64).
4871  */
4872 int64_t
4873 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4874     int flags, int64_t defvalue)
4875 {
4876 	_NOTE(ARGUNUSED(type))
4877 	dev_info_t	*devi;
4878 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4879 	int64_t		propvalue = defvalue;
4880 	int		proplength = sizeof (propvalue);
4881 	int		error;
4882 
4883 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4884 		return (defvalue);
4885 
4886 	error = cdev_prop_op(dev, devi, prop_op, flags |
4887 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4888 	ddi_release_devi(devi);
4889 
4890 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4891 		propvalue = 1;
4892 
4893 	return (propvalue);
4894 }
4895 
4896 /*
4897  * e_ddi_getproplen:	See comments for ddi_getproplen.
4898  */
4899 int
4900 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4901 {
4902 	_NOTE(ARGUNUSED(type))
4903 	dev_info_t *devi;
4904 	ddi_prop_op_t prop_op = PROP_LEN;
4905 	int error;
4906 
4907 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4908 		return (DDI_PROP_NOT_FOUND);
4909 
4910 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4911 	ddi_release_devi(devi);
4912 	return (error);
4913 }
4914 
4915 /*
4916  * Routines to get at elements of the dev_info structure
4917  */
4918 
4919 /*
4920  * ddi_binding_name: Return the driver binding name of the devinfo node
4921  *		This is the name the OS used to bind the node to a driver.
4922  */
4923 char *
4924 ddi_binding_name(dev_info_t *dip)
4925 {
4926 	return (DEVI(dip)->devi_binding_name);
4927 }
4928 
4929 /*
4930  * ddi_driver_major: Return the major number of the driver that
4931  *	the supplied devinfo is bound to.  If not yet bound,
4932  *	DDI_MAJOR_T_NONE.
4933  *
4934  * When used by the driver bound to 'devi', this
4935  * function will reliably return the driver major number.
4936  * Other ways of determining the driver major number, such as
4937  *	major = ddi_name_to_major(ddi_get_name(devi));
4938  *	major = ddi_name_to_major(ddi_binding_name(devi));
4939  * can return a different result as the driver/alias binding
4940  * can change dynamically, and thus should be avoided.
4941  */
4942 major_t
4943 ddi_driver_major(dev_info_t *devi)
4944 {
4945 	return (DEVI(devi)->devi_major);
4946 }
4947 
4948 /*
4949  * ddi_driver_name: Return the normalized driver name. this is the
4950  *		actual driver name
4951  */
4952 const char *
4953 ddi_driver_name(dev_info_t *devi)
4954 {
4955 	major_t major;
4956 
4957 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4958 		return (ddi_major_to_name(major));
4959 
4960 	return (ddi_node_name(devi));
4961 }
4962 
4963 /*
4964  * i_ddi_set_binding_name:	Set binding name.
4965  *
4966  *	Set the binding name to the given name.
4967  *	This routine is for use by the ddi implementation, not by drivers.
4968  */
4969 void
4970 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4971 {
4972 	DEVI(dip)->devi_binding_name = name;
4973 
4974 }
4975 
4976 /*
4977  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4978  * the implementation has used to bind the node to a driver.
4979  */
4980 char *
4981 ddi_get_name(dev_info_t *dip)
4982 {
4983 	return (DEVI(dip)->devi_binding_name);
4984 }
4985 
4986 /*
4987  * ddi_node_name: Return the name property of the devinfo node
4988  *		This may differ from ddi_binding_name if the node name
4989  *		does not define a binding to a driver (i.e. generic names).
4990  */
4991 char *
4992 ddi_node_name(dev_info_t *dip)
4993 {
4994 	return (DEVI(dip)->devi_node_name);
4995 }
4996 
4997 
4998 /*
4999  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
5000  */
5001 int
5002 ddi_get_nodeid(dev_info_t *dip)
5003 {
5004 	return (DEVI(dip)->devi_nodeid);
5005 }
5006 
/* Return the instance number of the devinfo node. */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}
5012 
/* Return the dev_ops vector of the driver bound to the node. */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}
5018 
/* Set the dev_ops vector on the devinfo node. */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}
5024 
5025 /*
5026  * ddi_set_driver_private/ddi_get_driver_private:
5027  * Get/set device driver private data in devinfo.
5028  */
5029 void
5030 ddi_set_driver_private(dev_info_t *dip, void *data)
5031 {
5032 	DEVI(dip)->devi_driver_data = data;
5033 }
5034 
/* Return the driver-private data stored on the devinfo node. */
void *
ddi_get_driver_private(dev_info_t *dip)
{
	return (DEVI(dip)->devi_driver_data);
}
5040 
5041 /*
5042  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5043  */
5044 
5045 dev_info_t *
5046 ddi_get_parent(dev_info_t *dip)
5047 {
5048 	return ((dev_info_t *)DEVI(dip)->devi_parent);
5049 }
5050 
/* Return the first child devinfo node. */
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}
5056 
/* Return the next sibling devinfo node. */
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}
5062 
/* Return the devi_next link of the devinfo node. */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}
5068 
/* Set the devi_next link of the devinfo node. */
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}
5074 
5075 /*
5076  * ddi_root_node:		Return root node of devinfo tree
5077  */
5078 
5079 dev_info_t *
5080 ddi_root_node(void)
5081 {
5082 	extern dev_info_t *top_devinfo;
5083 
5084 	return (top_devinfo);
5085 }
5086 
5087 /*
5088  * Miscellaneous functions:
5089  */
5090 
5091 /*
5092  * Implementation specific hooks
5093  */
5094 
5095 void
5096 ddi_report_dev(dev_info_t *d)
5097 {
5098 	char *b;
5099 
5100 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
5101 
5102 	/*
5103 	 * If this devinfo node has cb_ops, it's implicitly accessible from
5104 	 * userland, so we print its full name together with the instance
5105 	 * number 'abbreviation' that the driver may use internally.
5106 	 */
5107 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
5108 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
5109 		cmn_err(CE_CONT, "?%s%d is %s\n",
5110 		    ddi_driver_name(d), ddi_get_instance(d),
5111 		    ddi_pathname(d, b));
5112 		kmem_free(b, MAXPATHLEN);
5113 	}
5114 }
5115 
5116 /*
5117  * ddi_ctlops() is described in the assembler not to buy a new register
5118  * window when it's called and can reduce cost in climbing the device tree
5119  * without using the tail call optimization.
5120  */
5121 int
5122 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5123 {
5124 	int ret;
5125 
5126 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5127 	    (void *)&rnumber, (void *)result);
5128 
5129 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5130 }
5131 
/* Store the number of register sets of the device in *result. */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
5137 
/* Ask the parent nexus whether the device is self-identifying. */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
5143 
/* Ask the parent nexus whether the device is in a slave-only slot. */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
5149 
/* Ask the parent nexus whether devices a and b have hardware affinity. */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
5155 
5156 int
5157 ddi_streams_driver(dev_info_t *dip)
5158 {
5159 	if (i_ddi_devi_attached(dip) &&
5160 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5161 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5162 		return (DDI_SUCCESS);
5163 	return (DDI_FAILURE);
5164 }
5165 
5166 /*
5167  * callback free list
5168  */
5169 
5170 static int ncallbacks;
5171 static int nc_low = 170;
5172 static int nc_med = 512;
5173 static int nc_high = 2048;
5174 static struct ddi_callback *callbackq;
5175 static struct ddi_callback *callbackqfree;
5176 
5177 /*
5178  * set/run callback lists
5179  */
5180 struct	cbstats	{
5181 	kstat_named_t	cb_asked;
5182 	kstat_named_t	cb_new;
5183 	kstat_named_t	cb_run;
5184 	kstat_named_t	cb_delete;
5185 	kstat_named_t	cb_maxreq;
5186 	kstat_named_t	cb_maxlist;
5187 	kstat_named_t	cb_alloc;
5188 	kstat_named_t	cb_runouts;
5189 	kstat_named_t	cb_L2;
5190 	kstat_named_t	cb_grow;
5191 } cbstats = {
5192 	{"asked",	KSTAT_DATA_UINT32},
5193 	{"new",		KSTAT_DATA_UINT32},
5194 	{"run",		KSTAT_DATA_UINT32},
5195 	{"delete",	KSTAT_DATA_UINT32},
5196 	{"maxreq",	KSTAT_DATA_UINT32},
5197 	{"maxlist",	KSTAT_DATA_UINT32},
5198 	{"alloc",	KSTAT_DATA_UINT32},
5199 	{"runouts",	KSTAT_DATA_UINT32},
5200 	{"L2",		KSTAT_DATA_UINT32},
5201 	{"grow",	KSTAT_DATA_UINT32},
5202 };
5203 
/* Shorthand accessors for the ui32 value of each cbstats counter. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the L2 free list, and cbstats. */
static kmutex_t ddi_callback_mutex;
5216 
5217 /*
5218  * callbacks are handled using a L1/L2 cache. The L1 cache
5219  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5220  * we can't get callbacks from the L1 cache [because pageout is doing
5221  * I/O at the time freemem is 0], we allocate callbacks out of the
5222  * L2 cache. The L2 cache is static and depends on the memory size.
5223  * [We might also count the number of devices at probe time and
5224  * allocate one structure per device and adjust for deferred attach]
5225  */
5226 void
5227 impl_ddi_callback_init(void)
5228 {
5229 	int	i;
5230 	uint_t	physmegs;
5231 	kstat_t	*ksp;
5232 
5233 	physmegs = physmem >> (20 - PAGESHIFT);
5234 	if (physmegs < 48) {
5235 		ncallbacks = nc_low;
5236 	} else if (physmegs < 128) {
5237 		ncallbacks = nc_med;
5238 	} else {
5239 		ncallbacks = nc_high;
5240 	}
5241 
5242 	/*
5243 	 * init free list
5244 	 */
5245 	callbackq = kmem_zalloc(
5246 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5247 	for (i = 0; i < ncallbacks-1; i++)
5248 		callbackq[i].c_nfree = &callbackq[i+1];
5249 	callbackqfree = callbackq;
5250 
5251 	/* init kstats */
5252 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5253 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5254 		ksp->ks_data = (void *) &cbstats;
5255 		kstat_install(ksp);
5256 	}
5257 
5258 }
5259 
/*
 * callback_insert: add a request for `funcp(arg)' to the list headed by
 * *listid, coalescing with an existing entry for the same (funcp, arg)
 * by bumping its count.  Allocation falls back from kmem (L1) to the
 * static L2 pool to kmem_alloc_tryhard.  Caller holds ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Look for an existing entry to coalesce with; remember the tail. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/* No match: allocate a new entry (L1, then L2, then tryhard). */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append to the tail (marker), or start the list if it was empty. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5303 
/*
 * ddi_set_callback: register funcp(arg) on the callback list *listid,
 * to be run later via ddi_run_callback().
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	/* Track the high-water mark of outstanding (asked - run) requests. */
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}
5314 
/*
 * real_callback_run: softcall worker that drains the callback list whose
 * head pointer is at Queue.  Each entry is unlinked and freed (back to
 * the L2 pool if it came from there), then its function is invoked up to
 * `count' times; a zero return means the callback wants to be re-queued.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;	/* total the queued counts only once */
	int pending = 0;	/* outstanding invocations still owed */

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Capture the entry, unlink it, and release its memory. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* Entry came from the static L2 pool: recycle it. */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Invoke without the lock; re-queue on a zero return. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5375 
/* Schedule the callback list at *listid to be drained via softcall. */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5381 
/*
 * ddi_periodic_t
 * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
 *     int level)
 *
 * INTERFACE LEVEL
 *      Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *      func: the callback function
 *
 *            The callback function will be invoked. The function is invoked
 *            in kernel context if the level argument passed is zero.
 *            Otherwise it's invoked in interrupt context at the specified
 *            level.
 *
 *       arg: the argument passed to the callback function
 *
 *  interval: interval time
 *
 *    level : callback interrupt level
 *
 *            If the value is zero, the callback function is invoked
 *            in kernel context. If the value is greater than zero, but
 *            less than or equal to ten, the callback function is invoked in
 *            interrupt context at the specified interrupt level, which may
 *            be used for real time applications.
 *
 *            This value must be in the range 0-10, which can be a numeric
 *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
 *
 * DESCRIPTION
 *      ddi_periodic_add(9F) schedules the specified function to be
 *      periodically invoked at the given time interval.
 *
 *      As with timeout(9F), the exact time interval over which the function
 *      takes effect cannot be guaranteed, but the value given is a close
 *      approximation.
 *
 *      Drivers waiting on behalf of processes with real-time constraints must
 *      pass a non-zero value as the level argument to ddi_periodic_add(9F).
 *
 * RETURN VALUES
 *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
 *      which must be used for ddi_periodic_delete(9F) to specify the request.
 *
 * CONTEXT
 *      ddi_periodic_add(9F) can be called in user or kernel context, but
 *      it cannot be called in interrupt context, which is different from
 *      timeout(9F).
 */
ddi_periodic_t
ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	/*
	 * Sanity check of the argument level.
	 */
	if (level < DDI_IPL_0 || level > DDI_IPL_10)
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: invalid interrupt level (%d).", level);

	/*
	 * Sanity check of the context. ddi_periodic_add() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_add: called in (high) interrupt context.");

	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
}
5453 
/*
 * void
 * ddi_periodic_delete(ddi_periodic_t req)
 *
 * INTERFACE LEVEL
 *     Solaris DDI specific (Solaris DDI)
 *
 * PARAMETERS
 *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
 *     previously.
 *
 * DESCRIPTION
 *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
 *     previously requested.
 *
 *     ddi_periodic_delete(9F) will not return until the pending request
 *     is canceled or executed.
 *
 *     As with untimeout(9F), calling ddi_periodic_delete(9F) for a
 *     timeout which is either running on another CPU, or has already
 *     completed causes no problems. However, unlike untimeout(9F), there
 *     are no restrictions on the lock which might be held across the call
 *     to ddi_periodic_delete(9F).
 *
 *     Drivers should be structured with the understanding that the arrival of
 *     both an interrupt and a timeout for that interrupt can occasionally
 *     occur, in either order.
 *
 * CONTEXT
 *     ddi_periodic_delete(9F) can be called in user or kernel context, but
 *     it cannot be called in interrupt context, which is different from
 *     untimeout(9F).
 */
void
ddi_periodic_delete(ddi_periodic_t req)
{
	/*
	 * Sanity check of the context. ddi_periodic_delete() cannot be
	 * called in either interrupt context or high interrupt context.
	 */
	if (servicing_interrupt())
		cmn_err(CE_PANIC,
		    "ddi_periodic_delete: called in (high) interrupt context.");

	i_untimeout((timeout_t)req);
}
5500 
5501 dev_info_t *
5502 nodevinfo(dev_t dev, int otyp)
5503 {
5504 	_NOTE(ARGUNUSED(dev, otyp))
5505 	return ((dev_info_t *)0);
5506 }
5507 
/*
 * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers that don't expect their
 * getinfo(9E) entry point to be called. A driver that uses this must not
 * call ddi_create_minor_node.
 *
 * Always returns DDI_FAILURE regardless of the command or arguments.
 */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	return (DDI_FAILURE);
}
5520 
5521 /*
5522  * A driver should support its own getinfo(9E) entry point. This function
5523  * is provided as a convenience for ON drivers that where the minor number
5524  * is the instance. Drivers that do not have 1:1 mapping must implement
5525  * their own getinfo(9E) function.
5526  */
5527 int
5528 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5529     void *arg, void **result)
5530 {
5531 	_NOTE(ARGUNUSED(dip))
5532 	int	instance;
5533 
5534 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5535 		return (DDI_FAILURE);
5536 
5537 	instance = getminor((dev_t)(uintptr_t)arg);
5538 	*result = (void *)(uintptr_t)instance;
5539 	return (DDI_SUCCESS);
5540 }
5541 
/*
 * attach(9E)/detach(9E)-shaped stub that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5548 
/*
 * DMA-map stub for nodes with no DMA support; always returns
 * DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5556 
/*
 * DMA handle-allocation stub; always rejects the attributes with
 * DDI_DMA_BADATTR.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5564 
/*
 * DMA handle-free stub; always returns DDI_FAILURE.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5572 
/*
 * DMA bind stub; always returns DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5581 
/*
 * DMA unbind stub; always returns DDI_FAILURE.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5589 
/*
 * DMA flush/sync stub; always returns DDI_FAILURE.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5598 
/*
 * DMA window stub; always returns DDI_FAILURE.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5607 
/*
 * DMA control-operation stub; always returns DDI_FAILURE.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5616 
/*
 * Generic no-op entry point.
 */
void
ddivoid(void)
{}
5620 
/*
 * chpoll(9E) stub for devices that do not support polling; always
 * returns ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5628 
/*
 * Return the credential structure of the current thread (CRED()).
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5634 
/*
 * Return the current lbolt value, narrowed to clock_t.  Subject to
 * wraparound on 32-bit clock_t; use ddi_get_lbolt64() for a wider value.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
5640 
/*
 * Return the current lbolt value as a full 64-bit quantity.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5646 
5647 time_t
5648 ddi_get_time(void)
5649 {
5650 	time_t	now;
5651 
5652 	if ((now = gethrestime_sec()) == 0) {
5653 		timestruc_t ts;
5654 		mutex_enter(&tod_lock);
5655 		ts = tod_get();
5656 		mutex_exit(&tod_lock);
5657 		return (ts.tv_sec);
5658 	} else {
5659 		return (now);
5660 	}
5661 }
5662 
/*
 * Return the process id of the process owning the current thread.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5668 
/*
 * Return the system-wide thread id of the current thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5674 
5675 /*
5676  * This function returns B_TRUE if the caller can reasonably expect that a call
5677  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5678  * by user-level signal.  If it returns B_FALSE, then the caller should use
5679  * other means to make certain that the wait will not hang "forever."
5680  *
5681  * It does not check the signal mask, nor for reception of any particular
5682  * signal.
5683  *
5684  * Currently, a thread can receive a signal if it's not a kernel thread and it
5685  * is not in the middle of exit(2) tear-down.  Threads that are in that
5686  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5687  * cv_timedwait, and qwait_sig to qwait.
5688  */
5689 boolean_t
5690 ddi_can_receive_sig(void)
5691 {
5692 	proc_t *pp;
5693 
5694 	if (curthread->t_proc_flag & TP_LWPEXIT)
5695 		return (B_FALSE);
5696 	if ((pp = ttoproc(curthread)) == NULL)
5697 		return (B_FALSE);
5698 	return (pp->p_as != &kas);
5699 }
5700 
/*
 * Byte-swap each 16-bit half-word from src into dst.  A trailing odd
 * byte of the source is ignored, matching historical swab behavior.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char	*from = src;
	unsigned char	*to = dst;
	size_t		npairs = nbytes >> 1;
	size_t		i;

	for (i = 0; i < npairs; i++) {
		unsigned char lo = from[0];
		unsigned char hi = from[1];

		to[0] = hi;
		to[1] = lo;
		from += 2;
		to += 2;
	}
}
5720 
5721 static void
5722 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5723 {
5724 	int			circ;
5725 	struct ddi_minor_data	*dp;
5726 
5727 	ndi_devi_enter(ddip, &circ);
5728 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5729 		DEVI(ddip)->devi_minor = dmdp;
5730 	} else {
5731 		while (dp->next != (struct ddi_minor_data *)NULL)
5732 			dp = dp->next;
5733 		dp->next = dmdp;
5734 	}
5735 	ndi_devi_exit(ddip, circ);
5736 }
5737 
/*
 * Part of the obsolete SunCluster DDI Hooks.
 * Keep for binary compatibility; simply forwards to getminor(9F).
 */
minor_t
ddi_getiminor(dev_t dev)
{
	return (getminor(dev));
}
5747 
5748 static int
5749 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5750 {
5751 	int se_flag;
5752 	int kmem_flag;
5753 	int se_err;
5754 	char *pathname, *class_name;
5755 	sysevent_t *ev = NULL;
5756 	sysevent_id_t eid;
5757 	sysevent_value_t se_val;
5758 	sysevent_attr_list_t *ev_attr_list = NULL;
5759 
5760 	/* determine interrupt context */
5761 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5762 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5763 
5764 	i_ddi_di_cache_invalidate();
5765 
5766 #ifdef DEBUG
5767 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5768 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5769 		    "interrupt level by driver %s",
5770 		    ddi_driver_name(dip));
5771 	}
5772 #endif /* DEBUG */
5773 
5774 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5775 	if (ev == NULL) {
5776 		goto fail;
5777 	}
5778 
5779 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5780 	if (pathname == NULL) {
5781 		sysevent_free(ev);
5782 		goto fail;
5783 	}
5784 
5785 	(void) ddi_pathname(dip, pathname);
5786 	ASSERT(strlen(pathname));
5787 	se_val.value_type = SE_DATA_TYPE_STRING;
5788 	se_val.value.sv_string = pathname;
5789 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5790 	    &se_val, se_flag) != 0) {
5791 		kmem_free(pathname, MAXPATHLEN);
5792 		sysevent_free(ev);
5793 		goto fail;
5794 	}
5795 	kmem_free(pathname, MAXPATHLEN);
5796 
5797 	/* add the device class attribute */
5798 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5799 		se_val.value_type = SE_DATA_TYPE_STRING;
5800 		se_val.value.sv_string = class_name;
5801 		if (sysevent_add_attr(&ev_attr_list,
5802 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5803 			sysevent_free_attr(ev_attr_list);
5804 			goto fail;
5805 		}
5806 	}
5807 
5808 	/*
5809 	 * allow for NULL minor names
5810 	 */
5811 	if (minor_name != NULL) {
5812 		se_val.value.sv_string = minor_name;
5813 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5814 		    &se_val, se_flag) != 0) {
5815 			sysevent_free_attr(ev_attr_list);
5816 			sysevent_free(ev);
5817 			goto fail;
5818 		}
5819 	}
5820 
5821 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5822 		sysevent_free_attr(ev_attr_list);
5823 		sysevent_free(ev);
5824 		goto fail;
5825 	}
5826 
5827 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5828 		if (se_err == SE_NO_TRANSPORT) {
5829 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5830 			    "for driver %s (%s). Run devfsadm -i %s",
5831 			    ddi_driver_name(dip), "syseventd not responding",
5832 			    ddi_driver_name(dip));
5833 		} else {
5834 			sysevent_free(ev);
5835 			goto fail;
5836 		}
5837 	}
5838 
5839 	sysevent_free(ev);
5840 	return (DDI_SUCCESS);
5841 fail:
5842 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5843 	    "for driver %s. Run devfsadm -i %s",
5844 	    ddi_driver_name(dip), ddi_driver_name(dip));
5845 	return (DDI_SUCCESS);
5846 }
5847 
/*
 * Post an ESC_DEVFS_MINOR_REMOVE sysevent for a minor node being removed
 * from dip.  Failing to remove a minor node is not of interest, therefore
 * we do not generate an error message; all paths return DDI_SUCCESS.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		/* value_type is still SE_DATA_TYPE_STRING from above */
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/* on attach success, the event takes ownership of the attr list */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	/* shared cleanup: the success path falls through here by design */
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5947 
/*
 * Derive the device class of the node.
 * Device class names aren't defined yet. Until this is done we use
 * devfs event subclass names as device class names.
 *
 * The class is only set if not already established on the node.  Each
 * node_type test matches the prefix and then requires either an exact
 * match or a ':' qualifier immediately after the prefix.  Returns the
 * result of i_ddi_set_devi_class(), or DDI_SUCCESS if nothing matched.
 */
static int
derive_devi_class(dev_info_t *dip, char *node_type, int flag)
{
	int rv = DDI_SUCCESS;

	if (i_ddi_devi_class(dip) == NULL) {
		/* block devices (DDI_NT_BLOCK[:...]) except floppies */
		if (strncmp(node_type, DDI_NT_BLOCK,
		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
		    strcmp(node_type, DDI_NT_FD) != 0) {

			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);

		} else if (strncmp(node_type, DDI_NT_NET,
		    sizeof (DDI_NT_NET) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);

		} else if (strncmp(node_type, DDI_NT_PRINTER,
		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {

			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);

		} else if (strncmp(node_type, DDI_PSEUDO,
		    sizeof (DDI_PSEUDO) -1) == 0 &&
		    (strncmp(ESC_LOFI, ddi_node_name(dip),
		    sizeof (ESC_LOFI) -1) == 0)) {
			/* pseudo nodes whose node name starts with "lofi" */
			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
		}
	}

	return (rv);
}
5991 
5992 /*
5993  * Check compliance with PSARC 2003/375:
5994  *
5995  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5996  * exceed IFNAMSIZ (16) characters in length.
5997  */
5998 static boolean_t
5999 verify_name(char *name)
6000 {
6001 	size_t	len = strlen(name);
6002 	char	*cp;
6003 
6004 	if (len == 0 || len > IFNAMSIZ)
6005 		return (B_FALSE);
6006 
6007 	for (cp = name; *cp != '\0'; cp++) {
6008 		if (!isalnum(*cp) && *cp != '_')
6009 			return (B_FALSE);
6010 	}
6011 
6012 	return (B_TRUE);
6013 }
6014 
/*
 * ddi_create_minor_common:	Create a  ddi_minor_data structure and
 *				attach it to the given devinfo node.
 *
 * Shared implementation behind ddi_create_minor_node(9F) and its
 * priv/default/internal variants.  Validates the spec type, name and
 * minor number, applies PSARC/2003/375 naming rules to network nodes,
 * builds the minor-data record and appends it to dip's minor list.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */

int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are supported */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone nodes are exported under the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
6142 
/*
 * ddi_create_minor_node(9F): create an ordinary (DDM_MINOR) minor node
 * with no device privileges.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
6150 
/*
 * ddi_create_priv_minor_node(9F): create a DDM_MINOR node gated by the
 * given read/write privileges and privileged access mode.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
6159 
/*
 * Create a default (DDM_DEFAULT) minor node with no device privileges.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
6167 
/*
 * Internal (non-ddi) routine for drivers to export names known
 * to the kernel (especially ddi_pathname_to_dev_t and friends)
 * but not exported externally to /dev
 *
 * Creates a DDM_INTERNAL_PATH node with node_type "internal" and no
 * flags or privileges.
 */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
6180 
/*
 * Remove the minor node named 'name' from dip's minor list, freeing its
 * name, device privilege and dacf client data.  A NULL name removes
 * every minor node on the list.  Removal of non-internal nodes is
 * logged via i_log_devfs_minor_remove().
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int			circ;
	struct ddi_minor_data	*dmdp, *dmdp1;
	struct ddi_minor_data	**dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before dmdp is freed below */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink the freed node from the list */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
6225 
6226 
6227 int
6228 ddi_in_panic()
6229 {
6230 	return (panicstr != NULL);
6231 }
6232 
6233 
/*
 * Find the first (least significant) bit set in a mask, counting
 * from 1 up; a zero mask yields 0.
 */
int
ddi_ffs(long mask)
{
	int bit = ffs(mask);

	return (bit);
}
6243 
/*
 * Find last bit set.  Repeatedly strip the lowest set bit
 * (msb & (msb - 1)) until a single bit -- the most significant one --
 * remains, then let ffs report its position.  A zero mask yields 0.
 *
 * Algorithm courtesy of Steve Chessin.
 */
int
ddi_fls(long mask)
{
	long	msb = mask;
	long	stripped;

	while ((stripped = (msb & (msb - 1))) != 0)
		msb = stripped;
	return (ffs(msb));
}
6264 
6265 /*
6266  * The ddi_soft_state_* routines comprise generic storage management utilities
6267  * for driver soft state structures (in "the old days," this was done with
6268  * statically sized array - big systems and dynamic loading and unloading
6269  * make heap allocation more attractive).
6270  */
6271 
6272 /*
6273  * Allocate a set of pointers to 'n_items' objects of size 'size'
6274  * bytes.  Each pointer is initialized to nil.
6275  *
6276  * The 'size' and 'n_items' values are stashed in the opaque
6277  * handle returned to the caller.
6278  *
6279  * This implementation interprets 'set of pointers' to mean 'array
6280  * of pointers' but note that nothing in the interface definition
6281  * precludes an implementation that uses, for example, a linked list.
6282  * However there should be a small efficiency gain from using an array
6283  * at lookup time.
6284  *
6285  * NOTE	As an optimization, we make our growable array allocations in
6286  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6287  *	gives us anyway.  It should save us some free/realloc's ..
6288  *
6289  *	As a further optimization, we make the growable array start out
6290  *	with MIN_N_ITEMS in it.
6291  */
6292 
6293 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6294 
/*
 * Initialize a soft-state set: allocate the i_ddi_soft_state header and
 * an initial pointer array of at least MIN_N_ITEMS entries, rounded up
 * to a power of two.  Returns 0 on success, EINVAL if state_p is NULL
 * or size is 0.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	i_ddi_soft_state	*ss;

	if (state_p == NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * fls == ffs exactly when n_items is a power of two;
		 * otherwise round up to the next power of two.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;
	return (0);
}
6324 
/*
 * Allocate a state structure of size 'size' to be associated
 * with item 'item'.
 *
 * In this implementation, the array is extended to
 * allow the requested offset, if needed.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE for a bad handle, negative item,
 * or an item that is already allocated.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		/* size == 0 means the handle was never initialized */
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6424 
6425 /*
6426  * Fetch a pointer to the allocated soft state structure.
6427  *
6428  * This is designed to be cheap.
6429  *
6430  * There's an argument that there should be more checking for
6431  * nil pointers and out of bounds on the array.. but we do a lot
6432  * of that in the alloc/free routines.
6433  *
6434  * An array has the convenience that we don't need to lock read-access
6435  * to it c.f. a linked list.  However our "expanding array" strategy
6436  * means that we should hold a readers lock on the i_ddi_soft_state
6437  * structure.
6438  *
6439  * However, from a performance viewpoint, we need to do it without
6440  * any locks at all -- this also makes it a leaf routine.  The algorithm
6441  * is 'lock-free' because we only discard the pointer arrays at
6442  * ddi_soft_state_fini() time.
6443  */
6444 void *
6445 ddi_get_soft_state(void *state, int item)
6446 {
6447 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6448 
6449 	ASSERT((ss != NULL) && (item >= 0));
6450 
6451 	if (item < ss->n_items && ss->array != NULL)
6452 		return (ss->array[item]);
6453 	return (NULL);
6454 }
6455 
6456 /*
6457  * Free the state structure corresponding to 'item.'   Freeing an
6458  * element that has either gone or was never allocated is not
6459  * considered an error.  Note that we free the state structure, but
6460  * we don't shrink our pointer array, or discard 'dirty' arrays,
6461  * since even a few pointers don't really waste too much memory.
6462  *
6463  * Passing an item number that is out of bounds, or a null pointer will
6464  * provoke an error message.
6465  */
6466 void
6467 ddi_soft_state_free(void *state, int item)
6468 {
6469 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6470 	void			**array;
6471 	void			*element;
6472 	static char		msg[] = "ddi_soft_state_free:";
6473 
6474 	if (ss == NULL) {
6475 		cmn_err(CE_WARN, "%s null handle: %s",
6476 		    msg, mod_containing_pc(caller()));
6477 		return;
6478 	}
6479 
6480 	element = NULL;
6481 
6482 	mutex_enter(&ss->lock);
6483 
6484 	if ((array = ss->array) == NULL || ss->size == 0) {
6485 		cmn_err(CE_WARN, "%s bad handle: %s",
6486 		    msg, mod_containing_pc(caller()));
6487 	} else if (item < 0 || item >= ss->n_items) {
6488 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6489 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6490 	} else if (array[item] != NULL) {
6491 		element = array[item];
6492 		array[item] = NULL;
6493 	}
6494 
6495 	mutex_exit(&ss->lock);
6496 
6497 	if (element)
6498 		kmem_free(element, ss->size);
6499 }
6500 
6501 /*
6502  * Free the entire set of pointers, and any
6503  * soft state structures contained therein.
6504  *
6505  * Note that we don't grab the ss->lock mutex, even though
6506  * we're inspecting the various fields of the data structure.
6507  *
6508  * There is an implicit assumption that this routine will
6509  * never run concurrently with any of the above on this
6510  * particular state structure i.e. by the time the driver
6511  * calls this routine, there should be no other threads
6512  * running in the driver.
6513  */
6514 void
6515 ddi_soft_state_fini(void **state_p)
6516 {
6517 	i_ddi_soft_state	*ss, *dirty;
6518 	int			item;
6519 	static char		msg[] = "ddi_soft_state_fini:";
6520 
6521 	if (state_p == NULL ||
6522 	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6523 		cmn_err(CE_WARN, "%s null handle: %s",
6524 		    msg, mod_containing_pc(caller()));
6525 		return;
6526 	}
6527 
6528 	if (ss->size == 0) {
6529 		cmn_err(CE_WARN, "%s bad handle: %s",
6530 		    msg, mod_containing_pc(caller()));
6531 		return;
6532 	}
6533 
6534 	if (ss->n_items > 0) {
6535 		for (item = 0; item < ss->n_items; item++)
6536 			ddi_soft_state_free(ss, item);
6537 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6538 	}
6539 
6540 	/*
6541 	 * Now delete any dirty arrays from previous 'grow' operations
6542 	 */
6543 	for (dirty = ss->next; dirty; dirty = ss->next) {
6544 		ss->next = dirty->next;
6545 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6546 		kmem_free(dirty, sizeof (*dirty));
6547 	}
6548 
6549 	mutex_destroy(&ss->lock);
6550 	kmem_free(ss, sizeof (*ss));
6551 
6552 	*state_p = NULL;
6553 }
6554 
6555 #define	SS_N_ITEMS_PER_HASH	16
6556 #define	SS_MIN_HASH_SZ		16
6557 #define	SS_MAX_HASH_SZ		4096
6558 
6559 int
6560 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6561     int n_items)
6562 {
6563 	i_ddi_soft_state_bystr	*sss;
6564 	int			hash_sz;
6565 
6566 	ASSERT(state_p && size && n_items);
6567 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6568 		return (EINVAL);
6569 
6570 	/* current implementation is based on hash, convert n_items to hash */
6571 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6572 	if (hash_sz < SS_MIN_HASH_SZ)
6573 		hash_sz = SS_MIN_HASH_SZ;
6574 	else if (hash_sz > SS_MAX_HASH_SZ)
6575 		hash_sz = SS_MAX_HASH_SZ;
6576 
6577 	/* allocate soft_state pool */
6578 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6579 	sss->ss_size = size;
6580 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6581 	    hash_sz, mod_hash_null_valdtor);
6582 	*state_p = (ddi_soft_state_bystr *)sss;
6583 	return (0);
6584 }
6585 
6586 int
6587 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6588 {
6589 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6590 	void			*sso;
6591 	char			*dup_str;
6592 
6593 	ASSERT(sss && str && sss->ss_mod_hash);
6594 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6595 		return (DDI_FAILURE);
6596 	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6597 	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6598 	if (mod_hash_insert(sss->ss_mod_hash,
6599 	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6600 		return (DDI_SUCCESS);
6601 
6602 	/*
6603 	 * The only error from an strhash insert is caused by a duplicate key.
6604 	 * We refuse to tread on an existing elements, so free and fail.
6605 	 */
6606 	kmem_free(dup_str, strlen(dup_str) + 1);
6607 	kmem_free(sso, sss->ss_size);
6608 	return (DDI_FAILURE);
6609 }
6610 
6611 void *
6612 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6613 {
6614 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6615 	void			*sso;
6616 
6617 	ASSERT(sss && str && sss->ss_mod_hash);
6618 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6619 		return (NULL);
6620 
6621 	if (mod_hash_find(sss->ss_mod_hash,
6622 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6623 		return (sso);
6624 	return (NULL);
6625 }
6626 
6627 void
6628 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6629 {
6630 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6631 	void			*sso;
6632 
6633 	ASSERT(sss && str && sss->ss_mod_hash);
6634 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6635 		return;
6636 
6637 	(void) mod_hash_remove(sss->ss_mod_hash,
6638 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6639 	kmem_free(sso, sss->ss_size);
6640 }
6641 
6642 void
6643 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6644 {
6645 	i_ddi_soft_state_bystr	*sss;
6646 
6647 	ASSERT(state_p);
6648 	if (state_p == NULL)
6649 		return;
6650 
6651 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6652 	if (sss == NULL)
6653 		return;
6654 
6655 	ASSERT(sss->ss_mod_hash);
6656 	if (sss->ss_mod_hash) {
6657 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6658 		sss->ss_mod_hash = NULL;
6659 	}
6660 
6661 	kmem_free(sss, sizeof (*sss));
6662 	*state_p = NULL;
6663 }
6664 
6665 /*
6666  * The ddi_strid_* routines provide string-to-index management utilities.
6667  */
6668 /* allocate and initialize an strid set */
6669 int
6670 ddi_strid_init(ddi_strid **strid_p, int n_items)
6671 {
6672 	i_ddi_strid	*ss;
6673 	int		hash_sz;
6674 
6675 	if (strid_p == NULL)
6676 		return (DDI_FAILURE);
6677 
6678 	/* current implementation is based on hash, convert n_items to hash */
6679 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6680 	if (hash_sz < SS_MIN_HASH_SZ)
6681 		hash_sz = SS_MIN_HASH_SZ;
6682 	else if (hash_sz > SS_MAX_HASH_SZ)
6683 		hash_sz = SS_MAX_HASH_SZ;
6684 
6685 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6686 	ss->strid_chunksz = n_items;
6687 	ss->strid_spacesz = n_items;
6688 	ss->strid_space = id_space_create("strid", 1, n_items);
6689 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6690 	    mod_hash_null_valdtor);
6691 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6692 	    mod_hash_null_valdtor);
6693 	*strid_p = (ddi_strid *)ss;
6694 	return (DDI_SUCCESS);
6695 }
6696 
6697 /* allocate an id mapping within the specified set for str, return id */
6698 static id_t
6699 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6700 {
6701 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6702 	id_t		id;
6703 	char		*s;
6704 
6705 	ASSERT(ss && str);
6706 	if ((ss == NULL) || (str == NULL))
6707 		return (0);
6708 
6709 	/*
6710 	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6711 	 * range as compressed as possible.  This is important to minimize
6712 	 * the amount of space used when the id is used as a ddi_soft_state
6713 	 * index by the caller.
6714 	 *
6715 	 * If the id list is exhausted, increase the size of the list
6716 	 * by the chuck size specified in ddi_strid_init and reattempt
6717 	 * the allocation
6718 	 */
6719 	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6720 		id_space_extend(ss->strid_space, ss->strid_spacesz,
6721 		    ss->strid_spacesz + ss->strid_chunksz);
6722 		ss->strid_spacesz += ss->strid_chunksz;
6723 		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6724 			return (0);
6725 	}
6726 
6727 	/*
6728 	 * NOTE: since we create and destroy in unison we can save space by
6729 	 * using bystr key as the byid value.  This means destroy must occur
6730 	 * in (byid, bystr) order.
6731 	 */
6732 	s = i_ddi_strdup(str, KM_SLEEP);
6733 	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6734 	    (mod_hash_val_t)(intptr_t)id) != 0) {
6735 		ddi_strid_free(strid, id);
6736 		return (0);
6737 	}
6738 	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6739 	    (mod_hash_val_t)s) != 0) {
6740 		ddi_strid_free(strid, id);
6741 		return (0);
6742 	}
6743 
6744 	/* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6745 	return (id);
6746 }
6747 
6748 /* allocate an id mapping within the specified set for str, return id */
id_t
ddi_strid_alloc(ddi_strid *strid, char *str)
{
	/* Public wrapper; returns the new id, or 0 on failure. */
	return (i_ddi_strid_alloc(strid, str));
}
6754 
6755 /* return the id within the specified strid given the str */
6756 id_t
6757 ddi_strid_str2id(ddi_strid *strid, char *str)
6758 {
6759 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6760 	id_t		id = 0;
6761 	mod_hash_val_t	hv;
6762 
6763 	ASSERT(ss && str);
6764 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6765 	    (mod_hash_key_t)str, &hv) == 0))
6766 		id = (int)(intptr_t)hv;
6767 	return (id);
6768 }
6769 
6770 /* return str within the specified strid given the id */
6771 char *
6772 ddi_strid_id2str(ddi_strid *strid, id_t id)
6773 {
6774 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6775 	char		*str = NULL;
6776 	mod_hash_val_t	hv;
6777 
6778 	ASSERT(ss && id > 0);
6779 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6780 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6781 		str = (char *)hv;
6782 	return (str);
6783 }
6784 
6785 /* free the id mapping within the specified strid */
6786 void
6787 ddi_strid_free(ddi_strid *strid, id_t id)
6788 {
6789 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6790 	char		*str;
6791 
6792 	ASSERT(ss && id > 0);
6793 	if ((ss == NULL) || (id <= 0))
6794 		return;
6795 
6796 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6797 	str = ddi_strid_id2str(strid, id);
6798 	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
6799 	id_free(ss->strid_space, id);
6800 
6801 	if (str)
6802 		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
6803 }
6804 
6805 /* destroy the strid set */
6806 void
6807 ddi_strid_fini(ddi_strid **strid_p)
6808 {
6809 	i_ddi_strid	*ss;
6810 
6811 	ASSERT(strid_p);
6812 	if (strid_p == NULL)
6813 		return;
6814 
6815 	ss = (i_ddi_strid *)(*strid_p);
6816 	if (ss == NULL)
6817 		return;
6818 
6819 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6820 	if (ss->strid_byid)
6821 		mod_hash_destroy_hash(ss->strid_byid);
6822 	if (ss->strid_byid)
6823 		mod_hash_destroy_hash(ss->strid_bystr);
6824 	if (ss->strid_space)
6825 		id_space_destroy(ss->strid_space);
6826 	kmem_free(ss, sizeof (*ss));
6827 	*strid_p = NULL;
6828 }
6829 
6830 /*
6831  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6832  * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6834  * The double buffer is not freed until dev_info structure destruction
6835  * (by i_ddi_free_node).
6836  */
6837 void
6838 ddi_set_name_addr(dev_info_t *dip, char *name)
6839 {
6840 	char	*buf = DEVI(dip)->devi_addr_buf;
6841 	char	*newaddr;
6842 
6843 	if (buf == NULL) {
6844 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6845 		DEVI(dip)->devi_addr_buf = buf;
6846 	}
6847 
6848 	if (name) {
6849 		ASSERT(strlen(name) < MAXNAMELEN);
6850 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6851 		    (buf + MAXNAMELEN) : buf;
6852 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6853 	} else
6854 		newaddr = NULL;
6855 
6856 	DEVI(dip)->devi_addr = newaddr;
6857 }
6858 
char *
ddi_get_name_addr(dev_info_t *dip)
{
	/* Unit-address last published by ddi_set_name_addr(); may be NULL. */
	return (DEVI(dip)->devi_addr);
}
6864 
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	/* Store the parent-private data pointer on the devinfo node. */
	DEVI(dip)->devi_parent_data = pd;
}
6870 
void *
ddi_get_parent_data(dev_info_t *dip)
{
	/* Retrieve the pointer stored by ddi_set_parent_data(). */
	return (DEVI(dip)->devi_parent_data);
}
6876 
6877 /*
6878  * ddi_name_to_major: returns the major number of a named module,
6879  * derived from the current driver alias binding.
6880  *
6881  * Caveat: drivers should avoid the use of this function, in particular
6882  * together with ddi_get_name/ddi_binding name, as per
6883  *	major = ddi_name_to_major(ddi_get_name(devi));
6884  * ddi_name_to_major() relies on the state of the device/alias binding,
6885  * which can and does change dynamically as aliases are administered
6886  * over time.  An attached device instance cannot rely on the major
6887  * number returned by ddi_name_to_major() to match its own major number.
6888  *
6889  * For driver use, ddi_driver_major() reliably returns the major number
6890  * for the module to which the device was bound at attach time over
6891  * the life of the instance.
6892  *	major = ddi_driver_major(dev_info_t *)
6893  */
major_t
ddi_name_to_major(char *name)
{
	/* Thin wrapper over the modctl name-to-major binding lookup. */
	return (mod_name_to_major(name));
}
6899 
6900 /*
6901  * ddi_major_to_name: Returns the module name bound to a major number.
6902  */
char *
ddi_major_to_name(major_t major)
{
	/* Thin wrapper over the modctl major-to-name binding lookup. */
	return (mod_major_to_name(major));
}
6908 
6909 /*
6910  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6911  * pointed at by 'name.'  A devinfo node is named as a result of calling
6912  * ddi_initchild().
6913  *
6914  * Note: the driver must be held before calling this function!
6915  */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	/* The root node contributes an empty component. */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		/* Unbound nodes have no unit-address. */
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED.  We currently have
		 * two situations where we are called in this state:
		 *   o  For framework processing of a path-oriented alias.
		 *   o  If a SCSA nexus driver calls ddi_devid_register()
		 *	from its tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	/* Emit "/name" or "/name@addr" into the caller-supplied buffer. */
	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6953 
6954 /*
6955  * Spits out the name of device node, typically name@addr, for a given node,
6956  * using the driver name, not the nodename.
6957  *
6958  * Used by match_parent. Not to be used elsewhere.
6959  */
6960 char *
6961 i_ddi_parname(dev_info_t *dip, char *name)
6962 {
6963 	char *addrname;
6964 
6965 	if (dip == ddi_root_node()) {
6966 		*name = '\0';
6967 		return (name);
6968 	}
6969 
6970 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6971 
6972 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6973 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6974 	else
6975 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6976 	return (name);
6977 }
6978 
6979 static char *
6980 pathname_work(dev_info_t *dip, char *path)
6981 {
6982 	char *bp;
6983 
6984 	if (dip == ddi_root_node()) {
6985 		*path = '\0';
6986 		return (path);
6987 	}
6988 	(void) pathname_work(ddi_get_parent(dip), path);
6989 	bp = path + strlen(path);
6990 	(void) ddi_deviname(dip, bp);
6991 	return (path);
6992 }
6993 
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	/* Split work into a separate function to aid debugging. */
	return (pathname_work(dip, path));
}
6999 
7000 char *
7001 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
7002 {
7003 	if (dmdp->dip == NULL)
7004 		*path = '\0';
7005 	else {
7006 		(void) ddi_pathname(dmdp->dip, path);
7007 		if (dmdp->ddm_name) {
7008 			(void) strcat(path, ":");
7009 			(void) strcat(path, dmdp->ddm_name);
7010 		}
7011 	}
7012 	return (path);
7013 }
7014 
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* NULL means no ancestor carried an "obp-path" property. */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): assumes the parent's path is non-empty here, so
	 * that (bp - 1) stays inside the buffer -- confirm an empty
	 * "obp-path" property value cannot reach this point.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
7051 
7052 /*
7053  * return the 'obp-path' based path for the given node, or NULL if the node
7054  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
7055  * function can't be called from interrupt context (since we need to
7056  * lookup a string property).
7057  */
7058 char *
7059 ddi_pathname_obp(dev_info_t *dip, char *path)
7060 {
7061 	ASSERT(!servicing_interrupt());
7062 	if (dip == NULL || path == NULL)
7063 		return (NULL);
7064 
7065 	/* split work into a separate function to aid debugging */
7066 	return (pathname_work_obp(dip, path));
7067 }
7068 
7069 int
7070 ddi_pathname_obp_set(dev_info_t *dip, char *component)
7071 {
7072 	dev_info_t *pdip;
7073 	char *obp_path = NULL;
7074 	int rc = DDI_FAILURE;
7075 
7076 	if (dip == NULL)
7077 		return (DDI_FAILURE);
7078 
7079 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7080 
7081 	pdip = ddi_get_parent(dip);
7082 
7083 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
7084 		(void) ddi_pathname(pdip, obp_path);
7085 	}
7086 
7087 	if (component) {
7088 		(void) strncat(obp_path, "/", MAXPATHLEN);
7089 		(void) strncat(obp_path, component, MAXPATHLEN);
7090 	}
7091 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
7092 	    obp_path);
7093 
7094 	if (obp_path)
7095 		kmem_free(obp_path, MAXPATHLEN);
7096 
7097 	return (rc);
7098 }
7099 
7100 /*
7101  * Given a dev_t, return the pathname of the corresponding device in the
7102  * buffer pointed at by "path."  The buffer is assumed to be large enough
7103  * to hold the pathname of the device (MAXPATHLEN).
7104  *
7105  * The pathname of a device is the pathname of the devinfo node to which
7106  * the device "belongs," concatenated with the character ':' and the name
7107  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
7108  * just the pathname of the devinfo node is returned without driving attach
7109  * of that node.  For a non-zero spec_type, an attach is performed and a
7110  * search of the minor list occurs.
7111  *
7112  * It is possible that the path associated with the dev_t is not
7113  * currently available in the devinfo tree.  In order to have a
7114  * dev_t, a device must have been discovered before, which means
7115  * that the path is always in the instance tree.  The one exception
7116  * to this is if the dev_t is associated with a pseudo driver, in
7117  * which case the device must exist on the pseudo branch of the
7118  * devinfo tree as a result of parsing .conf files.
7119  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int		circ;
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		/* Drop the hold before deciding success/failure. */
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
7174 
7175 /*
7176  * Given a major number and an instance, return the path.
7177  * This interface does NOT drive attach.
7178  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* Node demoted while building path: keep looking. */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
7231 
7232 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
7233 
7234 /*
7235  * Given the dip for a network interface return the ppa for that interface.
7236  *
7237  * In all cases except GLD v0 drivers, the ppa == instance.
7238  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
 * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property named by GLD_DRIVER_PPA
 * ("SUNW,gld_v0_ppa") that can be queried here.
7242  *
7243  * The only time this function is used is when a system is booting over nfs.
7244  * In this case the system has to resolve the pathname of the boot device
7245  * to it's ppa.
7246  */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/*
	 * Return the GLD_DRIVER_PPA property if present (GLD v0 drivers),
	 * otherwise default to the instance number.
	 */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
7254 
7255 /*
7256  * i_ddi_devi_set_ppa() should only be called from gld_register()
7257  * and only for GLD v0 drivers
7258  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* Persist the ppa as the GLD_DRIVER_PPA integer property. */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
7264 
7265 
7266 /*
7267  * Private DDI Console bell functions.
7268  */
void
ddi_ring_console_bell(clock_t duration)
{
	/* Ring the console bell, if a bell function has been registered. */
	if (ddi_console_bell_func != NULL)
		(*ddi_console_bell_func)(duration);
}
7275 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* Register (or clear, with NULL) the console bell function. */
	ddi_console_bell_func = bellfunc;
}
7281 
7282 int
7283 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
7284 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
7285 {
7286 	int (*funcp)() = ddi_dma_allochdl;
7287 	ddi_dma_attr_t dma_attr;
7288 	struct bus_ops *bop;
7289 
7290 	if (attr == (ddi_dma_attr_t *)0)
7291 		return (DDI_DMA_BADATTR);
7292 
7293 	dma_attr = *attr;
7294 
7295 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
7296 	if (bop && bop->bus_dma_allochdl)
7297 		funcp = bop->bus_dma_allochdl;
7298 
7299 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
7300 }
7301 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	/* NOTE(review): HD macro supplies the dip args; defined elsewhere. */
	(void) ddi_dma_freehdl(HD, HD, h);
}
7308 
7309 static uintptr_t dma_mem_list_id = 0;
7310 
7311 
7312 int
7313 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
7314 	ddi_device_acc_attr_t *accattrp, uint_t flags,
7315 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
7316 	size_t *real_length, ddi_acc_handle_t *handlep)
7317 {
7318 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7319 	dev_info_t *dip = hp->dmai_rdip;
7320 	ddi_acc_hdl_t *ap;
7321 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
7322 	uint_t sleepflag, xfermodes;
7323 	int (*fp)(caddr_t);
7324 	int rval;
7325 
7326 	if (waitfp == DDI_DMA_SLEEP)
7327 		fp = (int (*)())KM_SLEEP;
7328 	else if (waitfp == DDI_DMA_DONTWAIT)
7329 		fp = (int (*)())KM_NOSLEEP;
7330 	else
7331 		fp = waitfp;
7332 	*handlep = impl_acc_hdl_alloc(fp, arg);
7333 	if (*handlep == NULL)
7334 		return (DDI_FAILURE);
7335 
7336 	/* check if the cache attributes are supported */
7337 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
7338 		return (DDI_FAILURE);
7339 
7340 	/*
7341 	 * Transfer the meaningful bits to xfermodes.
7342 	 * Double-check if the 3rd party driver correctly sets the bits.
7343 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7344 	 */
7345 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7346 	if (xfermodes == 0) {
7347 		xfermodes = DDI_DMA_STREAMING;
7348 	}
7349 
7350 	/*
7351 	 * initialize the common elements of data access handle
7352 	 */
7353 	ap = impl_acc_hdl_get(*handlep);
7354 	ap->ah_vers = VERS_ACCHDL;
7355 	ap->ah_dip = dip;
7356 	ap->ah_offset = 0;
7357 	ap->ah_len = 0;
7358 	ap->ah_xfermodes = flags;
7359 	ap->ah_acc = *accattrp;
7360 
7361 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7362 	if (xfermodes == DDI_DMA_CONSISTENT) {
7363 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7364 		    flags, accattrp, kaddrp, NULL, ap);
7365 		*real_length = length;
7366 	} else {
7367 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7368 		    flags, accattrp, kaddrp, real_length, ap);
7369 	}
7370 	if (rval == DDI_SUCCESS) {
7371 		ap->ah_len = (off_t)(*real_length);
7372 		ap->ah_addr = *kaddrp;
7373 	} else {
7374 		impl_acc_hdl_free(*handlep);
7375 		*handlep = (ddi_acc_handle_t)NULL;
7376 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7377 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7378 		}
7379 		rval = DDI_FAILURE;
7380 	}
7381 	return (rval);
7382 }
7383 
7384 void
7385 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7386 {
7387 	ddi_acc_hdl_t *ap;
7388 
7389 	ap = impl_acc_hdl_get(*handlep);
7390 	ASSERT(ap);
7391 
7392 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7393 
7394 	/*
7395 	 * free the handle
7396 	 */
7397 	impl_acc_hdl_free(*handlep);
7398 	*handlep = (ddi_acc_handle_t)NULL;
7399 
7400 	if (dma_mem_list_id != 0) {
7401 		ddi_run_callback(&dma_mem_list_id);
7402 	}
7403 }
7404 
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* Build a DMA request describing the buf's memory object. */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: describe the object by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* Shadow page list accompanies the vaddr. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* Dispatch to the bind routine cached in the devinfo node. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7460 
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* A zero-length object cannot be mapped. */
	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}

	/* Build a DMA request describing the virtual-address object. */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* Dispatch to the bind routine cached in the devinfo node. */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7489 
7490 void
7491 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7492 {
7493 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7494 	ddi_dma_cookie_t *cp;
7495 
7496 	cp = hp->dmai_cookie;
7497 	ASSERT(cp);
7498 
7499 	cookiep->dmac_notused = cp->dmac_notused;
7500 	cookiep->dmac_type = cp->dmac_type;
7501 	cookiep->dmac_address = cp->dmac_address;
7502 	cookiep->dmac_size = cp->dmac_size;
7503 	hp->dmai_cookie++;
7504 }
7505 
7506 int
7507 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7508 {
7509 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7510 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7511 		return (DDI_FAILURE);
7512 	} else {
7513 		*nwinp = hp->dmai_nwin;
7514 		return (DDI_SUCCESS);
7515 	}
7516 }
7517 
7518 int
7519 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7520 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7521 {
7522 	int (*funcp)() = ddi_dma_win;
7523 	struct bus_ops *bop;
7524 
7525 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7526 	if (bop && bop->bus_dma_win)
7527 		funcp = bop->bus_dma_win;
7528 
7529 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7530 }
7531 
/*
 * Request 64-bit SBus transfer capability with the given burst sizes
 * on the handle, via the DDI_DMA_SET_SBUS64 miscellaneous-control op.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7538 
/*
 * Default fault-check routine for ddi_check_dma_handle(): simply
 * report the fault flag recorded on the handle (set/cleared by
 * i_ddi_dma_set_fault()/i_ddi_dma_clr_fault() below).
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7544 
7545 int
7546 ddi_check_dma_handle(ddi_dma_handle_t handle)
7547 {
7548 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7549 	int (*check)(ddi_dma_impl_t *);
7550 
7551 	if ((check = hp->dmai_fault_check) == NULL)
7552 		check = i_ddi_dma_fault_check;
7553 
7554 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7555 }
7556 
7557 void
7558 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7559 {
7560 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7561 	void (*notify)(ddi_dma_impl_t *);
7562 
7563 	if (!hp->dmai_fault) {
7564 		hp->dmai_fault = 1;
7565 		if ((notify = hp->dmai_fault_notify) != NULL)
7566 			(*notify)(hp);
7567 	}
7568 }
7569 
7570 void
7571 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7572 {
7573 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7574 	void (*notify)(ddi_dma_impl_t *);
7575 
7576 	if (hp->dmai_fault) {
7577 		hp->dmai_fault = 0;
7578 		if ((notify = hp->dmai_fault_notify) != NULL)
7579 			(*notify)(hp);
7580 	}
7581 }
7582 
7583 /*
7584  * register mapping routines.
7585  */
/*
 * ddi_regs_map_setup: map register set "rnumber" of dip, starting at
 * "offset" for "len" bytes, using the access attributes in "accattrp".
 * On success, *addrp holds the kernel virtual address of the mapping
 * and *handle the access handle for later ddi_get/ddi_put calls; on
 * failure *handle is set to NULL.  Returns the ddi_map() result.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
	ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 * (KM_SLEEP: allocation blocks rather than failing.)
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: release the handle, hand back NULL */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		/* remember mapped address for accesses and later unmap */
		hp->ah_addr = *addrp;
	}

	return (result);
}
7631 
/*
 * ddi_regs_map_free: undo a ddi_regs_map_setup() mapping.  Rebuilds a
 * map request from the parameters recorded in the access handle, asks
 * the parent nexus to unmap, then frees the handle and clears
 * *handlep to NULL.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* mirror the request used at setup time, with DDI_MO_UNMAP */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7660 
7661 int
7662 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7663 	ssize_t dev_advcnt, uint_t dev_datasz)
7664 {
7665 	uint8_t *b;
7666 	uint16_t *w;
7667 	uint32_t *l;
7668 	uint64_t *ll;
7669 
7670 	/* check for total byte count is multiple of data transfer size */
7671 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7672 		return (DDI_FAILURE);
7673 
7674 	switch (dev_datasz) {
7675 	case DDI_DATA_SZ01_ACC:
7676 		for (b = (uint8_t *)dev_addr;
7677 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7678 			ddi_put8(handle, b, 0);
7679 		break;
7680 	case DDI_DATA_SZ02_ACC:
7681 		for (w = (uint16_t *)dev_addr;
7682 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7683 			ddi_put16(handle, w, 0);
7684 		break;
7685 	case DDI_DATA_SZ04_ACC:
7686 		for (l = (uint32_t *)dev_addr;
7687 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7688 			ddi_put32(handle, l, 0);
7689 		break;
7690 	case DDI_DATA_SZ08_ACC:
7691 		for (ll = (uint64_t *)dev_addr;
7692 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7693 			ddi_put64(handle, ll, 0x0ll);
7694 		break;
7695 	default:
7696 		return (DDI_FAILURE);
7697 	}
7698 	return (DDI_SUCCESS);
7699 }
7700 
7701 int
7702 ddi_device_copy(
7703 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7704 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7705 	size_t bytecount, uint_t dev_datasz)
7706 {
7707 	uint8_t *b_src, *b_dst;
7708 	uint16_t *w_src, *w_dst;
7709 	uint32_t *l_src, *l_dst;
7710 	uint64_t *ll_src, *ll_dst;
7711 
7712 	/* check for total byte count is multiple of data transfer size */
7713 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7714 		return (DDI_FAILURE);
7715 
7716 	switch (dev_datasz) {
7717 	case DDI_DATA_SZ01_ACC:
7718 		b_src = (uint8_t *)src_addr;
7719 		b_dst = (uint8_t *)dest_addr;
7720 
7721 		for (; bytecount != 0; bytecount -= 1) {
7722 			ddi_put8(dest_handle, b_dst,
7723 			    ddi_get8(src_handle, b_src));
7724 			b_dst += dest_advcnt;
7725 			b_src += src_advcnt;
7726 		}
7727 		break;
7728 	case DDI_DATA_SZ02_ACC:
7729 		w_src = (uint16_t *)src_addr;
7730 		w_dst = (uint16_t *)dest_addr;
7731 
7732 		for (; bytecount != 0; bytecount -= 2) {
7733 			ddi_put16(dest_handle, w_dst,
7734 			    ddi_get16(src_handle, w_src));
7735 			w_dst += dest_advcnt;
7736 			w_src += src_advcnt;
7737 		}
7738 		break;
7739 	case DDI_DATA_SZ04_ACC:
7740 		l_src = (uint32_t *)src_addr;
7741 		l_dst = (uint32_t *)dest_addr;
7742 
7743 		for (; bytecount != 0; bytecount -= 4) {
7744 			ddi_put32(dest_handle, l_dst,
7745 			    ddi_get32(src_handle, l_src));
7746 			l_dst += dest_advcnt;
7747 			l_src += src_advcnt;
7748 		}
7749 		break;
7750 	case DDI_DATA_SZ08_ACC:
7751 		ll_src = (uint64_t *)src_addr;
7752 		ll_dst = (uint64_t *)dest_addr;
7753 
7754 		for (; bytecount != 0; bytecount -= 8) {
7755 			ddi_put64(dest_handle, ll_dst,
7756 			    ddi_get64(src_handle, ll_src));
7757 			ll_dst += dest_advcnt;
7758 			ll_src += src_advcnt;
7759 		}
7760 		break;
7761 	default:
7762 		return (DDI_FAILURE);
7763 	}
7764 	return (DDI_SUCCESS);
7765 }
7766 
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/*
 * Reverse the byte order of a 16-bit value.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((value >> 8) | (value << 8));
}

/*
 * Reverse the byte order of a 32-bit value.
 */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0xff) << 24) | ((value & 0xff00) << 8) |
	    ((value >> 8) & 0xff00) | (value >> 24));
}

/*
 * Reverse the byte order of a 64-bit value by swapping each 32-bit
 * half and exchanging the halves.
 */
uint64_t
ddi_swap64(uint64_t value)
{
	return (((uint64_t)ddi_swap32((uint32_t)value) << 32) |
	    (uint64_t)ddi_swap32((uint32_t)(value >> 32)));
}
7796 
7797 /*
7798  * Convert a binding name to a driver name.
7799  * A binding name is the name used to determine the driver for a
7800  * device - it may be either an alias for the driver or the name
7801  * of the driver itself.
7802  */
7803 char *
7804 i_binding_to_drv_name(char *bname)
7805 {
7806 	major_t major_no;
7807 
7808 	ASSERT(bname != NULL);
7809 
7810 	if ((major_no = ddi_name_to_major(bname)) == -1)
7811 		return (NULL);
7812 	return (ddi_major_to_name(major_no));
7813 }
7814 
7815 /*
7816  * Search for minor name that has specified dev_t and spec_type.
7817  * If spec_type is zero then any dev_t match works.  Since we
7818  * are returning a pointer to the minor name string, we require the
7819  * caller to do the locking.
7820  */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));

	/* caller must hold dip busy; we return a pointer into its list */
	ASSERT(DEVI_BUSY_OWNED(dip));
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/*
		 * Match nodes of type DDM_MINOR, DDM_INTERNAL_PATH or
		 * DDM_DEFAULT with the requested dev_t; the spec_type
		 * must also match when the caller supplied a char/block
		 * type (a zero spec_type matches either).
		 */
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	return (NULL);
}
7849 
7850 /*
7851  * Find the devt and spectype of the specified minor_name.
7852  * Return DDI_FAILURE if minor_name not found. Since we are
7853  * returning everything via arguments we can do the locking.
7854  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	int			circ;
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		/* hold the driver just long enough to inspect STREAMSTAB */
		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* hold dip busy while walking its minor list */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* skip aliases/other node types and non-matching names */
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7913 
/* serializes allocation of the DEVID_FAB generation number below */
static kmutex_t devid_gen_mutex;
/* generation counter embedded in each fabricated (DEVID_FAB) devid */
static short	devid_gen_number;

#ifdef DEBUG

/* fault-injection knobs: corrupt registered devids for testing */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* when nonzero, trace devid-layer dev_t lists via cmn_err() */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* non-DEBUG kernels: devid tracing compiles away */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7935 
7936 
7937 #ifdef	DEBUG
7938 
7939 static void
7940 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7941 {
7942 	int i;
7943 
7944 	cmn_err(CE_CONT, "%s:\n", msg);
7945 	for (i = 0; i < ndevs; i++) {
7946 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7947 	}
7948 }
7949 
7950 static void
7951 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7952 {
7953 	int i;
7954 
7955 	cmn_err(CE_CONT, "%s:\n", msg);
7956 	for (i = 0; i < npaths; i++) {
7957 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7958 	}
7959 }
7960 
7961 static void
7962 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7963 {
7964 	int i;
7965 
7966 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7967 	for (i = 0; i < ndevs; i++) {
7968 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7969 	}
7970 }
7971 
7972 #endif	/* DEBUG */
7973 
7974 /*
7975  * Register device id into DDI framework.
7976  * Must be called when device is attached.
7977  */
/*
 * i_ddi_devid_register: attach "devid" to dip.  Stamps the devid's
 * driver-name hint with (up to) the last DEVID_HINT_SIZE characters
 * of dip's driver name, encodes the devid as a string, stores it as
 * the devid property on dip, and caches the string in devi_devid_str
 * for use from interrupt-context FMA code.  Returns DDI_SUCCESS or
 * DDI_FAILURE (bad dip/major, invalid devid, or property update
 * failure).
 */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
8039 
8040 int
8041 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
8042 {
8043 	int rval;
8044 
8045 	rval = i_ddi_devid_register(dip, devid);
8046 	if (rval == DDI_SUCCESS) {
8047 		/*
8048 		 * Register devid in devid-to-path cache
8049 		 */
8050 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
8051 			mutex_enter(&DEVI(dip)->devi_lock);
8052 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
8053 			mutex_exit(&DEVI(dip)->devi_lock);
8054 		} else {
8055 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
8056 			    ddi_driver_name(dip), ddi_get_instance(dip));
8057 		}
8058 	} else {
8059 		cmn_err(CE_WARN, "%s%d: failed to register devid",
8060 		    ddi_driver_name(dip), ddi_get_instance(dip));
8061 	}
8062 	return (rval);
8063 }
8064 
8065 /*
8066  * Remove (unregister) device id from DDI framework.
8067  * Must be called when device is detached.
8068  */
8069 static void
8070 i_ddi_devid_unregister(dev_info_t *dip)
8071 {
8072 	if (DEVI(dip)->devi_devid_str) {
8073 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
8074 		DEVI(dip)->devi_devid_str = NULL;
8075 	}
8076 
8077 	/* remove the devid property */
8078 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
8079 }
8080 
/*
 * Public devid unregistration: clear the registered-devid flag under
 * devi_lock, drop the entry from the devid-to-path cache, then remove
 * the devid property and cached string from the node.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
8090 
8091 /*
8092  * Allocate and initialize a device id.
8093  */
/*
 * ddi_devid_init: allocate a devid of "devid_type" for dip and return
 * it via *ret_devid.  For externally-supplied types the "id" payload
 * of "nbytes" is copied in; for DEVID_FAB the id is fabricated from
 * the host id, a timestamp and a generation number (and nbytes/id
 * must be 0/NULL on entry).  Returns DDI_SUCCESS or DDI_FAILURE for
 * inconsistent arguments or an unknown type.
 */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	/* did_id is declared as one char; add nbytes-1 for the payload */
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* externally-supplied types require a non-empty payload */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated type: caller must not supply a payload */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* payload = hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* KM_SLEEP allocations block rather than fail; check is defensive */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		uint32_t	hostid;
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = zone_get_hostid(NULL);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		/* externally supplied id: copy the payload verbatim */
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
8206 
/*
 * Return (via *ret_devid) the devid decoded from dip's devid property;
 * wrapper around i_ddi_devi_get_devid() with no specific dev_t.
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
8212 
/*
 * Look up dip's devid property -- trying a dev_t-specific lookup
 * first, then a DDI_DEV_T_ANY wildcard -- and decode the string form
 * into a binary devid returned via *ret_devid.  Returns DDI_SUCCESS
 * or DDI_FAILURE.  NOTE(review): *ret_devid is allocated by
 * ddi_devid_str_decode(); presumably the caller frees it with
 * ddi_devid_free() -- confirm against callers.
 */
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		/* fall back to a wildcard lookup unless already wildcard */
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
		    DDI_PROP_SUCCESS)) {
			return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}
8239 
8240 /*
8241  * Return a copy of the device id for dev_t
8242  */
8243 int
8244 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
8245 {
8246 	dev_info_t	*dip;
8247 	int		rval;
8248 
8249 	/* get the dip */
8250 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
8251 		return (DDI_FAILURE);
8252 
8253 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
8254 
8255 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
8256 	return (rval);
8257 }
8258 
8259 /*
8260  * Return a copy of the minor name for dev_t and spec_type
8261  */
/*
 * Copy dev's minor name (matching dev/spec_type) into a freshly
 * allocated right-sized buffer returned via *minor_name; *minor_name
 * is NULL and DDI_FAILURE is returned if the node or name is not
 * found.  NOTE(review): caller presumably frees *minor_name with
 * kmem_free(name, strlen(name) + 1) -- confirm against callers.
 */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	char		*buf;
	int		circ;
	dev_info_t	*dip;
	char		*nm;
	int		rval;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
		*minor_name = NULL;
		return (DDI_FAILURE);
	}

	/* Find the minor name and copy into max size buf */
	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	ndi_devi_enter(dip, &circ);
	/* copy while busy-held: nm points into the node's minor list */
	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
	if (nm)
		(void) strcpy(buf, nm);
	ndi_devi_exit(dip, circ);
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */

	if (nm) {
		/* duplicate into min size buf for return result */
		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
		rval = DDI_SUCCESS;
	} else {
		*minor_name = NULL;
		rval = DDI_FAILURE;
	}

	/* free max size buf and return */
	kmem_free(buf, MAXNAMELEN);
	return (rval);
}
8298 
8299 int
8300 ddi_lyr_devid_to_devlist(
8301 	ddi_devid_t	devid,
8302 	char		*minor_name,
8303 	int		*retndevs,
8304 	dev_t		**retdevs)
8305 {
8306 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8307 
8308 	if (e_devid_cache_to_devt_list(devid, minor_name,
8309 	    retndevs, retdevs) == DDI_SUCCESS) {
8310 		ASSERT(*retndevs > 0);
8311 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8312 		    *retndevs, *retdevs);
8313 		return (DDI_SUCCESS);
8314 	}
8315 
8316 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8317 		return (DDI_FAILURE);
8318 	}
8319 
8320 	if (e_devid_cache_to_devt_list(devid, minor_name,
8321 	    retndevs, retdevs) == DDI_SUCCESS) {
8322 		ASSERT(*retndevs > 0);
8323 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8324 		    *retndevs, *retdevs);
8325 		return (DDI_SUCCESS);
8326 	}
8327 
8328 	return (DDI_FAILURE);
8329 }
8330 
/*
 * Free a dev_t list of "ndevs" entries previously returned by
 * ddi_lyr_devid_to_devlist().
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8336 
8337 /*
8338  * Note: This will need to be fixed if we ever allow processes to
8339  * have more than one data model per exec.
8340  */
/*
 * Return the data model of the current user process.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}
8346 
/*
 * Normalize a data-model value: mask to the model bits and clear the
 * DDI_MODEL_NATIVE flag so only the base model remains.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8352 
8353 /*
8354  * ddi interfaces managing storage and retrieval of eventcookies.
8355  */
8356 
8357 /*
8358  * Invoke bus nexus driver's implementation of the
8359  * (*bus_remove_eventcall)() interface to remove a registered
8360  * callback handler for "event".
8361  */
/*
 * Remove an event callback previously registered via
 * ddi_add_event_handler(); "id" is the callback id returned at
 * registration time.  Returns the nexus's removal result, or
 * DDI_FAILURE for a NULL id.
 */
int
ddi_remove_event_handler(ddi_callback_id_t id)
{
	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
	dev_info_t *ddip;

	ASSERT(cb);
	/* runtime guard as well, since ASSERT vanishes in non-DEBUG */
	if (!cb) {
		return (DDI_FAILURE);
	}

	/* recover the devinfo recorded in the callback's event cookie */
	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
	return (ndi_busop_remove_eventcall(ddip, id));
}
8376 
8377 /*
8378  * Invoke bus nexus driver's implementation of the
8379  * (*bus_add_eventcall)() interface to register a callback handler
8380  * for "event".
8381  */
/*
 * Register "handler" (with private "arg") to be invoked when "event"
 * is posted against dip; *id receives a callback id usable with
 * ddi_remove_event_handler().
 */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8389 
8390 
8391 /*
8392  * Return a handle for event "name" by calling up the device tree
8393  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8394  * by a bus nexus or top of dev_info tree is reached.
8395  */
/*
 * Obtain an event cookie for "name" on behalf of dip by asking the
 * bus nexus hierarchy via ndi_busop_get_eventcookie().
 */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8403 
8404 /*
8405  * This procedure is provided as the general callback function when
8406  * umem_lockmemory calls as_add_callback for long term memory locking.
8407  * When as_unmap, as_setprot, or as_free encounter segments which have
8408  * locked memory, this callback will be invoked.
8409  */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	/* arg is the locked-memory cookie registered with as_add_callback */
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/* remove the cookie if reference goes to zero */
	/* atomic decrement: the cookie may be shared with the unlock path */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8433 
8434 /*
8435  * The following two Consolidation Private routines provide generic
8436  * interfaces to increase/decrease the amount of device-locked memory.
8437  *
8438  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8439  * must be called every time i_ddi_incr_locked_memory() is called.
8440  */
8441 int
8442 /* ARGSUSED */
8443 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8444 {
8445 	ASSERT(procp != NULL);
8446 	mutex_enter(&procp->p_lock);
8447 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8448 		mutex_exit(&procp->p_lock);
8449 		return (ENOMEM);
8450 	}
8451 	mutex_exit(&procp->p_lock);
8452 	return (0);
8453 }
8454 
8455 /*
8456  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8457  * must be called every time i_ddi_decr_locked_memory() is called.
8458  */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* p_lock protects the process's locked-memory accounting */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8468 
8469 /*
8470  * This routine checks if the max-locked-memory resource ctl is
8471  * exceeded, if not increments it, grabs a hold on the project.
8472  * Returns 0 if successful otherwise returns error code
8473  */
8474 static int
8475 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8476 {
8477 	proc_t		*procp;
8478 	int		ret;
8479 
8480 	ASSERT(cookie);
8481 	procp = cookie->procp;
8482 	ASSERT(procp);
8483 
8484 	if ((ret = i_ddi_incr_locked_memory(procp,
8485 	    cookie->size)) != 0) {
8486 		return (ret);
8487 	}
8488 	return (0);
8489 }
8490 
8491 /*
8492  * Decrements the max-locked-memory resource ctl and releases
8493  * the hold on the project that was acquired during umem_incr_devlockmem
8494  */
8495 static void
8496 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8497 {
8498 	proc_t		*proc;
8499 
8500 	proc = (proc_t *)cookie->procp;
8501 	if (!proc)
8502 		return;
8503 
8504 	i_ddi_decr_locked_memory(proc, cookie->size);
8505 }
8506 
8507 /*
8508  * A consolidation private function which is essentially equivalent to
8509  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8510  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8511  * the ops_vector is valid.
8512  *
8513  * Lock the virtual address range in the current process and create a
8514  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8515  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8516  * to user space.
8517  *
8518  * Note: The resource control accounting currently uses a full charge model
8519  * in other words attempts to lock the same/overlapping areas of memory
8520  * will deduct the full size of the buffer from the projects running
8521  * counter for the device locked memory.
8522  *
8523  * addr, size should be PAGESIZE aligned
8524  *
8525  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8526  *	identifies whether the locked memory will be read or written or both
8527  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8528  * be maintained for an indefinitely long period (essentially permanent),
8529  * rather than for what would be required for a typical I/O completion.
8530  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8531  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8532  * This is to prevent a deadlock if a file truncation is attempted after
8533  * after the locking is done.
8534  *
8535  * Returns 0 on success
8536  *	EINVAL - for invalid parameters
8537  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8538  *	ENOMEM - is returned if the current request to lock memory exceeds
8539  *		*.max-locked-memory resource control value.
8540  *      EFAULT - memory pertains to a regular file mapped shared and
8541  *		and DDI_UMEMLOCK_LONGTERM flag is set
8542  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8543  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	/* Charge the request against the *.max-locked-memory rctl */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		/* walk every segment overlapping [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8716 
8717 /*
8718  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8719  * the cookie.  Called from i_ddi_umem_unlock_thread.
8720  */
8721 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8783 
8784 /*
8785  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8786  *
8787  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8788  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8789  * via calls to ddi_umem_unlock.
8790  */
8791 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			/* mark the thread CPR-safe while it blocks */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8823 
8824 /*
8825  * Start the thread that will process the ddi_umem_unlock list if it is
8826  * not already started (i_ddi_umem_unlock_thread).
8827  */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	/* the mutex makes the check-and-create atomic across racing callers */
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8839 
8840 /*
8841  * Lock the virtual address range in the current process and create a
8842  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8843  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8844  * to user space.
8845  *
8846  * Note: The resource control accounting currently uses a full charge model
8847  * in other words attempts to lock the same/overlapping areas of memory
8848  * will deduct the full size of the buffer from the projects running
8849  * counter for the device locked memory. This applies to umem_lockmemory too.
8850  *
8851  * addr, size should be PAGESIZE aligned
8852  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8853  *	identifies whether the locked memory will be read or written or both
8854  *
8855  * Returns 0 on success
8856  *	EINVAL - for invalid parameters
8857  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8858  *	ENOMEM - is returned if the current request to lock memory exceeds
8859  *		*.max-locked-memory resource control value.
8860  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8861  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	/* Charge the request against the *.max-locked-memory rctl */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8947 
8948 /*
8949  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8950  * unlocked by i_ddi_umem_unlock_thread.
8951  */
8952 
8953 void
8954 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8955 {
8956 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8957 
8958 	ASSERT(p->type == UMEM_LOCKED);
8959 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8960 	ASSERT(ddi_umem_unlock_thread != NULL);
8961 
8962 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8963 	/*
8964 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8965 	 * if it's called in the interrupt context. Otherwise, unlock pages
8966 	 * immediately.
8967 	 */
8968 	if (servicing_interrupt()) {
8969 		/* queue the unlock request and notify the thread */
8970 		mutex_enter(&ddi_umem_unlock_mutex);
8971 		if (ddi_umem_unlock_head == NULL) {
8972 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8973 			cv_broadcast(&ddi_umem_unlock_cv);
8974 		} else {
8975 			ddi_umem_unlock_tail->unl_forw = p;
8976 			ddi_umem_unlock_tail = p;
8977 		}
8978 		mutex_exit(&ddi_umem_unlock_mutex);
8979 	} else {
8980 		/* unlock the pages right away */
8981 		(void) i_ddi_umem_unlock(p);
8982 	}
8983 }
8984 
8985 /*
8986  * Create a buf structure from a ddi_umem_cookie
8987  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8988  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8989  * off, len - identifies the portion of the memory represented by the cookie
8990  *		that the buf points to.
8991  *	NOTE: off, len need to follow the alignment/size restrictions of the
8992  *		device (dev) that this buf will be passed to. Some devices
8993  *		will accept unrestricted alignment/size, whereas others (such as
8994  *		st) require some block-size alignment/size. It is the caller's
8995  *		responsibility to ensure that the alignment/size restrictions
8996  *		are met (we cannot assert as we do not know the restrictions)
8997  *
8998  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8999  *		the flags used in ddi_umem_lock
9000  *
9001  * The following three arguments are used to initialize fields in the
9002  * buf structure and are uninterpreted by this routine.
9003  *
9004  * dev
9005  * blkno
9006  * iodone
9007  *
9008  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
9009  *
9010  * Returns a buf structure pointer on success (to be freed by freerbuf)
9011  *	NULL on any parameter error or memory alloc failure
9012  *
9013  */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	/*
	 * NOTE(review): subsumed by the (off + len) check above unless
	 * that sum overflows; kept as belt-and-suspenders.
	 */
	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
	    (p->procp == NULL) : (p->procp != NULL));

	/* may return NULL only for DDI_UMEM_NOSLEEP */
	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked cookies carry a page array; expose it via B_SHADOW */
		bp->b_flags |= B_SHADOW;
		/* NOTE(review): duplicate of the alignment ASSERT above */
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
9075 
9076 /*
9077  * Fault-handling and related routines
9078  */
9079 
9080 ddi_devstate_t
9081 ddi_get_devstate(dev_info_t *dip)
9082 {
9083 	if (DEVI_IS_DEVICE_OFFLINE(dip))
9084 		return (DDI_DEVSTATE_OFFLINE);
9085 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
9086 		return (DDI_DEVSTATE_DOWN);
9087 	else if (DEVI_IS_BUS_QUIESCED(dip))
9088 		return (DDI_DEVSTATE_QUIESCED);
9089 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
9090 		return (DDI_DEVSTATE_DEGRADED);
9091 	else
9092 		return (DDI_DEVSTATE_UP);
9093 }
9094 
9095 void
9096 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
9097 	ddi_fault_location_t location, const char *message)
9098 {
9099 	struct ddi_fault_event_data fd;
9100 	ddi_eventcookie_t ec;
9101 
9102 	/*
9103 	 * Assemble all the information into a fault-event-data structure
9104 	 */
9105 	fd.f_dip = dip;
9106 	fd.f_impact = impact;
9107 	fd.f_location = location;
9108 	fd.f_message = message;
9109 	fd.f_oldstate = ddi_get_devstate(dip);
9110 
9111 	/*
9112 	 * Get eventcookie from defining parent.
9113 	 */
9114 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
9115 	    DDI_SUCCESS)
9116 		return;
9117 
9118 	(void) ndi_post_event(dip, dip, ec, &fd);
9119 }
9120 
9121 char *
9122 i_ddi_devi_class(dev_info_t *dip)
9123 {
9124 	return (DEVI(dip)->devi_device_class);
9125 }
9126 
9127 int
9128 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
9129 {
9130 	struct dev_info *devi = DEVI(dip);
9131 
9132 	mutex_enter(&devi->devi_lock);
9133 
9134 	if (devi->devi_device_class)
9135 		kmem_free(devi->devi_device_class,
9136 		    strlen(devi->devi_device_class) + 1);
9137 
9138 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
9139 	    != NULL) {
9140 		mutex_exit(&devi->devi_lock);
9141 		return (DDI_SUCCESS);
9142 	}
9143 
9144 	mutex_exit(&devi->devi_lock);
9145 
9146 	return (DDI_FAILURE);
9147 }
9148 
9149 
9150 /*
9151  * Task Queues DDI interfaces.
9152  */
9153 
9154 /* ARGSUSED */
9155 ddi_taskq_t *
9156 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
9157     pri_t pri, uint_t cflags)
9158 {
9159 	char full_name[TASKQ_NAMELEN];
9160 	const char *tq_name;
9161 	int nodeid = 0;
9162 
9163 	if (dip == NULL)
9164 		tq_name = name;
9165 	else {
9166 		nodeid = ddi_get_instance(dip);
9167 
9168 		if (name == NULL)
9169 			name = "tq";
9170 
9171 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
9172 		    ddi_driver_name(dip), name);
9173 
9174 		tq_name = full_name;
9175 	}
9176 
9177 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
9178 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
9179 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
9180 }
9181 
9182 void
9183 ddi_taskq_destroy(ddi_taskq_t *tq)
9184 {
9185 	taskq_destroy((taskq_t *)tq);
9186 }
9187 
9188 int
9189 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
9190     void *arg, uint_t dflags)
9191 {
9192 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
9193 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
9194 
9195 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
9196 }
9197 
9198 void
9199 ddi_taskq_wait(ddi_taskq_t *tq)
9200 {
9201 	taskq_wait((taskq_t *)tq);
9202 }
9203 
9204 void
9205 ddi_taskq_suspend(ddi_taskq_t *tq)
9206 {
9207 	taskq_suspend((taskq_t *)tq);
9208 }
9209 
9210 boolean_t
9211 ddi_taskq_suspended(ddi_taskq_t *tq)
9212 {
9213 	return (taskq_suspended((taskq_t *)tq));
9214 }
9215 
9216 void
9217 ddi_taskq_resume(ddi_taskq_t *tq)
9218 {
9219 	taskq_resume((taskq_t *)tq);
9220 }
9221 
9222 int
9223 ddi_parse(
9224 	const char	*ifname,
9225 	char		*alnum,
9226 	uint_t		*nump)
9227 {
9228 	const char	*p;
9229 	int		l;
9230 	ulong_t		num;
9231 	boolean_t	nonum = B_TRUE;
9232 	char		c;
9233 
9234 	l = strlen(ifname);
9235 	for (p = ifname + l; p != ifname; l--) {
9236 		c = *--p;
9237 		if (!isdigit(c)) {
9238 			(void) strlcpy(alnum, ifname, l + 1);
9239 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
9240 				return (DDI_FAILURE);
9241 			break;
9242 		}
9243 		nonum = B_FALSE;
9244 	}
9245 	if (l == 0 || nonum)
9246 		return (DDI_FAILURE);
9247 
9248 	*nump = num;
9249 	return (DDI_SUCCESS);
9250 }
9251 
9252 /*
9253  * Default initialization function for drivers that don't need to quiesce.
9254  */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* Nothing to quiesce; unconditionally report success. */
	return (DDI_SUCCESS);
}
9261 
9262 /*
9263  * Initialization function for drivers that should implement quiesce()
9264  * but haven't yet.
9265  */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Quiesce is not implemented by this driver; report failure. */
	return (DDI_FAILURE);
}
9272 
9273 char *
9274 ddi_strdup(const char *str, int flag)
9275 {
9276 	int	n;
9277 	char	*ptr;
9278 
9279 	ASSERT(str != NULL);
9280 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9281 
9282 	n = strlen(str);
9283 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9284 		return (NULL);
9285 	bcopy(str, ptr, n + 1);
9286 	return (ptr);
9287 }
9288 
char *
strdup(const char *str)
{
	/* Convenience wrapper: duplicate str with a sleeping allocation. */
	return (ddi_strdup(str, KM_SLEEP));
}
9294 
void
strfree(char *str)
{
	/* Free a string allocated by strdup()/ddi_strdup(). */
	ASSERT(str != NULL);
	kmem_free(str, strlen(str) + 1);
}
9301 
9302 /*
9303  * Generic DDI callback interfaces.
9304  */
9305 
9306 int
9307 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
9308     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
9309 {
9310 	ddi_cb_t	*cbp;
9311 
9312 	ASSERT(dip != NULL);
9313 	ASSERT(DDI_CB_FLAG_VALID(flags));
9314 	ASSERT(cbfunc != NULL);
9315 	ASSERT(ret_hdlp != NULL);
9316 
9317 	/* Sanity check the context */
9318 	ASSERT(!servicing_interrupt());
9319 	if (servicing_interrupt())
9320 		return (DDI_FAILURE);
9321 
9322 	/* Validate parameters */
9323 	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
9324 	    (cbfunc == NULL) || (ret_hdlp == NULL))
9325 		return (DDI_EINVAL);
9326 
9327 	/* Check for previous registration */
9328 	if (DEVI(dip)->devi_cb_p != NULL)
9329 		return (DDI_EALREADY);
9330 
9331 	/* Allocate and initialize callback */
9332 	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
9333 	cbp->cb_dip = dip;
9334 	cbp->cb_func = cbfunc;
9335 	cbp->cb_arg1 = arg1;
9336 	cbp->cb_arg2 = arg2;
9337 	cbp->cb_flags = flags;
9338 	DEVI(dip)->devi_cb_p = cbp;
9339 
9340 	/* If adding an IRM callback, notify IRM */
9341 	if (flags & DDI_CB_FLAG_INTR)
9342 		i_ddi_irm_set_cb(dip, B_TRUE);
9343 
9344 	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9345 	return (DDI_SUCCESS);
9346 }
9347 
9348 int
9349 ddi_cb_unregister(ddi_cb_handle_t hdl)
9350 {
9351 	ddi_cb_t	*cbp;
9352 	dev_info_t	*dip;
9353 
9354 	ASSERT(hdl != NULL);
9355 
9356 	/* Sanity check the context */
9357 	ASSERT(!servicing_interrupt());
9358 	if (servicing_interrupt())
9359 		return (DDI_FAILURE);
9360 
9361 	/* Validate parameters */
9362 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9363 	    ((dip = cbp->cb_dip) == NULL))
9364 		return (DDI_EINVAL);
9365 
9366 	/* If removing an IRM callback, notify IRM */
9367 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9368 		i_ddi_irm_set_cb(dip, B_FALSE);
9369 
9370 	/* Destroy the callback */
9371 	kmem_free(cbp, sizeof (ddi_cb_t));
9372 	DEVI(dip)->devi_cb_p = NULL;
9373 
9374 	return (DDI_SUCCESS);
9375 }
9376 
9377 /*
9378  * Platform independent DR routines
9379  */
9380 
9381 static int
9382 ndi2errno(int n)
9383 {
9384 	int err = 0;
9385 
9386 	switch (n) {
9387 		case NDI_NOMEM:
9388 			err = ENOMEM;
9389 			break;
9390 		case NDI_BUSY:
9391 			err = EBUSY;
9392 			break;
9393 		case NDI_FAULT:
9394 			err = EFAULT;
9395 			break;
9396 		case NDI_FAILURE:
9397 			err = EIO;
9398 			break;
9399 		case NDI_SUCCESS:
9400 			break;
9401 		case NDI_BADHANDLE:
9402 		default:
9403 			err = EINVAL;
9404 			break;
9405 	}
9406 	return (err);
9407 }
9408 
9409 /*
9410  * Prom tree node list
9411  */
struct ptnode {
	pnode_t		nodeid;		/* prom node id of a selected node */
	struct ptnode	*next;		/* next entry on the singly-linked list */
};
9416 
9417 /*
9418  * Prom tree walk arg
9419  */
struct pta {
	dev_info_t	*pdip;	/* parent dip under which branches are built */
	devi_branch_t	*bp;	/* caller-supplied branch descriptor */
	uint_t		flags;	/* DEVI_BRANCH_* flags */
	dev_info_t	*fdip;	/* out: dip from e_ddi_branch_configure() */
	struct ptnode	*head;	/* list of prom nodes selected by visit_node */
};
9427 
9428 static void
9429 visit_node(pnode_t nodeid, struct pta *ap)
9430 {
9431 	struct ptnode	**nextp;
9432 	int		(*select)(pnode_t, void *, uint_t);
9433 
9434 	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);
9435 
9436 	select = ap->bp->create.prom_branch_select;
9437 
9438 	ASSERT(select);
9439 
9440 	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {
9441 
9442 		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
9443 			;
9444 
9445 		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);
9446 
9447 		(*nextp)->nodeid = nodeid;
9448 	}
9449 
9450 	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
9451 		return;
9452 
9453 	nodeid = prom_childnode(nodeid);
9454 	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
9455 		visit_node(nodeid, ap);
9456 		nodeid = prom_nextnode(nodeid);
9457 	}
9458 }
9459 
9460 /*
9461  * NOTE: The caller of this function must check for device contracts
9462  * or LDI callbacks against this dip before setting the dip offline.
9463  */
9464 static int
9465 set_infant_dip_offline(dev_info_t *dip, void *arg)
9466 {
9467 	char	*path = (char *)arg;
9468 
9469 	ASSERT(dip);
9470 	ASSERT(arg);
9471 
9472 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
9473 		(void) ddi_pathname(dip, path);
9474 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
9475 		    "node: %s", path);
9476 		return (DDI_FAILURE);
9477 	}
9478 
9479 	mutex_enter(&(DEVI(dip)->devi_lock));
9480 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
9481 		DEVI_SET_DEVICE_OFFLINE(dip);
9482 	mutex_exit(&(DEVI(dip)->devi_lock));
9483 
9484 	return (DDI_SUCCESS);
9485 }
9486 
typedef struct result {
	char	*path;		/* scratch buffer for device path rendering */
	int	result;		/* accumulated DDI_SUCCESS / DDI_FAILURE */
} result_t;
9491 
9492 static int
9493 dip_set_offline(dev_info_t *dip, void *arg)
9494 {
9495 	int end;
9496 	result_t *resp = (result_t *)arg;
9497 
9498 	ASSERT(dip);
9499 	ASSERT(resp);
9500 
9501 	/*
9502 	 * We stop the walk if e_ddi_offline_notify() returns
9503 	 * failure, because this implies that one or more consumers
9504 	 * (either LDI or contract based) has blocked the offline.
9505 	 * So there is no point in conitnuing the walk
9506 	 */
9507 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9508 		resp->result = DDI_FAILURE;
9509 		return (DDI_WALK_TERMINATE);
9510 	}
9511 
9512 	/*
9513 	 * If set_infant_dip_offline() returns failure, it implies
9514 	 * that we failed to set a particular dip offline. This
9515 	 * does not imply that the offline as a whole should fail.
9516 	 * We want to do the best we can, so we continue the walk.
9517 	 */
9518 	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
9519 		end = DDI_SUCCESS;
9520 	else
9521 		end = DDI_FAILURE;
9522 
9523 	e_ddi_offline_finalize(dip, end);
9524 
9525 	return (DDI_WALK_CONTINUE);
9526 }
9527 
9528 /*
9529  * The call to e_ddi_offline_notify() exists for the
9530  * unlikely error case that a branch we are trying to
9531  * create already exists and has device contracts or LDI
9532  * event callbacks against it.
9533  *
9534  * We allow create to succeed for such branches only if
9535  * no constraints block the offline.
9536  */
9537 static int
9538 branch_set_offline(dev_info_t *dip, char *path)
9539 {
9540 	int		circ;
9541 	int		end;
9542 	result_t	res;
9543 
9544 
9545 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
9546 		return (DDI_FAILURE);
9547 	}
9548 
9549 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
9550 		end = DDI_SUCCESS;
9551 	else
9552 		end = DDI_FAILURE;
9553 
9554 	e_ddi_offline_finalize(dip, end);
9555 
9556 	if (end == DDI_FAILURE)
9557 		return (DDI_FAILURE);
9558 
9559 	res.result = DDI_SUCCESS;
9560 	res.path = path;
9561 
9562 	ndi_devi_enter(dip, &circ);
9563 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
9564 	ndi_devi_exit(dip, circ);
9565 
9566 	return (res.result);
9567 }
9568 
/*
 * Create (or find) devinfo branches for each prom node selected by the
 * caller's prom_branch_select callback under ap->pdip.  Each branch is
 * held, set offline, and optionally configured.  Returns 0 on success
 * or the first error encountered (later branches are still attempted).
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* collect the selected prom nodes onto ap->head */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	/* consume the node list, creating one branch per entry */
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		/* hold the parent busy while manipulating its children */
		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			/* record the failure but keep processing the list */
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			/* preserve only the first configure error */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
9676 
9677 static int
9678 sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
9679 {
9680 	int			rv, circ, len;
9681 	int			i, flags, ret;
9682 	dev_info_t		*dip;
9683 	char			*nbuf;
9684 	char			*path;
9685 	static const char	*noname = "<none>";
9686 
9687 	ASSERT(pdip);
9688 	ASSERT(DEVI_BUSY_OWNED(pdip));
9689 
9690 	flags = 0;
9691 
9692 	/*
9693 	 * Creating the root of a branch ?
9694 	 */
9695 	if (rdipp) {
9696 		*rdipp = NULL;
9697 		flags = DEVI_BRANCH_ROOT;
9698 	}
9699 
9700 	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
9701 	rv = bp->create.sid_branch_create(dip, bp->arg, flags);
9702 
9703 	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);
9704 
9705 	if (rv == DDI_WALK_ERROR) {
9706 		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
9707 		    " properties on devinfo node %p",  (void *)dip);
9708 		goto fail;
9709 	}
9710 
9711 	len = OBP_MAXDRVNAME;
9712 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
9713 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
9714 	    != DDI_PROP_SUCCESS) {
9715 		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
9716 		    "no name property", (void *)dip);
9717 		goto fail;
9718 	}
9719 
9720 	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
9721 	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
9722 		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
9723 		    " for devinfo node %p", nbuf, (void *)dip);
9724 		goto fail;
9725 	}
9726 
9727 	kmem_free(nbuf, OBP_MAXDRVNAME);
9728 
9729 	/*
9730 	 * Ignore bind failures just like boot does
9731 	 */
9732 	(void) ndi_devi_bind_driver(dip, 0);
9733 
9734 	switch (rv) {
9735 	case DDI_WALK_CONTINUE:
9736 	case DDI_WALK_PRUNESIB:
9737 		ndi_devi_enter(dip, &circ);
9738 
9739 		i = DDI_WALK_CONTINUE;
9740 		for (; i == DDI_WALK_CONTINUE; ) {
9741 			i = sid_node_create(dip, bp, NULL);
9742 		}
9743 
9744 		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
9745 		if (i == DDI_WALK_ERROR)
9746 			rv = i;
9747 		/*
9748 		 * If PRUNESIB stop creating siblings
9749 		 * of dip's child. Subsequent walk behavior
9750 		 * is determined by rv returned by dip.
9751 		 */
9752 
9753 		ndi_devi_exit(dip, circ);
9754 		break;
9755 	case DDI_WALK_TERMINATE:
9756 		/*
9757 		 * Don't create children and ask our parent
9758 		 * to not create siblings either.
9759 		 */
9760 		rv = DDI_WALK_PRUNESIB;
9761 		break;
9762 	case DDI_WALK_PRUNECHILD:
9763 		/*
9764 		 * Don't create children, but ask parent to continue
9765 		 * with siblings.
9766 		 */
9767 		rv = DDI_WALK_CONTINUE;
9768 		break;
9769 	default:
9770 		ASSERT(0);
9771 		break;
9772 	}
9773 
9774 	if (rdipp)
9775 		*rdipp = dip;
9776 
9777 	/*
9778 	 * Set device offline - only the "configure" op should cause an attach.
9779 	 * Note that it is safe to set the dip offline without checking
9780 	 * for either device contract or layered driver (LDI) based constraints
9781 	 * since there cannot be any contracts or LDI opens of this device.
9782 	 * This is because this node is a newly created dip with the parent busy
9783 	 * held, so no other thread can come in and attach this dip. A dip that
9784 	 * has never been attached cannot have contracts since by definition
9785 	 * a device contract (an agreement between a process and a device minor
9786 	 * node) can only be created against a device that has minor nodes
9787 	 * i.e is attached. Similarly an LDI open will only succeed if the
9788 	 * dip is attached. We assert below that the dip is not attached.
9789 	 */
9790 	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
9791 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
9792 	ret = set_infant_dip_offline(dip, path);
9793 	ASSERT(ret == DDI_SUCCESS);
9794 	kmem_free(path, MAXPATHLEN);
9795 
9796 	return (rv);
9797 fail:
9798 	(void) ndi_devi_free(dip);
9799 	kmem_free(nbuf, OBP_MAXDRVNAME);
9800 	return (DDI_WALK_ERROR);
9801 }
9802 
/*
 * Repeatedly create SID branch roots under pdip via sid_node_create()
 * until the branch callback stops the walk (DDI_WALK_PRUNESIB) or
 * fails (DDI_WALK_ERROR).  Each created root is held; when
 * DEVI_BRANCH_CONFIGURE is set the branch is also configured and the
 * first configuration error is preserved in rv.  Returns 0 on success,
 * EIO if node creation failed, or that first configuration error.
 */
static int
create_sid_branch(
	dev_info_t	*pdip,
	devi_branch_t	*bp,
	dev_info_t	**dipp,
	uint_t		flags)
{
	int		rv = 0, state = DDI_WALK_CONTINUE;
	dev_info_t	*rdip;

	while (state == DDI_WALK_CONTINUE) {
		int	circ;

		ndi_devi_enter(pdip, &circ);

		state = sid_node_create(pdip, bp, &rdip);
		if (rdip == NULL) {
			/* Creation failed; nothing to hold or configure */
			ndi_devi_exit(pdip, circ);
			ASSERT(state == DDI_WALK_ERROR);
			break;
		}

		e_ddi_branch_hold(rdip);

		ndi_devi_exit(pdip, circ);

		if (flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(rdip, dipp, 0);
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * devi_branch_callback() is optional
		 */
		if (bp->devi_branch_callback)
			bp->devi_branch_callback(rdip, bp->arg, 0);
	}

	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);

	return (state == DDI_WALK_ERROR ? EIO : rv);
}
9846 
9847 int
9848 e_ddi_branch_create(
9849 	dev_info_t	*pdip,
9850 	devi_branch_t	*bp,
9851 	dev_info_t	**dipp,
9852 	uint_t		flags)
9853 {
9854 	int prom_devi, sid_devi, error;
9855 
9856 	if (pdip == NULL || bp == NULL || bp->type == 0)
9857 		return (EINVAL);
9858 
9859 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
9860 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
9861 
9862 	if (prom_devi && bp->create.prom_branch_select == NULL)
9863 		return (EINVAL);
9864 	else if (sid_devi && bp->create.sid_branch_create == NULL)
9865 		return (EINVAL);
9866 	else if (!prom_devi && !sid_devi)
9867 		return (EINVAL);
9868 
9869 	if (flags & DEVI_BRANCH_EVENT)
9870 		return (EINVAL);
9871 
9872 	if (prom_devi) {
9873 		struct pta pta = {0};
9874 
9875 		pta.pdip = pdip;
9876 		pta.bp = bp;
9877 		pta.flags = flags;
9878 
9879 		error = prom_tree_access(create_prom_branch, &pta, NULL);
9880 
9881 		if (dipp)
9882 			*dipp = pta.fdip;
9883 		else if (pta.fdip)
9884 			ndi_rele_devi(pta.fdip);
9885 	} else {
9886 		error = create_sid_branch(pdip, bp, dipp, flags);
9887 	}
9888 
9889 	return (error);
9890 }
9891 
9892 int
9893 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
9894 {
9895 	int		rv;
9896 	char		*devnm;
9897 	dev_info_t	*pdip;
9898 
9899 	if (dipp)
9900 		*dipp = NULL;
9901 
9902 	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
9903 		return (EINVAL);
9904 
9905 	pdip = ddi_get_parent(rdip);
9906 
9907 	ndi_hold_devi(pdip);
9908 
9909 	if (!e_ddi_branch_held(rdip)) {
9910 		ndi_rele_devi(pdip);
9911 		cmn_err(CE_WARN, "e_ddi_branch_configure: "
9912 		    "dip(%p) not held", (void *)rdip);
9913 		return (EINVAL);
9914 	}
9915 
9916 	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
9917 		/*
9918 		 * First attempt to bind a driver. If we fail, return
9919 		 * success (On some platforms, dips for some device
9920 		 * types (CPUs) may not have a driver)
9921 		 */
9922 		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
9923 			ndi_rele_devi(pdip);
9924 			return (0);
9925 		}
9926 
9927 		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
9928 			rv = NDI_FAILURE;
9929 			goto out;
9930 		}
9931 	}
9932 
9933 	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
9934 
9935 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
9936 
9937 	(void) ddi_deviname(rdip, devnm);
9938 
9939 	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
9940 	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
9941 		/* release hold from ndi_devi_config_one() */
9942 		ndi_rele_devi(rdip);
9943 	}
9944 
9945 	kmem_free(devnm, MAXNAMELEN + 1);
9946 out:
9947 	if (rv != NDI_SUCCESS && dipp && rdip) {
9948 		ndi_hold_devi(rdip);
9949 		*dipp = rdip;
9950 	}
9951 	ndi_rele_devi(pdip);
9952 	return (ndi2errno(rv));
9953 }
9954 
9955 void
9956 e_ddi_branch_hold(dev_info_t *rdip)
9957 {
9958 	if (e_ddi_branch_held(rdip)) {
9959 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
9960 		return;
9961 	}
9962 
9963 	mutex_enter(&DEVI(rdip)->devi_lock);
9964 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
9965 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
9966 		DEVI(rdip)->devi_ref++;
9967 	}
9968 	ASSERT(DEVI(rdip)->devi_ref > 0);
9969 	mutex_exit(&DEVI(rdip)->devi_lock);
9970 }
9971 
9972 int
9973 e_ddi_branch_held(dev_info_t *rdip)
9974 {
9975 	int rv = 0;
9976 
9977 	mutex_enter(&DEVI(rdip)->devi_lock);
9978 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
9979 	    DEVI(rdip)->devi_ref > 0) {
9980 		rv = 1;
9981 	}
9982 	mutex_exit(&DEVI(rdip)->devi_lock);
9983 
9984 	return (rv);
9985 }
9986 
9987 void
9988 e_ddi_branch_rele(dev_info_t *rdip)
9989 {
9990 	mutex_enter(&DEVI(rdip)->devi_lock);
9991 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
9992 	DEVI(rdip)->devi_ref--;
9993 	mutex_exit(&DEVI(rdip)->devi_lock);
9994 }
9995 
9996 int
9997 e_ddi_branch_unconfigure(
9998 	dev_info_t *rdip,
9999 	dev_info_t **dipp,
10000 	uint_t flags)
10001 {
10002 	int	circ, rv;
10003 	int	destroy;
10004 	char	*devnm;
10005 	uint_t	nflags;
10006 	dev_info_t *pdip;
10007 
10008 	if (dipp)
10009 		*dipp = NULL;
10010 
10011 	if (rdip == NULL)
10012 		return (EINVAL);
10013 
10014 	pdip = ddi_get_parent(rdip);
10015 
10016 	ASSERT(pdip);
10017 
10018 	/*
10019 	 * Check if caller holds pdip busy - can cause deadlocks during
10020 	 * devfs_clean()
10021 	 */
10022 	if (DEVI_BUSY_OWNED(pdip)) {
10023 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
10024 		    " devinfo node(%p) is busy held", (void *)pdip);
10025 		return (EINVAL);
10026 	}
10027 
10028 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
10029 
10030 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
10031 
10032 	ndi_devi_enter(pdip, &circ);
10033 	(void) ddi_deviname(rdip, devnm);
10034 	ndi_devi_exit(pdip, circ);
10035 
10036 	/*
10037 	 * ddi_deviname() returns a component name with / prepended.
10038 	 */
10039 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
10040 
10041 	ndi_devi_enter(pdip, &circ);
10042 
10043 	/*
10044 	 * Recreate device name as it may have changed state (init/uninit)
10045 	 * when parent busy lock was dropped for devfs_clean()
10046 	 */
10047 	(void) ddi_deviname(rdip, devnm);
10048 
10049 	if (!e_ddi_branch_held(rdip)) {
10050 		kmem_free(devnm, MAXNAMELEN + 1);
10051 		ndi_devi_exit(pdip, circ);
10052 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
10053 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
10054 		return (EINVAL);
10055 	}
10056 
10057 	/*
10058 	 * Release hold on the branch. This is ok since we are holding the
10059 	 * parent busy. If rdip is not removed, we must do a hold on the
10060 	 * branch before returning.
10061 	 */
10062 	e_ddi_branch_rele(rdip);
10063 
10064 	nflags = NDI_DEVI_OFFLINE;
10065 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
10066 		nflags |= NDI_DEVI_REMOVE;
10067 		destroy = 1;
10068 	} else {
10069 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
10070 	}
10071 
10072 	if (flags & DEVI_BRANCH_EVENT)
10073 		nflags |= NDI_POST_EVENT;
10074 
10075 	if (i_ddi_devi_attached(pdip) &&
10076 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
10077 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
10078 	} else {
10079 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
10080 		if (rv == NDI_SUCCESS) {
10081 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
10082 			rv = ndi_devi_offline(rdip, nflags);
10083 		}
10084 	}
10085 
10086 	if (!destroy || rv != NDI_SUCCESS) {
10087 		/* The dip still exists, so do a hold */
10088 		e_ddi_branch_hold(rdip);
10089 	}
10090 out:
10091 	kmem_free(devnm, MAXNAMELEN + 1);
10092 	ndi_devi_exit(pdip, circ);
10093 	return (ndi2errno(rv));
10094 }
10095 
10096 int
10097 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
10098 {
10099 	return (e_ddi_branch_unconfigure(rdip, dipp,
10100 	    flag|DEVI_BRANCH_DESTROY));
10101 }
10102 
10103 /*
10104  * Number of chains for hash table
10105  */
10106 #define	NUMCHAINS	17
10107 
10108 /*
10109  * Devinfo busy arg
10110  */
10111 struct devi_busy {
10112 	int dv_total;
10113 	int s_total;
10114 	mod_hash_t *dv_hash;
10115 	mod_hash_t *s_hash;
10116 	int (*callback)(dev_info_t *, void *, uint_t);
10117 	void *arg;
10118 };
10119 
/*
 * ddi_walk_devs() callback used by e_ddi_branch_referenced(): look up
 * the dip's open counts in the dv_node and snode hash tables and report
 * the larger of the two to the user-supplied callback.  Returns the
 * callback's DDI_WALK_* result.
 */
static int
visit_dip(dev_info_t *dip, void *arg)
{
	uintptr_t sbusy, dvbusy, ref;
	struct devi_busy *bsp = arg;

	ASSERT(bsp->callback);

	/*
	 * A dip cannot be busy if its reference count is 0
	 */
	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
		return (bsp->callback(dip, bsp->arg, 0));
	}

	/* No dv_node holds recorded for this dip */
	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
		dvbusy = 0;

	/*
	 * To catch device opens currently maintained on specfs common snodes.
	 */
	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
		sbusy = 0;

#ifdef	DEBUG
	/* The dip hold count should bound both busy counts */
	if (ref < sbusy || ref < dvbusy) {
		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
	}
#endif

	/* Report the larger of the snode and dv_node busy counts */
	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;

	return (bsp->callback(dip, bsp->arg, dvbusy));
}
10155 
/*
 * spec_snode_walk() callback used by e_ddi_branch_referenced():
 * accumulate each snode's device open count against its dip in
 * bsp->s_hash and add it to the running total bsp->s_total.
 */
static int
visit_snode(struct snode *sp, void *arg)
{
	uintptr_t sbusy;
	dev_info_t *dip;
	int count;
	struct devi_busy *bsp = arg;

	ASSERT(sp);

	/*
	 * The stable lock is held. This prevents
	 * the snode and its associated dip from
	 * going away.
	 */
	dip = NULL;
	count = spec_devi_open_count(sp, &dip);

	if (count <= 0)
		return (DDI_WALK_CONTINUE);

	ASSERT(dip);

	/* Fold this snode's count into any total already recorded */
	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
		sbusy = count;
	else
		sbusy += count;

	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
		    "sbusy = %lu", "e_ddi_branch_referenced",
		    (void *)dip, sbusy);
	}

	bsp->s_total += count;

	return (DDI_WALK_CONTINUE);
}
10194 
/*
 * devfs_walk() callback used by e_ddi_branch_referenced(): accumulate
 * each dv_node's vnode hold count (v_count) against its dip in
 * bsp->dv_hash and add it to the running total bsp->dv_total.
 */
static void
visit_dvnode(struct dv_node *dv, void *arg)
{
	uintptr_t dvbusy;
	uint_t count;
	struct vnode *vp;
	struct devi_busy *bsp = arg;

	ASSERT(dv && dv->dv_devi);

	vp = DVTOV(dv);

	mutex_enter(&vp->v_lock);
	count = vp->v_count;
	mutex_exit(&vp->v_lock);

	if (!count)
		return;

	/* Fold this dv_node's count into any total already recorded */
	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
	    (mod_hash_val_t *)&dvbusy))
		dvbusy = count;
	else
		dvbusy += count;

	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
	    (mod_hash_val_t)dvbusy)) {
		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
		    "dvbusy=%lu", "e_ddi_branch_referenced",
		    (void *)dv->dv_devi, dvbusy);
	}

	bsp->dv_total += count;
}
10229 
10230 /*
10231  * Returns reference count on success or -1 on failure.
10232  */
10233 int
10234 e_ddi_branch_referenced(
10235 	dev_info_t *rdip,
10236 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
10237 	void *arg)
10238 {
10239 	int circ;
10240 	char *path;
10241 	dev_info_t *pdip;
10242 	struct devi_busy bsa = {0};
10243 
10244 	ASSERT(rdip);
10245 
10246 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
10247 
10248 	ndi_hold_devi(rdip);
10249 
10250 	pdip = ddi_get_parent(rdip);
10251 
10252 	ASSERT(pdip);
10253 
10254 	/*
10255 	 * Check if caller holds pdip busy - can cause deadlocks during
10256 	 * devfs_walk()
10257 	 */
10258 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
10259 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
10260 		    "devinfo branch(%p) not held or parent busy held",
10261 		    (void *)rdip);
10262 		ndi_rele_devi(rdip);
10263 		kmem_free(path, MAXPATHLEN);
10264 		return (-1);
10265 	}
10266 
10267 	ndi_devi_enter(pdip, &circ);
10268 	(void) ddi_pathname(rdip, path);
10269 	ndi_devi_exit(pdip, circ);
10270 
10271 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
10272 	    mod_hash_null_valdtor, sizeof (struct dev_info));
10273 
10274 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
10275 	    mod_hash_null_valdtor, sizeof (struct snode));
10276 
10277 	if (devfs_walk(path, visit_dvnode, &bsa)) {
10278 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
10279 		    "devfs walk failed for: %s", path);
10280 		kmem_free(path, MAXPATHLEN);
10281 		bsa.s_total = bsa.dv_total = -1;
10282 		goto out;
10283 	}
10284 
10285 	kmem_free(path, MAXPATHLEN);
10286 
10287 	/*
10288 	 * Walk the snode table to detect device opens, which are currently
10289 	 * maintained on specfs common snodes.
10290 	 */
10291 	spec_snode_walk(visit_snode, &bsa);
10292 
10293 	if (callback == NULL)
10294 		goto out;
10295 
10296 	bsa.callback = callback;
10297 	bsa.arg = arg;
10298 
10299 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
10300 		ndi_devi_enter(rdip, &circ);
10301 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
10302 		ndi_devi_exit(rdip, circ);
10303 	}
10304 
10305 out:
10306 	ndi_rele_devi(rdip);
10307 	mod_hash_destroy_ptrhash(bsa.s_hash);
10308 	mod_hash_destroy_ptrhash(bsa.dv_hash);
10309 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
10310 }
10311