xref: /titanic_51/usr/src/uts/common/os/sunddi.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_devlockmem;
89 
90 #ifdef DEBUG
91 static int sunddi_debug = 0;
92 #endif /* DEBUG */
93 
94 /* ddi_umem_unlock miscellaneous */
95 
96 static	void	i_ddi_umem_unlock_thread_start(void);
97 
98 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
99 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
100 static	kthread_t	*ddi_umem_unlock_thread;
101 /*
102  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
103  */
104 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
106 
107 /*
108  * This lock protects the project.max-device-locked-memory counter.
109  * When both p_lock (proc_t) and this lock need to acquired, p_lock
110  * should be acquired first.
111  */
112 static kmutex_t umem_devlockmem_rctl_lock;
113 
114 
115 /*
116  * DDI(Sun) Function and flag definitions:
117  */
118 
119 #if defined(__x86)
120 /*
121  * Used to indicate which entries were chosen from a range.
122  */
123 char	*chosen_reg = "chosen-reg";
124 #endif
125 
126 /*
127  * Function used to ring system console bell
128  */
129 void (*ddi_console_bell_func)(clock_t duration);
130 
131 /*
132  * Creating register mappings and handling interrupts:
133  */
134 
135 /*
136  * Generic ddi_map: Call parent to fulfill request...
137  */
138 
139 int
140 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
141     off_t len, caddr_t *addrp)
142 {
143 	dev_info_t *pdip;
144 
145 	ASSERT(dp);
146 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
147 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
148 	    dp, mp, offset, len, addrp));
149 }
150 
151 /*
152  * ddi_apply_range: (Called by nexi only.)
153  * Apply ranges in parent node dp, to child regspec rp...
154  */
155 
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	/* Delegate to the platform implementation; rp is updated in place. */
	return (i_ddi_apply_range(dp, rdip, rp));
}
161 
/*
 * ddi_map_regs: map register set 'rnumber' of 'dip' into kernel virtual
 * address space, returning the address through kaddrp.  offset/len select
 * a sub-range of the register set (len == 0 means the whole set).
 * On x86 this also publishes a "chosen-reg" property recording which
 * reg tuple was selected (adjusted by offset/len).
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not range-checked against
		 * length here — presumably validated by the caller.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a map-by-rnumber request for a locked kernel mapping. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
225 
226 void
227 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
228     off_t len)
229 {
230 	ddi_map_req_t mr;
231 
232 	mr.map_op = DDI_MO_UNMAP;
233 	mr.map_type = DDI_MT_RNUMBER;
234 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
235 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
236 	mr.map_obj.rnumber = rnumber;
237 	mr.map_handlep = NULL;
238 	mr.map_vers = DDI_MAP_VERSION;
239 
240 	/*
241 	 * Call my parent to unmap my regs.
242 	 */
243 
244 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
245 	*kaddrp = (caddr_t)0;
246 #if defined(__x86)
247 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
248 #endif
249 }
250 
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	/* Thin DDI wrapper over the implementation's bus_map handler. */
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
257 
258 /*
259  * nullbusmap:	The/DDI default bus_map entry point for nexi
260  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
261  *		with no HAT/MMU layer to be programmed at this level.
262  *
263  *		If the call is to map by rnumber, return an error,
264  *		otherwise pass anything else up the tree to my parent.
265  */
266 int
267 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
268 	off_t offset, off_t len, caddr_t *vaddrp)
269 {
270 	_NOTE(ARGUNUSED(rdip))
271 	if (mp->map_type == DDI_MT_RNUMBER)
272 		return (DDI_ME_UNSUPPORTED);
273 
274 	return (ddi_map(dip, mp, offset, len, vaddrp));
275 }
276 
277 /*
278  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
279  *			   Only for use by nexi using the reg/range paradigm.
280  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Delegate to the implementation's rnumber-to-regspec lookup. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
286 
287 
288 /*
289  * Note that we allow the dip to be nil because we may be called
290  * prior even to the instantiation of the devinfo tree itself - all
291  * regular leaf and nexus drivers should always use a non-nil dip!
292  *
293  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
294  * simply get a synchronous fault as soon as we touch a missing address.
295  *
296  * Poke is rather more carefully handled because we might poke to a write
297  * buffer, "succeed", then only find some time later that we got an
298  * asynchronous fault that indicated that the address we were writing to
299  * was not really backed by hardware.
300  */
301 
302 static int
303 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
304     void *addr, void *value_p)
305 {
306 	union {
307 		uint64_t	u64;
308 		uint32_t	u32;
309 		uint16_t	u16;
310 		uint8_t		u8;
311 	} peekpoke_value;
312 
313 	peekpoke_ctlops_t peekpoke_args;
314 	uint64_t dummy_result;
315 	int rval;
316 
317 	/* Note: size is assumed to be correct;  it is not checked. */
318 	peekpoke_args.size = size;
319 	peekpoke_args.dev_addr = (uint64_t)addr;
320 	peekpoke_args.handle = NULL;
321 	peekpoke_args.repcount = 1;
322 	peekpoke_args.flags = 0;
323 
324 	if (cmd == DDI_CTLOPS_POKE) {
325 		switch (size) {
326 		case sizeof (uint8_t):
327 			peekpoke_value.u8 = *(uint8_t *)value_p;
328 			break;
329 		case sizeof (uint16_t):
330 			peekpoke_value.u16 = *(uint16_t *)value_p;
331 			break;
332 		case sizeof (uint32_t):
333 			peekpoke_value.u32 = *(uint32_t *)value_p;
334 			break;
335 		case sizeof (uint64_t):
336 			peekpoke_value.u64 = *(uint64_t *)value_p;
337 			break;
338 		}
339 	}
340 
341 	peekpoke_args.host_addr = (uint64_t)&peekpoke_value.u64;
342 
343 	if (devi != NULL)
344 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
345 		    &dummy_result);
346 	else
347 		rval = peekpoke_mem(cmd, &peekpoke_args);
348 
349 	/*
350 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
351 	 */
352 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
353 		switch (size) {
354 		case sizeof (uint8_t):
355 			*(uint8_t *)value_p = peekpoke_value.u8;
356 			break;
357 		case sizeof (uint16_t):
358 			*(uint16_t *)value_p = peekpoke_value.u16;
359 			break;
360 		case sizeof (uint32_t):
361 			*(uint32_t *)value_p = peekpoke_value.u32;
362 			break;
363 		case sizeof (uint64_t):
364 			*(uint64_t *)value_p = peekpoke_value.u64;
365 			break;
366 		}
367 	}
368 
369 	return (rval);
370 }
371 
372 /*
373  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
374  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
375  */
376 int
377 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
378 {
379 	switch (size) {
380 	case sizeof (uint8_t):
381 	case sizeof (uint16_t):
382 	case sizeof (uint32_t):
383 	case sizeof (uint64_t):
384 		break;
385 	default:
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
390 }
391 
392 int
393 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
394 {
395 	switch (size) {
396 	case sizeof (uint8_t):
397 	case sizeof (uint16_t):
398 	case sizeof (uint32_t):
399 	case sizeof (uint64_t):
400 		break;
401 	default:
402 		return (DDI_FAILURE);
403 	}
404 
405 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
406 }
407 
#ifdef _LP64
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#else /* _ILP32 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
#endif
{
	/* Fixed-width 8-bit peek (ddi_peekc is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
419 
#ifdef _LP64
int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#else /* _ILP32 */
int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
#endif
{
	/* Fixed-width 16-bit peek (ddi_peeks is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
431 
#ifdef _LP64
int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#else /* _ILP32 */
int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
#endif
{
	/* Fixed-width 32-bit peek (ddi_peekl is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
443 
#ifdef _LP64
int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#else /* _ILP32 */
int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
#endif
{
	/* Fixed-width 64-bit peek (ddi_peekd is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
455 
#ifdef _LP64
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
#else /* _ILP32 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
#endif
{
	/* Fixed-width 8-bit poke (ddi_pokec is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
466 
#ifdef _LP64
int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
#else /* _ILP32 */
int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
#endif
{
	/* Fixed-width 16-bit poke (ddi_pokes is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
477 
#ifdef _LP64
int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
#else /* _ILP32 */
int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
#endif
{
	/* Fixed-width 32-bit poke (ddi_pokel is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
488 
#ifdef _LP64
int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
#else /* _ILP32 */
int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
#endif
{
	/* Fixed-width 64-bit poke (ddi_poked is the legacy ILP32 name). */
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
499 
500 /*
501  * ddi_peekpokeio() is used primarily by the mem drivers for moving
502  * data to and from uio structures via peek and poke.  Note that we
503  * use "internal" routines ddi_peek and ddi_poke to make this go
504  * slightly faster, avoiding the call overhead ..
505  */
/*
 * Move 'len' bytes between device address 'addr' and the uio, one
 * peek/poke at a time.  Each iteration picks the widest transfer size
 * (up to xfersize, capped at sizeof (long)) for which both the remaining
 * length and the current address are naturally aligned.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on any peek/poke or uio error
 * (possibly mid-transfer; partial progress is not undone).
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd length or odd address: forced to byte transfers. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/* Downgrade through 8 -> 4 -> 2 bytes on misalignment. */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* Shuttle the staged word between ibuffer and uio. */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
581 
582 /*
583  * These routines are used by drivers that do layered ioctls
584  * On sparc, they're implemented in assembler to avoid spilling
585  * register windows in the common (copyin) case ..
586  */
587 #if !defined(__sparc)
588 int
589 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
590 {
591 	if (flags & FKIOCTL)
592 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
593 	return (copyin(buf, kernbuf, size));
594 }
595 
596 int
597 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
598 {
599 	if (flags & FKIOCTL)
600 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
601 	return (copyout(buf, kernbuf, size));
602 }
603 #endif	/* !__sparc */
604 
605 /*
606  * Conversions in nexus pagesize units.  We don't duplicate the
607  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
608  * routines anyway.
609  */
610 unsigned long
611 ddi_btop(dev_info_t *dip, unsigned long bytes)
612 {
613 	unsigned long pages;
614 
615 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
616 	return (pages);
617 }
618 
619 unsigned long
620 ddi_btopr(dev_info_t *dip, unsigned long bytes)
621 {
622 	unsigned long pages;
623 
624 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
625 	return (pages);
626 }
627 
628 unsigned long
629 ddi_ptob(dev_info_t *dip, unsigned long pages)
630 {
631 	unsigned long bytes;
632 
633 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
634 	return (bytes);
635 }
636 
unsigned int
ddi_enter_critical(void)
{
	/* Raise to spl7 and return the previous level for ddi_exit_critical. */
	return ((uint_t)spl7());
}
642 
void
ddi_exit_critical(unsigned int spl)
{
	/* Restore the priority level saved by ddi_enter_critical(). */
	splx((int)spl);
}
648 
649 /*
650  * Nexus ctlops punter
651  */
652 
653 #if !defined(__sparc)
654 /*
655  * Request bus_ctl parent to handle a bus_ctl request
656  *
657  * (The sparc version is in sparc_ddi.s)
658  */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	/* Both the requesting and target dips are required. */
	if (!d || !r)
		return (DDI_FAILURE);

	/* Walk up to the cached bus_ctl-implementing ancestor. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}
673 
674 #endif
675 
676 /*
677  * DMA/DVMA setup
678  */
679 
/*
 * Default DMA limits, used when a caller passes a NULL ddi_dma_lim_t.
 * The x86 variant carries extra (versioned) fields.
 */
#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version ('+' binds tighter than
				 * '<<', so this is 0x86 << 24 = 0x86000000) */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
706 
/*
 * ddi_dma_setup: legacy DMA mapping setup.  Substitutes standard_limits
 * (sparc) or fails (x86) when the request carries no limits, then routes
 * the request to the appropriate bus_dma_map implementation.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Copy limits to a local so the caller's struct is never modified. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* x86 requires explicit limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
739 
/*
 * ddi_dma_addr_setup: build a DMA request for a virtual-address range
 * (addr/len in address space 'as') and map it.  A NULL 'limits' selects
 * standard_limits; len == 0 is rejected as unmappable.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	/* Describe the object as a plain virtual-address range. */
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
780 
/*
 * ddi_dma_buf_setup: build a DMA request describing the memory behind a
 * buf(9S) and map it.  The object type depends on the buffer flags:
 * page-list (B_PAGEIO), shadow page-list (B_SHADOW), or plain buffer
 * virtual addresses; the address space is chosen from b_proc/B_REMAPPED.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		/* Paged I/O: describe the buffer by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
845 
846 #if !defined(__sparc)
847 /*
848  * Request bus_dma_ctl parent to fiddle with a dma request.
849  *
850  * (The sparc version is in sparc_subr.s)
851  */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Dispatch to the cached bus_dma_ctl-implementing ancestor. */
	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
863 #endif
864 
865 /*
866  * For all DMA control functions, call the DMA control
867  * routine and return status.
868  *
869  * Just plain assume that the parent is to be called.
870  * If a nexus driver or a thread outside the framework
871  * of a nexus driver or a leaf driver calls these functions,
872  * it is up to them to deal with the fact that the parent's
873  * bus_dma_ctl function will be the first one called.
874  */
875 
876 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
877 
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	/* DDI_DMA_KVADDR ctlop; HD extracts the requesting dip from h. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}
883 
int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	/* Handle-to-cookie conversion via the DDI_DMA_HTOC ctlop. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}
889 
int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	/* Cookie-to-offset conversion via the DDI_DMA_COFF ctlop. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}
896 
int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	/* Move to another DMA window via the DDI_DMA_MOVWIN ctlop. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}
903 
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	/* Windows only exist for partial mappings. */
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	/* Report the current window via the DDI_DMA_REPWIN ctlop. */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}
911 
912 /*
913  * Note:  The astute might notice that in the next two routines
914  * the SPARC case passes a pointer to a ddi_dma_win_t as the 5th
915  * argument while the x86 case passes the ddi_dma_win_t directly.
916  *
917  * While it would be nice if the "correct" behavior was
918  * platform independent and specified someplace, it isn't.
919  * Until that point, what's required is that this call and
920  * the relevant bus nexus drivers agree, and in this case they
921  * do, at least for the cases I've looked at.
922  */
/*
 * Advance to the next DMA window.  Per the comment above, SPARC passes
 * &win (pointer to the window) while x86/others pass the window value
 * itself; the bus nexus drivers on each platform expect it that way.
 */
int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
#if defined(__sparc)
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
#elif defined(__x86)
	/* x86 dispatches through the handle's cached mctl function. */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN,
		(off_t *)win, 0, (caddr_t *)nwin, 0));
#endif
}
938 
/*
 * Advance to the next DMA segment within a window.  The handle is
 * recovered from the window: on SPARC the window IS the handle; on
 * others it is read out of the impl_dma_segment.
 */
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
#if defined(__sparc)
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
#else
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)win)->dmais_hndl;

#if defined(__x86)
	/* x86 dispatches through the handle's cached mctl function. */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#else
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG,
		(off_t *)win, (size_t *)seg, (caddr_t *)nseg, 0));
#endif
#endif
}
960 
961 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
962 /*
963  * This routine is Obsolete and should be removed from ALL architectures
964  * in a future release of Solaris.
965  *
966  * It is deliberately NOT ported to amd64; please fix the code that
967  * depends on this routine to use ddi_dma_nextcookie(9F).
968  */
/*
 * Convert a DMA segment to a cookie (obsolete; see the note above —
 * not ported to amd64, use ddi_dma_nextcookie(9F) instead).
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
#if defined(__sparc)
	/* On SPARC the segment doubles as the handle. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
#elif defined(__i386) && !defined(__amd64)
	ddi_dma_handle_t h = (ddi_dma_handle_t)
	    ((impl_dma_segment_t *)seg)->dmais_hndl;

	/*
	 * The hack used for i386 won't work here; we can't squeeze a
	 * pointer through the 'cache_flags' field.
	 */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_SEGTOC,
		o, (size_t *)l, (caddr_t *)cookiep, (uint_t)seg));
#endif
}
990 #endif	/* (__i386 && !__amd64) || __sparc */
991 
992 #if !defined(__sparc)
993 
994 /*
995  * The SPARC versions of these routines are done in assembler to
996  * save register windows, so they're in sparc_subr.s.
997  */
998 
999 int
1000 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
1001 	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
1002 {
1003 	dev_info_t	*hdip;
1004 	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
1005 	    ddi_dma_handle_t *);
1006 
1007 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
1008 
1009 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
1010 	return ((*funcp)(hdip, rdip, dmareqp, handlep));
1011 }
1012 
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	/* Route to the cached bus_dma_allochdl-implementing ancestor. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}
1026 
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * Uses the allochdl ancestor on purpose: the handle must be freed
	 * by the same nexus that allocated it.
	 */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}
1038 
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	/* Route to the cached bus_dma_bindhdl-implementing ancestor. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}
1053 
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/* Route to the cached bus_dma_unbindhdl-implementing ancestor. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}
1066 
1067 
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	/* Route to the cached bus_dma_flush-implementing ancestor. */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}
1082 
1083 int
1084 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1085     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1086     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1087 {
1088 	dev_info_t	*hdip;
1089 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1090 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1091 
1092 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1093 
1094 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
1095 	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
1096 	    cookiep, ccountp));
1097 }
1098 
1099 int
1100 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1101 {
1102 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1103 	dev_info_t *hdip, *dip;
1104 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1105 		size_t, uint_t);
1106 
1107 	/*
1108 	 * the DMA nexus driver will set DMP_NOSYNC if the
1109 	 * platform does not require any sync operation. For
1110 	 * example if the memory is uncached or consistent
1111 	 * and without any I/O write buffers involved.
1112 	 */
1113 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1114 		return (DDI_SUCCESS);
1115 
1116 	dip = hp->dmai_rdip;
1117 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1118 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1119 	return ((*funcp)(hdip, dip, h, o, l, whom));
1120 }
1121 
/*
 * ddi_dma_unbind_handle:	Unbind the object bound to handle h,
 *	dispatching via the requesting dip recorded in the handle.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): unlike ddi_dma_unbindhdl() above, the function
	 * pointer comes from the requesting dip's cached
	 * devi_bus_dma_unbindfunc rather than from hdip's bus_ops —
	 * presumably a cached shortcut; confirm before changing.
	 */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1134 
1135 #endif	/* !__sparc */
1136 
/*
 * ddi_dma_free:	Release a DMA handle via the DDI_DMA_FREE mctl op.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
#if !defined(__x86)
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
#else
	/* on x86 the handle carries its own mctl entry point */
	return (((ddi_dma_impl_t *)h)->dmai_mctl(HD, HD, h, DDI_DMA_FREE,
		0, 0, 0, 0));
#endif
}
1147 
1148 int
1149 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1150 {
1151 	ddi_dma_lim_t defalt;
1152 	size_t size = len;
1153 
1154 	if (!limp) {
1155 		defalt = standard_limits;
1156 		limp = &defalt;
1157 	}
1158 #if defined(__sparc)
1159 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1160 	    iopbp, NULL, NULL));
1161 #else
1162 	return (ddi_dma_mctl(dip, dip, 0, DDI_DMA_IOPB_ALLOC, (off_t *)limp,
1163 	    &size, iopbp, 0));
1164 #endif
1165 }
1166 
/*
 * ddi_iopb_free:	Free memory obtained from ddi_iopb_alloc().
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, 0);
}
1172 
1173 int
1174 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1175 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1176 {
1177 	ddi_dma_lim_t defalt;
1178 	size_t size = length;
1179 
1180 	if (!limits) {
1181 		defalt = standard_limits;
1182 		limits = &defalt;
1183 	}
1184 #if defined(__sparc)
1185 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1186 	    1, 0, kaddrp, real_length, NULL));
1187 #else
1188 	return (ddi_dma_mctl(dip, dip, (ddi_dma_handle_t)real_length,
1189 	    DDI_DMA_SMEM_ALLOC, (off_t *)limits, &size,
1190 	    kaddrp, (flags & 0x1)));
1191 #endif
1192 }
1193 
/*
 * ddi_mem_free:	Free memory obtained from ddi_mem_alloc().
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, 1);
}
1199 
1200 /*
1201  * DMA attributes, alignment, burst sizes, and transfer minimums
1202  */
1203 int
1204 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1205 {
1206 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1207 
1208 	if (attrp == NULL)
1209 		return (DDI_FAILURE);
1210 	*attrp = dimp->dmai_attr;
1211 	return (DDI_SUCCESS);
1212 }
1213 
1214 int
1215 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1216 {
1217 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1218 
1219 	if (!dimp)
1220 		return (0);
1221 	else
1222 		return (dimp->dmai_burstsizes);
1223 }
1224 
1225 int
1226 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1227 {
1228 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1229 
1230 	if (!dimp || !alignment || !mineffect)
1231 		return (DDI_FAILURE);
1232 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1233 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1234 	} else {
1235 		if (dimp->dmai_burstsizes & 0xff0000) {
1236 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1237 		} else {
1238 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1239 		}
1240 	}
1241 	*mineffect = dimp->dmai_minxfer;
1242 	return (DDI_SUCCESS);
1243 }
1244 
1245 int
1246 ddi_iomin(dev_info_t *a, int i, int stream)
1247 {
1248 	int r;
1249 
1250 	/*
1251 	 * Make sure that the initial value is sane
1252 	 */
1253 	if (i & (i - 1))
1254 		return (0);
1255 	if (i == 0)
1256 		i = (stream) ? 4 : 1;
1257 
1258 	r = ddi_ctlops(a, a,
1259 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1260 	if (r != DDI_SUCCESS || (i & (i - 1)))
1261 		return (0);
1262 	return (i);
1263 }
1264 
1265 /*
1266  * Given two DMA attribute structures, apply the attributes
1267  * of one to the other, following the rules of attributes
1268  * and the wishes of the caller.
1269  *
1270  * The rules of DMA attribute structures are that you cannot
1271  * make things *less* restrictive as you apply one set
1272  * of attributes to another.
1273  *
1274  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* address window may only shrink */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	/* counts and transfer sizes may only shrink */
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* alignment and granularity may only grow */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* only burst sizes both sides support survive */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
1298 
1299 /*
1300  * mmap/segmap interface:
1301  */
1302 
1303 /*
1304  * ddi_segmap:		setup the default segment driver. Calls the drivers
1305  *			XXmmap routine to validate the range to be mapped.
1306  *			Return ENXIO of the range is not valid.  Create
1307  *			a seg_dev segment that contains all of the
1308  *			necessary information and will reference the
1309  *			default segment driver routines. It returns zero
1310  *			on success or non-zero on failure.
1311  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* specfs provides the default segment-driver setup */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1322 
1323 /*
1324  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1325  *			drivers. Allows each successive parent to resolve
1326  *			address translations and add its mappings to the
1327  *			mapping list supplied in the page structure. It
1328  *			returns zero on success	or non-zero on failure.
1329  */
1330 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* delegate to the implementation; dip is passed in both slots */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1337 
1338 /*
1339  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1340  *	Invokes platform specific DDI to determine whether attributes specified
1341  *	in attr(9s) are	valid for the region of memory that will be made
1342  *	available for direct access to user process via the mmap(2) system call.
1343  */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): ah_hat_flags is copied out even when ddi_map()
	 * failed; callers presumably ignore *hat_flags on error — confirm.
	 */
	*hat_flags = hp->ah_hat_flags;

	/* the handle was only needed to carry the request; free it now */
	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1406 
1407 
1408 /*
1409  * Property functions:	 See also, ddipropdefs.h.
1410  *
1411  * These functions are the framework for the property functions,
1412  * i.e. they support software defined properties.  All implementation
1413  * specific property handling (i.e.: self-identifying devices and
1414  * PROM defined properties are handled in the implementation specific
1415  * functions (defined in ddi_implfuncs.h).
1416  */
1417 
1418 /*
1419  * nopropop:	Shouldn't be called, right?
1420  */
/*
 * nopropop:	Stub prop_op(9E); always reports the property not found.
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1428 
1429 #ifdef	DDI_PROP_DEBUG
1430 int ddi_prop_debug_flag = 0;
1431 
1432 int
1433 ddi_prop_debug(int enable)
1434 {
1435 	int prev = ddi_prop_debug_flag;
1436 
1437 	if ((enable != 0) || (prev != 0))
1438 		printf("ddi_prop_debug: debugging %s\n",
1439 		    enable ? "enabled" : "disabled");
1440 	ddi_prop_debug_flag = enable;
1441 	return (prev);
1442 }
1443 
1444 #endif	/* DDI_PROP_DEBUG */
1445 
1446 /*
1447  * Search a property list for a match, if found return pointer
1448  * to matching prop struct, else return NULL.
1449  */
1450 
1451 ddi_prop_t *
1452 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1453 {
1454 	ddi_prop_t	*propp;
1455 
1456 	/*
1457 	 * find the property in child's devinfo:
1458 	 */
1459 
1460 	/*
1461 	 * Search order defined by this search function is
1462 	 * first matching property with input dev ==
1463 	 * DDI_DEV_T_ANY matching any dev or dev == propp->prop_dev,
1464 	 * name == propp->name, and the correct data type as specified
1465 	 * in the flags
1466 	 */
1467 
1468 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1469 
1470 		if (strcmp(propp->prop_name, name) != 0)
1471 			continue;
1472 
1473 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1474 			continue;
1475 
1476 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1477 			continue;
1478 
1479 		return (propp);
1480 	}
1481 
1482 	return ((ddi_prop_t *)0);
1483 }
1484 
1485 /*
1486  * Search for property within devnames structures
1487  */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/*
	 * NOTE(review): dn_global_prop_ptr is tested before taking
	 * dn_lock — presumably safe for a null check; confirm.
	 */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (strcmp(propp->prop_name, name) != 0)
			continue;

		/* LDI_DEV_T_ANY lookups match a property on any dev */
		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* requested type bits must intersect the property's */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1534 
1535 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1536 
1537 /*
1538  * ddi_prop_search_global:
1539  *	Search the global property list within devnames
1540  *	for the named property.  Return the encoded value.
1541  */
1542 static int
1543 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1544     void *valuep, uint_t *lengthp)
1545 {
1546 	ddi_prop_t	*propp;
1547 	caddr_t		buffer;
1548 
1549 	propp =  i_ddi_search_global_prop(dev, name, flags);
1550 
1551 	/* Property NOT found, bail */
1552 	if (propp == (ddi_prop_t *)0)
1553 		return (DDI_PROP_NOT_FOUND);
1554 
1555 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1556 		return (DDI_PROP_UNDEFINED);
1557 
1558 	if ((buffer = kmem_alloc(propp->prop_len, KM_NOSLEEP)) == NULL) {
1559 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1560 		return (DDI_PROP_NO_MEMORY);
1561 	}
1562 
1563 	/*
1564 	 * Return the encoded data
1565 	 */
1566 	*(caddr_t *)valuep = buffer;
1567 	*lengthp = propp->prop_len;
1568 	bcopy(propp->prop_val, buffer, propp->prop_len);
1569 
1570 	return (DDI_PROP_SUCCESS);
1571 }
1572 
1573 /*
1574  * ddi_prop_search_common:	Lookup and return the encoded value
1575  */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* CANSLEEP retry buffer */
	int		plength = 0;
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* caller's buffer must be large enough */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if ((i_ddi_node_state(pdip) == DS_READY) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* ascend to the parent and search again */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1803 
1804 
1805 /*
1806  * ddi_prop_op: The basic property operator for drivers.
1807  *
1808  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1809  *
1810  *	prop_op			valuep
1811  *	------			------
1812  *
1813  *	PROP_LEN		<unused>
1814  *
1815  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1816  *
1817  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1818  *				address of allocated buffer, if successful)
1819  */
1820 int
1821 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1822     char *name, caddr_t valuep, int *lengthp)
1823 {
1824 	int	i;
1825 
1826 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1827 
1828 	/*
1829 	 * If this was originally an LDI prop lookup then we bail here.
1830 	 * The reason is that the LDI property lookup interfaces first call
1831 	 * a drivers prop_op() entry point to allow it to override
1832 	 * properties.  But if we've made it here, then the driver hasn't
1833 	 * overriden any properties.  We don't want to continue with the
1834 	 * property search here because we don't have any type inforamtion.
1835 	 * When we return failure, the LDI interfaces will then proceed to
1836 	 * call the typed property interfaces to look up the property.
1837 	 */
1838 	if (mod_flags & DDI_PROP_DYNAMIC)
1839 		return (DDI_PROP_NOT_FOUND);
1840 
1841 	/*
1842 	 * check for pre-typed property consumer asking for typed property:
1843 	 * see e_ddi_getprop_int64.
1844 	 */
1845 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1846 		mod_flags |= DDI_PROP_TYPE_INT64;
1847 	mod_flags |= DDI_PROP_TYPE_ANY;
1848 
1849 	i = ddi_prop_search_common(dev, dip, prop_op,
1850 		mod_flags, name, valuep, (uint_t *)lengthp);
1851 	if (i == DDI_PROP_FOUND_1275)
1852 		return (DDI_PROP_SUCCESS);
1853 	return (i);
1854 }
1855 
1856 /*
1857  * ddi_prop_op_nblocks: The basic property operator for drivers that maintain
1858  * size in number of DEV_BSIZE blocks.  Provides a dynamic property
1859  * implementation for size oriented properties based on nblocks64 values passed
1860  * in by the driver.  Fallback to ddi_prop_op if the nblocks64 is too large.
1861  * This interface should not be used with a nblocks64 that represents the
1862  * driver's idea of how to represent unknown, if nblocks is unknown use
1863  * ddi_prop_op.
1864  */
1865 int
1866 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1867     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1868 {
1869 	uint64_t size64;
1870 
1871 	/*
1872 	 * There is no point in supporting nblocks64 values that don't have
1873 	 * an accurate uint64_t byte count representation.
1874 	 */
1875 	if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT))
1876 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1877 		    name, valuep, lengthp));
1878 
1879 	size64 = nblocks64 << DEV_BSHIFT;
1880 	return (ddi_prop_op_size(dev, dip, prop_op, mod_flags,
1881 	    name, valuep, lengthp, size64));
1882 }
1883 
1884 /*
1885  * ddi_prop_op_size: The basic property operator for drivers that maintain size
1886  * in bytes. Provides a of dynamic property implementation for size oriented
1887  * properties based on size64 values passed in by the driver.  Fallback to
1888  * ddi_prop_op if the size64 is too large. This interface should not be used
1889  * with a size64 that represents the driver's idea of how to represent unknown,
1890  * if size is unknown use ddi_prop_op.
1891  *
1892  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1893  * integers. While the most likely interface to request them ([bc]devi_size)
1894  * is declared int (signed) there is no enforcement of this, which means we
1895  * can't enforce limitations here without risking regression.
1896  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = lbtodb(size64);

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * The capitalized names are always 64-bit; the legacy
	 * lowercase names are 32-bit and only served when the value
	 * fits, otherwise we fall through to the static path.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	/* the length of the property and the request must match */
	if (callers_length != *lengthp)
		return (DDI_PROP_INVAL_ARG);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	return (DDI_PROP_SUCCESS);
}
1962 
1963 /*
1964  * Variable length props...
1965  */
1966 
1967 /*
1968  * ddi_getlongprop:	Get variable length property len+val into a buffer
1969  *		allocated by property provider via kmem_alloc. Requester
1970  *		is responsible for freeing returned property via kmem_free.
1971  *
1972  *	Arguments:
1973  *
1974  *	dev_t:	Input:	dev_t of property.
1975  *	dip:	Input:	dev_info_t pointer of child.
1976  *	flags:	Input:	Possible flag modifiers are:
1977  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1978  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1979  *	name:	Input:	name of property.
1980  *	valuep:	Output:	Addr of callers buffer pointer.
1981  *	lengthp:Output:	*lengthp will contain prop length on exit.
1982  *
1983  *	Possible Returns:
1984  *
1985  *		DDI_PROP_SUCCESS:	Prop found and returned.
1986  *		DDI_PROP_NOT_FOUND:	Prop not found
1987  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1988  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1989  */
1990 
/* allocating variant: see the contract in the block comment above */
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
1998 
1999 /*
2000  *
2001  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2002  *				buffer. (no memory allocation by provider).
2003  *
2004  *	dev_t:	Input:	dev_t of property.
2005  *	dip:	Input:	dev_info_t pointer of child.
2006  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2007  *	name:	Input:	name of property
2008  *	valuep:	Input:	ptr to callers buffer.
2009  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2010  *			actual length of property on exit.
2011  *
2012  *	Possible returns:
2013  *
2014  *		DDI_PROP_SUCCESS	Prop found and returned
2015  *		DDI_PROP_NOT_FOUND	Prop not found
2016  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2017  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2018  *					no value returned, but actual prop
2019  *					length returned in *lengthp
2020  *
2021  */
2022 
/* caller-buffer variant: see the contract in the block comment above */
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2030 
2031 /*
2032  * Integer/boolean sized props.
2033  *
2034  * Call is value only... returns found boolean or int sized prop value or
2035  * defvalue if prop not found or is wrong length or is explicitly undefined.
2036  * Only flag is DDI_PROP_DONTPASS...
2037  *
2038  * By convention, this interface returns boolean (0) sized properties
2039  * as value (int)1.
2040  *
2041  * This never returns an error, if property not found or specifically
2042  * undefined, the input `defvalue' is returned.
2043  */
2044 
2045 int
2046 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2047 {
2048 	int	propvalue = defvalue;
2049 	int	proplength = sizeof (int);
2050 	int	error;
2051 
2052 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2053 	    flags, name, (caddr_t)&propvalue, &proplength);
2054 
2055 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2056 		propvalue = 1;
2057 
2058 	return (propvalue);
2059 }
2060 
2061 /*
2062  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2063  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2064  */
2065 
/* length-only lookup; *lengthp is set on DDI_PROP_SUCCESS */
int
ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
}
2071 
2072 /*
2073  * Allocate a struct prop_driver_data, along with 'size' bytes
2074  * for decoded property data.  This structure is freed by
2075  * calling ddi_prop_free(9F).
2076  */
2077 static void *
2078 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2079 {
2080 	struct prop_driver_data *pdd;
2081 
2082 	/*
2083 	 * Allocate a structure with enough memory to store the decoded data.
2084 	 */
2085 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2086 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2087 	pdd->pdd_prop_free = prop_free;
2088 
2089 	/*
2090 	 * Return a pointer to the location to put the decoded data.
2091 	 */
2092 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2093 }
2094 
2095 /*
2096  * Allocated the memory needed to store the encoded data in the property
2097  * handle.
2098  */
2099 static int
2100 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2101 {
2102 	/*
2103 	 * If size is zero, then set data to NULL and size to 0.  This
2104 	 * is a boolean property.
2105 	 */
2106 	if (size == 0) {
2107 		ph->ph_size = 0;
2108 		ph->ph_data = NULL;
2109 		ph->ph_cur_pos = NULL;
2110 		ph->ph_save_pos = NULL;
2111 	} else {
2112 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2113 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2114 			if (ph->ph_data == NULL)
2115 				return (DDI_PROP_NO_MEMORY);
2116 		} else
2117 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2118 		ph->ph_size = size;
2119 		ph->ph_cur_pos = ph->ph_data;
2120 		ph->ph_save_pos = ph->ph_data;
2121 	}
2122 	return (DDI_PROP_SUCCESS);
2123 }
2124 
2125 /*
2126  * Free the space allocated by the lookup routines.  Each lookup routine
2127  * returns a pointer to the decoded data to the driver.  The driver then
2128  * passes this pointer back to us.  This data actually lives in a struct
2129  * prop_driver_data.  We use negative indexing to find the beginning of
2130  * the structure and then free the entire structure using the size and
2131  * the free routine stored in the structure.
2132  */
2133 void
2134 ddi_prop_free(void *datap)
2135 {
2136 	struct prop_driver_data *pdd;
2137 
2138 	/*
2139 	 * Get the structure
2140 	 */
2141 	pdd = (struct prop_driver_data *)
2142 		((caddr_t)datap - sizeof (struct prop_driver_data));
2143 	/*
2144 	 * Call the free routine to free it
2145 	 */
2146 	(*pdd->pdd_prop_free)(pdd);
2147 }
2148 
2149 /*
2150  * Free the data associated with an array of ints,
2151  * allocated with ddi_prop_decode_alloc().
2152  */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded data that follows it */
	kmem_free(pdd, pdd->pdd_size);
}
2158 
2159 /*
2160  * Free a single string property or a single string contained within
2161  * the argv style return value of an array of strings.
2162  */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded string data */
	kmem_free(pdd, pdd->pdd_size);

}
2169 
2170 /*
2171  * Free an array of strings.
2172  */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* The pointer vector and all strings live in this one allocation */
	kmem_free(pdd, pdd->pdd_size);
}
2178 
2179 /*
2180  * Free the data associated with an array of bytes.
2181  */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header plus the decoded byte array */
	kmem_free(pdd, pdd->pdd_size);
}
2187 
2188 /*
2189  * Reset the current location pointer in the property handle to the
2190  * beginning of the data.
2191  */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both the current and the saved position to the data start */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
2198 
2199 /*
2200  * Restore the current location pointer in the property handle to the
2201  * saved position.
2202  */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	/* Remember the current position so it can be restored later */
	ph->ph_save_pos = ph->ph_cur_pos;
}
2208 
2209 /*
2210  * Save the location that the current location pointer is pointing to..
2211  */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* Wind the current position back to the previously saved one */
	ph->ph_cur_pos = ph->ph_save_pos;
}
2217 
2218 /*
2219  * Property encode/decode functions
2220  */
2221 
2222 /*
2223  * Decode a single integer property
2224  */
2225 static int
2226 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2227 {
2228 	int	i;
2229 	int	tmp;
2230 
2231 	/*
2232 	 * If there is nothing to decode return an error
2233 	 */
2234 	if (ph->ph_size == 0)
2235 		return (DDI_PROP_END_OF_DATA);
2236 
2237 	/*
2238 	 * Decode the property as a single integer and return it
2239 	 * in data if we were able to decode it.
2240 	 */
2241 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2242 	if (i < DDI_PROP_RESULT_OK) {
2243 		switch (i) {
2244 		case DDI_PROP_RESULT_EOF:
2245 			return (DDI_PROP_END_OF_DATA);
2246 
2247 		case DDI_PROP_RESULT_ERROR:
2248 			return (DDI_PROP_CANNOT_DECODE);
2249 		}
2250 	}
2251 
2252 	*(int *)data = tmp;
2253 	*nelements = 1;
2254 	return (DDI_PROP_SUCCESS);
2255 }
2256 
2257 /*
2258  * Decode a single 64 bit integer property
2259  */
static int
ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;
	int64_t	tmp;

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Decode the property as a single 64-bit integer and return it
	 * in data if we were able to decode it.  Note that the 64-bit
	 * operator rejects PROM-sourced handles (see ddi_prop_int64_op).
	 */
	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
	if (i < DDI_PROP_RESULT_OK) {
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(int64_t *)data = tmp;
	*nelements = 1;
	return (DDI_PROP_SUCCESS);
}
2291 
2292 /*
2293  * Decode an array of integers property
2294  */
static int
ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;		/* result of the last DDI_PROP_INT operation */
	int	cnt = 0;	/* number of encoded integers found */
	int	*tmp;
	int	*intp;
	int	n;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data,
	 * since the counting pass above consumed the whole stream.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded values in; the caller
	 * releases it with ddi_prop_free(9F).
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
		ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2365 
2366 /*
2367  * Decode a 64 bit integer array property
2368  */
static int
ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int	i;		/* result of the last DDI_PROP_INT64 operation */
	int	n;
	int	cnt = 0;	/* number of encoded 64-bit integers found */
	int64_t	*tmp;
	int64_t	*intp;

	/*
	 * Count the number of array elements by going
	 * through the data without decoding it.
	 */
	for (;;) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data,
	 * since the counting pass above consumed the whole stream.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Allocate memory to store the decoded values; the caller
	 * releases it with ddi_prop_free(9F).
	 */
	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
		ddi_prop_free_ints);

	/*
	 * Decode each element and place it in the space allocated
	 */
	tmp = intp;
	for (n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error.
			 */
			ddi_prop_free(intp);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*nelements = cnt;
	*(int64_t **)data = intp;

	return (DDI_PROP_SUCCESS);
}
2439 
2440 /*
2441  * Encode an array of integers property (Can be one element)
2442  */
int
ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
{
	int	i;	/* result of the last DDI_PROP_INT operation */
	int	*tmp;
	int	cnt;
	int	size;	/* encoded size of one integer */

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded int.
	 */
	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store all of the encoded ints.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
		DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2498 
2499 
2500 /*
2501  * Encode a 64 bit integer array property
2502  */
int
ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
{
	int i;		/* result of the last DDI_PROP_INT64 operation */
	int cnt;
	int size;	/* encoded size of one 64-bit integer */
	int64_t *tmp;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the size of an encoded 64 bit int.
	 */
	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);

	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_ENCODE);
		}
	}

	/*
	 * Allocate space in the handle to store all of the encoded ints.
	 */
	if (ddi_prop_encode_alloc(ph, size * nelements) !=
	    DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	/*
	 * Encode the array of ints.
	 */
	tmp = (int64_t *)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2558 
2559 /*
2560  * Decode a single string property
2561  */
static int
ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
{
	char		*tmp;
	char		*str;
	int		i;	/* result of the decode operation */
	int		size;	/* decoded size, including the NUL */

	/*
	 * If there is nothing to decode return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the decoded size of the encoded string.  Note this call
	 * also advances the handle's current position (see the string
	 * operator), hence the ddi_prop_reset_pos() below.
	 */
	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
	if (size < DDI_PROP_RESULT_OK) {
		switch (size) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in; the caller
	 * releases it with ddi_prop_free(9F).
	 */
	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);

	ddi_prop_reset_pos(ph);

	/*
	 * Decode the str and place it in the space we just allocated
	 */
	tmp = str;
	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error.
		 */
		ddi_prop_free(str);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(char **)data = str;
	*nelements = 1;

	return (DDI_PROP_SUCCESS);
}
2622 
2623 /*
2624  * Decode an array of strings.
2625  */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int		cnt = 0;	/* number of encoded strings found */
	char		**strs;		/* argv-style result vector */
	char		**tmp;
	char		*ptr;
	int		i;
	int		n;
	int		size;
	size_t		nbytes;		/* vector plus all string bytes */

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data;
	 * each pass below consumes the stream and must start over.
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total:
	 * a NULL-terminated pointer vector plus the string bytes.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 * GET_DSIZE also advances the handle's position.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings; the
	 * caller releases it with ddi_prop_free(9F).
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2752 
2753 /*
2754  * Encode a string.
2755  */
2756 int
2757 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2758 {
2759 	char		**tmp;
2760 	int		size;
2761 	int		i;
2762 
2763 	/*
2764 	 * If there is no data, we cannot do anything
2765 	 */
2766 	if (nelements == 0)
2767 		return (DDI_PROP_CANNOT_ENCODE);
2768 
2769 	/*
2770 	 * Get the size of the encoded string.
2771 	 */
2772 	tmp = (char **)data;
2773 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2774 	if (size < DDI_PROP_RESULT_OK) {
2775 		switch (size) {
2776 		case DDI_PROP_RESULT_EOF:
2777 			return (DDI_PROP_END_OF_DATA);
2778 
2779 		case DDI_PROP_RESULT_ERROR:
2780 			return (DDI_PROP_CANNOT_ENCODE);
2781 		}
2782 	}
2783 
2784 	/*
2785 	 * Allocate space in the handle to store the encoded string.
2786 	 */
2787 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2788 		return (DDI_PROP_NO_MEMORY);
2789 
2790 	ddi_prop_reset_pos(ph);
2791 
2792 	/*
2793 	 * Encode the string.
2794 	 */
2795 	tmp = (char **)data;
2796 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2797 	if (i < DDI_PROP_RESULT_OK) {
2798 		switch (i) {
2799 		case DDI_PROP_RESULT_EOF:
2800 			return (DDI_PROP_END_OF_DATA);
2801 
2802 		case DDI_PROP_RESULT_ERROR:
2803 			return (DDI_PROP_CANNOT_ENCODE);
2804 		}
2805 	}
2806 
2807 	return (DDI_PROP_SUCCESS);
2808 }
2809 
2810 
2811 /*
2812  * Encode an array of strings.
2813  */
int
ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
{
	int		cnt = 0;
	char		**tmp;		/* walks the argv-style input vector */
	int		size;
	uint_t		total_size;	/* sum of all encoded string sizes */
	int		i;

	/*
	 * If there is no data, we cannot do anything
	 */
	if (nelements == 0)
		return (DDI_PROP_CANNOT_ENCODE);

	/*
	 * Get the total size required to encode all the strings.
	 */
	total_size = 0;
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
		total_size += (uint_t)size;
	}

	/*
	 * Allocate space in the handle to store the encoded strings.
	 */
	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
		return (DDI_PROP_NO_MEMORY);

	ddi_prop_reset_pos(ph);

	/*
	 * Encode the array of strings.
	 */
	tmp = (char **)data;
	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_ENCODE);
			}
		}
	}

	return (DDI_PROP_SUCCESS);
}
2875 
2876 
2877 /*
2878  * Decode an array of bytes.
2879  */
static int
ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
{
	uchar_t		*tmp;
	int		nbytes;		/* decoded size of the byte array */
	int		i;

	/*
	 * If there are no elements return an error
	 */
	if (ph->ph_size == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * Get the size of the encoded array of bytes.
	 */
	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
		data, ph->ph_size);
	if (nbytes < DDI_PROP_RESULT_OK) {
		switch (nbytes) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	/*
	 * Allocate memory to store the decoded value in; the caller
	 * releases it with ddi_prop_free(9F).
	 */
	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);

	/*
	 * Decode each element and place it in the space we just allocated
	 */
	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
	if (i < DDI_PROP_RESULT_OK) {
		/*
		 * Free the space we just allocated
		 * and return an error
		 */
		ddi_prop_free(tmp);
		switch (i) {
		case DDI_PROP_RESULT_EOF:
			return (DDI_PROP_END_OF_DATA);

		case DDI_PROP_RESULT_ERROR:
			return (DDI_PROP_CANNOT_DECODE);
		}
	}

	*(uchar_t **)data = tmp;
	*nelements = nbytes;

	return (DDI_PROP_SUCCESS);
}
2937 
2938 /*
2939  * Encode an array of bytes.
2940  */
2941 int
2942 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2943 {
2944 	int		size;
2945 	int		i;
2946 
2947 	/*
2948 	 * If there are no elements, then this is a boolean property,
2949 	 * so just create a property handle with no data and return.
2950 	 */
2951 	if (nelements == 0) {
2952 		(void) ddi_prop_encode_alloc(ph, 0);
2953 		return (DDI_PROP_SUCCESS);
2954 	}
2955 
2956 	/*
2957 	 * Get the size of the encoded array of bytes.
2958 	 */
2959 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2960 		nelements);
2961 	if (size < DDI_PROP_RESULT_OK) {
2962 		switch (size) {
2963 		case DDI_PROP_RESULT_EOF:
2964 			return (DDI_PROP_END_OF_DATA);
2965 
2966 		case DDI_PROP_RESULT_ERROR:
2967 			return (DDI_PROP_CANNOT_DECODE);
2968 		}
2969 	}
2970 
2971 	/*
2972 	 * Allocate space in the handle to store the encoded bytes.
2973 	 */
2974 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2975 		return (DDI_PROP_NO_MEMORY);
2976 
2977 	/*
2978 	 * Encode the array of bytes.
2979 	 */
2980 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2981 		nelements);
2982 	if (i < DDI_PROP_RESULT_OK) {
2983 		switch (i) {
2984 		case DDI_PROP_RESULT_EOF:
2985 			return (DDI_PROP_END_OF_DATA);
2986 
2987 		case DDI_PROP_RESULT_ERROR:
2988 			return (DDI_PROP_CANNOT_ENCODE);
2989 		}
2990 	}
2991 
2992 	return (DDI_PROP_SUCCESS);
2993 }
2994 
2995 /*
2996  * OBP 1275 integer, string and byte operators.
2997  *
2998  * DDI_PROP_CMD_DECODE:
2999  *
3000  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3001  *	DDI_PROP_RESULT_EOF:		end of data
3002  *	DDI_PROP_OK:			data was decoded
3003  *
3004  * DDI_PROP_CMD_ENCODE:
3005  *
3006  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
3007  *	DDI_PROP_RESULT_EOF:		end of data
3008  *	DDI_PROP_OK:			data was encoded
3009  *
3010  * DDI_PROP_CMD_SKIP:
3011  *
3012  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
3013  *	DDI_PROP_RESULT_EOF:		end of data
3014  *	DDI_PROP_OK:			data was skipped
3015  *
3016  * DDI_PROP_CMD_GET_ESIZE:
3017  *
3018  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3019  *	DDI_PROP_RESULT_EOF:		end of data
3020  *	> 0:				the encoded size
3021  *
3022  * DDI_PROP_CMD_GET_DSIZE:
3023  *
3024  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3025  *	DDI_PROP_RESULT_EOF:		end of data
3026  *	> 0:				the decoded size
3027  */
3028 
3029 /*
3030  * OBP 1275 integer operator
3031  *
3032  * OBP properties are a byte stream of data, so integers may not be
3033  * properly aligned.  Therefore we need to copy them one byte at a time.
3034  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			/*
			 * NOTE(review): ph_size is a byte count, but the
			 * (int *) arithmetic below scales it by sizeof (int)
			 * — confirm this bounds check is as intended.
			 */
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			if (ph->ph_size < sizeof (int) ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
				(uchar_t *)ph->ph_cur_pos,
				(ph->ph_size < PROP_1275_INT_SIZE) ?
				ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
			PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < PROP_1275_INT_SIZE ||
			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
				ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy the integer into the encode buffer.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
				ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the encoded data: report EOF */
		if ((caddr_t)ph->ph_cur_pos ==
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
				(caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3148 
3149 /*
3150  * 64 bit integer operator.
3151  *
3152  * This is an extension, defined by Sun, to the 1275 integer
3153  * operator.  This routine handles the encoding/decoding of
3154  * 64 bit integer properties.
3155  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit properties are not supported from the PROM */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): as in ddi_prop_1275_int, the
			 * (int64_t *) arithmetic scales ph_size (a byte
			 * count) by sizeof (int64_t) — confirm intended.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer (the PROM case was rejected above).
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy the integer into the encode buffer.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the encoded data: report EOF */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded 64-bit integer.
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded 64-bit integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3263 
3264 /*
3265  * OBP 1275 string operator.
3266  *
3267  * OBP strings are NULL terminated.
3268  */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* n includes the terminating NUL */
		n = strlen((char *)ph->ph_cur_pos) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(ph->ph_cur_pos, data, n);

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Walk to the NUL terminator.  We know the size of the
		 * property, but we must verify the string is properly
		 * terminated since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;

		if (p == end) {
			return (DDI_PROP_RESULT_EOF);
		}

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/* Ran off the end without finding a NUL terminator */
		return (DDI_PROP_RESULT_ERROR);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * Note that this advances ph_cur_pos past the string
		 * as a side effect; callers rely on that (see the
		 * decode_strings sizing passes) and reset afterwards.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		for (n = 0; p < end; n++) {
			if (*p++ == 0) {
				ph->ph_cur_pos = p;
				return (n+1);
			}
		}
		/* Unterminated string: cannot size it */
		return (DDI_PROP_RESULT_ERROR);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3391 
3392 /*
3393  * OBP 1275 byte operator
3394  *
3395  * Caller must specify the number of bytes to get.  OBP encodes bytes
3396  * as a byte so there is a 1-to-1 translation.
3397  */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
	uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 *
		 * The ph_size < nelements test must come before the pointer
		 * arithmetic below so that (ph_size - nelements) cannot
		 * wrap around (ph_size is unsigned).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < nelements ||
			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 * (same overflow-safe bounds check as the decode case).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
			ph->ph_size < nelements ||
			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
				ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/* Past the end of the encoded data: EOF, not error. */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
				ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3486 
3487 /*
3488  * Used for properties that come from the OBP, hardware configuration files,
3489  * or that are created by calls to ddi_prop_update(9F).
3490  */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer element operator */
	ddi_prop_1275_string,	/* string element operator */
	ddi_prop_1275_bytes,	/* byte array operator */
	ddi_prop_int64_op	/* 64-bit integer element operator */
};
3497 
3498 
3499 /*
3500  * Interface to create/modify a managed property on child's behalf...
3501  * Flags interpreted are:
3502  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3503  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3504  *
3505  * Use same dev_t when modifying or undefining a property.
3506  * Search for properties with DDI_DEV_T_ANY to match first named
3507  * property on the list.
3508  *
3509  * Properties are stored LIFO and subsequently will match the first
3510  * `matching' instance.
3511  */
3512 
3513 /*
3514  * ddi_prop_add:	Add a software defined property
3515  */
3516 
3517 /*
3518  * define to get a new ddi_prop_t.
3519  * km_flags are KM_SLEEP or KM_NOSLEEP.
3520  */
3521 
/* Allocate a zero-filled ddi_prop_t; NULL on KM_NOSLEEP failure. */
#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3524 
/*
 * Add a software-defined property to the front of the appropriate
 * per-devinfo property list (driver, system, or hardware, selected by
 * flags).  The name and value are copied; the caller retains ownership
 * of its own buffers.  Returns DDI_PROP_SUCCESS, DDI_PROP_INVAL_ARG,
 * or DDI_PROP_NO_MEMORY.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/* Honor the caller's choice of blocking vs. non-blocking alloc. */
	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select the target list: system, hardware, or driver (default). */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 * All allocation was done above, so only the list splice happens
	 * under devi_lock.
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3623 
3624 
3625 /*
3626  * ddi_prop_change:	Modify a software managed property value
3627  *
3628  *			Set new length and value if found.
3629  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3630  *			input name is the NULL string.
3631  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3632  *
3633  *			Note: an undef can be modified to be a define,
3634  *			(you can't go the other way.)
3635  */
3636 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	int		km_flags = KM_NOSLEEP;
	caddr_t		p = NULL;	/* replacement value buffer */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (Allocation must happen before devi_lock is taken, since
	 * kmem_alloc may block with KM_SLEEP.)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, km_flags);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Select the list to search: system, hardware, or driver. */
	propp = DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		propp = DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		propp = DEVI(dip)->devi_hw_prop_ptr;

	while (propp != NULL) {
		if (strcmp(name, propp->prop_name) == 0 &&
		    dev == propp->prop_dev) {

			/*
			 * Found a match.  Copy the new value into the
			 * preallocated buffer, free the old value, and
			 * swap the new buffer/length into place, all
			 * under devi_lock.
			 */

			if (length != 0)
				bcopy(value, p, length);

			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);

			propp->prop_len = length;
			propp->prop_val = p;
			/* A modify turns an explicit undef into a define. */
			propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}
		propp = propp->prop_next;
	}

	/* Not found: discard the preallocated buffer and create anew. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);
	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3704 
3705 
3706 
3707 /*
3708  * Common update routine used to update and encode a property.	Creates
3709  * a property handle, calls the property encode routine, figures out if
3710  * the property already exists and updates if it does.	Otherwise it
3711  * creates if it does not exist.
3712  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces to the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.  The encoder allocates
	 * ph.ph_data (of ph.ph_size bytes), which we must free on
	 * every exit path below.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/change copied it into the property list.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3792 
3793 
3794 /*
3795  * ddi_prop_create:	Define a managed property:
3796  *			See above for details.
3797  */
3798 
3799 int
3800 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3801     char *name, caddr_t value, int length)
3802 {
3803 	if (!(flag & DDI_PROP_CANSLEEP)) {
3804 		flag |= DDI_PROP_DONTSLEEP;
3805 #ifdef DDI_PROP_DEBUG
3806 		if (length != 0)
3807 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3808 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3809 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3810 #endif /* DDI_PROP_DEBUG */
3811 	}
3812 	flag &= ~DDI_PROP_SYSTEM_DEF;
3813 	return (ddi_prop_update_common(dev, dip,
3814 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
3815 	    value, length, ddi_prop_fm_encode_bytes));
3816 }
3817 
3818 int
3819 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3820     char *name, caddr_t value, int length)
3821 {
3822 	if (!(flag & DDI_PROP_CANSLEEP))
3823 		flag |= DDI_PROP_DONTSLEEP;
3824 	return (ddi_prop_update_common(dev, dip,
3825 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
3826 	    DDI_PROP_TYPE_ANY),
3827 	    name, value, length, ddi_prop_fm_encode_bytes));
3828 }
3829 
3830 int
3831 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3832     char *name, caddr_t value, int length)
3833 {
3834 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3835 
3836 	/*
3837 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3838 	 * return error.
3839 	 */
3840 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3841 		return (DDI_PROP_INVAL_ARG);
3842 
3843 	if (!(flag & DDI_PROP_CANSLEEP))
3844 		flag |= DDI_PROP_DONTSLEEP;
3845 	flag &= ~DDI_PROP_SYSTEM_DEF;
3846 	if (ddi_prop_exists((dev == DDI_DEV_T_NONE) ? DDI_DEV_T_ANY : dev,
3847 	    dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3848 		return (DDI_PROP_NOT_FOUND);
3849 
3850 	return (ddi_prop_update_common(dev, dip,
3851 	    (flag | DDI_PROP_TYPE_BYTE), name,
3852 	    value, length, ddi_prop_fm_encode_bytes));
3853 }
3854 
3855 int
3856 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3857     char *name, caddr_t value, int length)
3858 {
3859 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3860 
3861 	/*
3862 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3863 	 * return error.
3864 	 */
3865 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3866 		return (DDI_PROP_INVAL_ARG);
3867 
3868 	if (ddi_prop_exists((dev == DDI_DEV_T_NONE) ? DDI_DEV_T_ANY : dev,
3869 	    dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3870 		return (DDI_PROP_NOT_FOUND);
3871 
3872 	if (!(flag & DDI_PROP_CANSLEEP))
3873 		flag |= DDI_PROP_DONTSLEEP;
3874 	return (ddi_prop_update_common(dev, dip,
3875 		(flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3876 		name, value, length, ddi_prop_fm_encode_bytes));
3877 }
3878 
3879 
3880 /*
3881  * Common lookup routine used to lookup and decode a property.
3882  * Creates a property handle, searches for the raw encoded data,
3883  * fills in the handle, and calls the property decode functions
3884  * passed in.
3885  *
3886  * This routine is not static because ddi_bus_prop_op() which lives in
3887  * ddi_impl.c calls it.  No driver should be calling this routine.
3888  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
		(name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Unless the caller forbids sleeping, allow it. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
		flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if (flags & DDI_UNBND_DLPI2) {
		/*
		 * For unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	/* Property not found (or search failed): nothing was allocated. */
	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	/* Decode into the caller's buffer/pointer. */
	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data (allocated by the search above).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3951 
3952 /*
3953  * Lookup and return an array of composite properties.  The driver must
3954  * provide the decode routine.
3955  */
3956 int
3957 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3958     uint_t flags, char *name, void *data, uint_t *nelements,
3959     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3960 {
3961 	return (ddi_prop_lookup_common(match_dev, dip,
3962 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3963 	    data, nelements, prop_decoder));
3964 }
3965 
3966 /*
3967  * Return 1 if a property exists (no type checking done).
3968  * Return 0 if it does not exist.
3969  */
3970 int
3971 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3972 {
3973 	int	i;
3974 	uint_t	x = 0;
3975 
3976 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3977 		flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3978 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3979 }
3980 
3981 
3982 /*
3983  * Update an array of composite properties.  The driver must
3984  * provide the encode routine.
3985  */
3986 int
3987 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3988     char *name, void *data, uint_t nelements,
3989     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3990 {
3991 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3992 	    name, data, nelements, prop_create));
3993 }
3994 
3995 /*
3996  * Get a single integer or boolean property and return it.
3997  * If the property does not exists, or cannot be decoded,
3998  * then return the defvalue passed in.
3999  *
4000  * This routine always succeeds.
4001  */
int
ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int defvalue)
{
	int	data;
	uint_t	nelements;
	int	rval;

	/*
	 * Only the consumer flags below are meaningful here; anything
	 * else is warned about (DEBUG only) and then masked off.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
	}

	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
		/*
		 * END_OF_DATA: presumably a boolean (existence-only)
		 * property, reported as value 1; any other failure
		 * yields the caller's default.
		 */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
4033 
4034 /*
4035  * Get a single 64 bit integer or boolean property and return it.
4036  * If the property does not exists, or cannot be decoded,
4037  * then return the defvalue passed in.
4038  *
4039  * This routine always succeeds.
4040  */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), invalid flags
		 * here return an error code rather than being masked,
		 * despite the "always succeeds" header comment above.
		 * Preserved as-is; callers may depend on it.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/* 64-bit properties never come from the PROM (DDI_PROP_NOTPROM). */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		/*
		 * END_OF_DATA: presumably a boolean (existence-only)
		 * property, reported as value 1.
		 */
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
4072 
4073 /*
4074  * Get an array of integer property
4075  */
4076 int
4077 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4078     char *name, int **data, uint_t *nelements)
4079 {
4080 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4081 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4082 #ifdef DEBUG
4083 		if (dip != NULL) {
4084 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4085 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4086 			    flags, name, ddi_driver_name(dip),
4087 			    ddi_get_instance(dip));
4088 		}
4089 #endif /* DEBUG */
4090 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4091 		LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4092 	}
4093 
4094 	return (ddi_prop_lookup_common(match_dev, dip,
4095 	    (flags | DDI_PROP_TYPE_INT), name, data,
4096 	    nelements, ddi_prop_fm_decode_ints));
4097 }
4098 
4099 /*
4100  * Get an array of 64 bit integer properties
4101  */
4102 int
4103 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4104     char *name, int64_t **data, uint_t *nelements)
4105 {
4106 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4107 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4108 #ifdef DEBUG
4109 		if (dip != NULL) {
4110 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4111 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4112 			    flags, name, ddi_driver_name(dip),
4113 			    ddi_get_instance(dip));
4114 		}
4115 #endif /* DEBUG */
4116 		return (DDI_PROP_INVAL_ARG);
4117 	}
4118 
4119 	return (ddi_prop_lookup_common(match_dev, dip,
4120 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4121 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4122 }
4123 
4124 /*
4125  * Update a single integer property.  If the property exists on the drivers
4126  * property list it updates, else it creates it.
4127  */
4128 int
4129 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4130     char *name, int data)
4131 {
4132 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4133 	    name, &data, 1, ddi_prop_fm_encode_ints));
4134 }
4135 
4136 /*
4137  * Update a single 64 bit integer property.
4138  * Update the driver property list if it exists, else create it.
4139  */
4140 int
4141 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4142     char *name, int64_t data)
4143 {
4144 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4145 	    name, &data, 1, ddi_prop_fm_encode_int64));
4146 }
4147 
4148 int
4149 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4150     char *name, int data)
4151 {
4152 	return (ddi_prop_update_common(match_dev, dip,
4153 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4154 	    name, &data, 1, ddi_prop_fm_encode_ints));
4155 }
4156 
4157 int
4158 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4159     char *name, int64_t data)
4160 {
4161 	return (ddi_prop_update_common(match_dev, dip,
4162 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4163 	    name, &data, 1, ddi_prop_fm_encode_int64));
4164 }
4165 
4166 /*
4167  * Update an array of integer property.  If the property exists on the drivers
4168  * property list it updates, else it creates it.
4169  */
4170 int
4171 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4172     char *name, int *data, uint_t nelements)
4173 {
4174 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4175 	    name, data, nelements, ddi_prop_fm_encode_ints));
4176 }
4177 
4178 /*
4179  * Update an array of 64 bit integer properties.
4180  * Update the driver property list if it exists, else create it.
4181  */
4182 int
4183 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4184     char *name, int64_t *data, uint_t nelements)
4185 {
4186 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4187 	    name, data, nelements, ddi_prop_fm_encode_int64));
4188 }
4189 
4190 int
4191 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4192     char *name, int64_t *data, uint_t nelements)
4193 {
4194 	return (ddi_prop_update_common(match_dev, dip,
4195 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4196 	    name, data, nelements, ddi_prop_fm_encode_int64));
4197 }
4198 
4199 int
4200 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4201     char *name, int *data, uint_t nelements)
4202 {
4203 	return (ddi_prop_update_common(match_dev, dip,
4204 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4205 	    name, data, nelements, ddi_prop_fm_encode_ints));
4206 }
4207 
4208 /*
4209  * Get a single string property.
4210  */
4211 int
4212 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4213     char *name, char **data)
4214 {
4215 	uint_t x;
4216 
4217 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4218 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4219 #ifdef DEBUG
4220 		if (dip != NULL) {
4221 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4222 			    "(prop = %s, node = %s%d); invalid bits ignored",
4223 			    "ddi_prop_lookup_string", flags, name,
4224 			    ddi_driver_name(dip), ddi_get_instance(dip));
4225 		}
4226 #endif /* DEBUG */
4227 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4228 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4229 	}
4230 
4231 	return (ddi_prop_lookup_common(match_dev, dip,
4232 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4233 	    &x, ddi_prop_fm_decode_string));
4234 }
4235 
4236 /*
4237  * Get an array of strings property.
4238  */
4239 int
4240 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4241     char *name, char ***data, uint_t *nelements)
4242 {
4243 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4244 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4245 #ifdef DEBUG
4246 		if (dip != NULL) {
4247 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4248 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4249 			    flags, name, ddi_driver_name(dip),
4250 			    ddi_get_instance(dip));
4251 		}
4252 #endif /* DEBUG */
4253 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4254 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4255 	}
4256 
4257 	return (ddi_prop_lookup_common(match_dev, dip,
4258 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4259 	    nelements, ddi_prop_fm_decode_strings));
4260 }
4261 
4262 /*
4263  * Update a single string property.
4264  */
4265 int
4266 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4267     char *name, char *data)
4268 {
4269 	return (ddi_prop_update_common(match_dev, dip,
4270 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4271 	    ddi_prop_fm_encode_string));
4272 }
4273 
4274 int
4275 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4276     char *name, char *data)
4277 {
4278 	return (ddi_prop_update_common(match_dev, dip,
4279 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4280 	    name, &data, 1, ddi_prop_fm_encode_string));
4281 }
4282 
4283 
4284 /*
4285  * Update an array of strings property.
4286  */
4287 int
4288 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4289     char *name, char **data, uint_t nelements)
4290 {
4291 	return (ddi_prop_update_common(match_dev, dip,
4292 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4293 	    ddi_prop_fm_encode_strings));
4294 }
4295 
4296 int
4297 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4298     char *name, char **data, uint_t nelements)
4299 {
4300 	return (ddi_prop_update_common(match_dev, dip,
4301 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4302 	    name, data, nelements,
4303 	    ddi_prop_fm_encode_strings));
4304 }
4305 
4306 
4307 /*
4308  * Get an array of bytes property.
4309  */
4310 int
4311 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4312     char *name, uchar_t **data, uint_t *nelements)
4313 {
4314 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4315 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4316 #ifdef DEBUG
4317 		if (dip != NULL) {
4318 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4319 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4320 			    flags, name, ddi_driver_name(dip),
4321 			    ddi_get_instance(dip));
4322 		}
4323 #endif /* DEBUG */
4324 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4325 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4326 	}
4327 
4328 	return (ddi_prop_lookup_common(match_dev, dip,
4329 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4330 	    nelements, ddi_prop_fm_decode_bytes));
4331 }
4332 
4333 /*
4334  * Update an array of bytes property.
4335  */
4336 int
4337 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4338     char *name, uchar_t *data, uint_t nelements)
4339 {
4340 	if (nelements == 0)
4341 		return (DDI_PROP_INVAL_ARG);
4342 
4343 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4344 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4345 }
4346 
4347 
4348 int
4349 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4350     char *name, uchar_t *data, uint_t nelements)
4351 {
4352 	if (nelements == 0)
4353 		return (DDI_PROP_INVAL_ARG);
4354 
4355 	return (ddi_prop_update_common(match_dev, dip,
4356 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4357 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4358 }
4359 
4360 
4361 /*
4362  * ddi_prop_remove_common:	Undefine a managed property:
4363  *			Input dev_t must match dev_t when defined.
4364  *			Returns DDI_PROP_NOT_FOUND, possibly.
4365  *			DDI_PROP_INVAL_ARG is also possible if dev is
4366  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4367  */
int
ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
{
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	ddi_prop_t	*propp;
	ddi_prop_t	*lastpropp = NULL;

	/* A wildcard dev or a NULL/empty name cannot identify a property. */
	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
	    (strlen(name) == 0)) {
		return (DDI_PROP_INVAL_ARG);
	}

	/* Select the list to search: system, hardware, or driver (default). */
	if (flag & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flag & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Linear search for an exact (name, dev) match. */
	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
		if ((strcmp(name, propp->prop_name) == 0) &&
		    (dev == propp->prop_dev)) {
			/*
			 * Unlink this propp allowing for it to
			 * be first in the list:
			 */

			if (lastpropp == NULL)
				*list_head = propp->prop_next;
			else
				lastpropp->prop_next = propp->prop_next;

			/* propp is now unlinked; safe to drop the lock. */
			mutex_exit(&(DEVI(dip)->devi_lock));

			/*
			 * Free memory and return...
			 */
			kmem_free(propp->prop_name,
			    strlen(propp->prop_name) + 1);
			if (propp->prop_len != 0)
				kmem_free(propp->prop_val, propp->prop_len);
			kmem_free(propp, sizeof (ddi_prop_t));
			return (DDI_PROP_SUCCESS);
		}
		lastpropp = propp;
	}
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_NOT_FOUND);
}
4417 
/*
 * ddi_prop_remove: remove a driver-defined property; dev must match the
 * dev used when the property was defined.
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4423 
/*
 * e_ddi_prop_remove: system-defined-property variant of ddi_prop_remove().
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4429 
4430 /*
4431  * e_ddi_prop_list_delete: remove a list of properties
4432  *	Note that the caller needs to provide the required protection
4433  *	(eg. devi_lock if these properties are still attached to a devi)
4434  */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	/* Thin wrapper; locking is the caller's responsibility (see above). */
	i_ddi_prop_list_delete(props);
}
4440 
4441 /*
4442  * ddi_prop_remove_all_common:
4443  *	Used before unloading a driver to remove
4444  *	all properties. (undefines all dev_t's props.)
4445  *	Also removes `explicitly undefined' props.
4446  *	No errors possible.
4447  */
4448 void
4449 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4450 {
4451 	ddi_prop_t	**list_head;
4452 
4453 	mutex_enter(&(DEVI(dip)->devi_lock));
4454 	if (flag & DDI_PROP_SYSTEM_DEF) {
4455 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4456 	} else if (flag & DDI_PROP_HW_DEF) {
4457 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4458 	} else {
4459 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4460 	}
4461 	i_ddi_prop_list_delete(*list_head);
4462 	*list_head = NULL;
4463 	mutex_exit(&(DEVI(dip)->devi_lock));
4464 }
4465 
4466 
4467 /*
4468  * ddi_prop_remove_all:		Remove all driver prop definitions.
4469  */
4470 
void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* Flag 0 selects the driver-defined property list. */
	ddi_prop_remove_all_common(dip, 0);
}
4476 
4477 /*
4478  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4479  */
4480 
void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	/* DDI_PROP_SYSTEM_DEF selects the system-defined property list. */
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}
4486 
4487 
4488 /*
4489  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4490  *			searches which match this property return
4491  *			the error code DDI_PROP_UNDEFINED.
4492  *
4493  *			Use ddi_prop_remove to negate effect of
4494  *			ddi_prop_undefine
4495  *
4496  *			See above for error returns.
4497  */
4498 
4499 int
4500 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4501 {
4502 	if (!(flag & DDI_PROP_CANSLEEP))
4503 		flag |= DDI_PROP_DONTSLEEP;
4504 	return (ddi_prop_update_common(dev, dip,
4505 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
4506 	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
4507 }
4508 
4509 int
4510 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4511 {
4512 	if (!(flag & DDI_PROP_CANSLEEP))
4513 		flag |= DDI_PROP_DONTSLEEP;
4514 	return (ddi_prop_update_common(dev, dip,
4515 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4516 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
4517 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4518 }
4519 
4520 /*
4521  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4522  *
4523  * if input dip != child_dip, then call is on behalf of child
4524  * to search PROM, do it via ddi_prop_search_common() and ascend only
4525  * if allowed.
4526  *
4527  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4528  * to search for PROM defined props only.
4529  *
4530  * Note that the PROM search is done only if the requested dev
4531  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4532  * have no associated dev, thus are automatically associated with
4533  * DDI_DEV_T_NONE.
4534  *
4535  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4536  *
4537  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4538  * that the property resides in the prom.
4539  */
int
impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	len;
	caddr_t buffer;

	/*
	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
	 * look in caller's PROM if it's a self identifying device...
	 *
	 * Note that this is very similar to ddi_prop_op, but we
	 * search the PROM instead of the s/w defined properties,
	 * and we are called on by the parent driver to do this for
	 * the child.
	 */

	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
	    ndi_dev_is_prom_node(ch_dip) &&
	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
		/* -1 from the PROM means the property does not exist. */
		len = prom_getproplen((dnode_t)DEVI(ch_dip)->devi_nodeid, name);
		if (len == -1) {
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * If exists only request, we're done
		 */
		if (prop_op == PROP_EXISTS) {
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * If length only request or prop length == 0, get out
		 */
		if ((prop_op == PROP_LEN) || (len == 0)) {
			*lengthp = len;
			return (DDI_PROP_FOUND_1275);
		}

		/*
		 * Allocate buffer if required... (either way `buffer'
		 * is receiving address).
		 */

		switch (prop_op) {

		case PROP_LEN_AND_VAL_ALLOC:

			buffer = kmem_alloc((size_t)len,
			    mod_flags & DDI_PROP_CANSLEEP ?
			    KM_SLEEP : KM_NOSLEEP);
			if (buffer == NULL) {
				return (DDI_PROP_NO_MEMORY);
			}
			/* Caller owns the allocation and must free it. */
			*(caddr_t *)valuep = buffer;
			break;

		case PROP_LEN_AND_VAL_BUF:

			if (len > (*lengthp)) {
				*lengthp = len;
				return (DDI_PROP_BUF_TOO_SMALL);
			}

			buffer = valuep;
			break;

		default:
			/*
			 * NOTE(review): `buffer' is left uninitialized on
			 * this path.  PROP_LEN and PROP_EXISTS return above,
			 * so this arm appears unreachable in practice --
			 * confirm before relying on it.
			 */
			break;
		}

		/*
		 * Call the PROM function to do the copy.
		 */
		(void) prom_getprop((dnode_t)DEVI(ch_dip)->devi_nodeid,
			name, buffer);

		*lengthp = len; /* return the actual length to the caller */
		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
		return (DDI_PROP_FOUND_1275);
	}

	return (DDI_PROP_NOT_FOUND);
}
4626 
4627 /*
4628  * The ddi_bus_prop_op default bus nexus prop op function.
4629  *
4630  * Code to search hardware layer (PROM), if it exists,
4631  * on behalf of child, then, if appropriate, ascend and check
4632  * my own software defined properties...
4633  */
int
ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
    ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	error;

	/* First try the child's PROM properties, if it has any. */
	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
				    name, valuep, lengthp);

	/* Definitive answers (including "buffer too small") end the search. */
	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
	    error == DDI_PROP_BUF_TOO_SMALL)
		return (error);

	if (error == DDI_PROP_NO_MEMORY) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Check the 'options' node as a last resort
	 */
	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
		return (DDI_PROP_NOT_FOUND);

	if (ch_dip == ddi_root_node())	{
		/*
		 * As a last resort, when we've reached
		 * the top and still haven't found the
		 * property, see if the desired property
		 * is attached to the options node.
		 *
		 * The options dip is attached right after boot.
		 */
		ASSERT(options_dip != NULL);
		/*
		 * Force the "don't pass" flag to *just* see
		 * what the options node has to offer.
		 */
		return (ddi_prop_search_common(dev, options_dip, prop_op,
		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
		    (uint_t *)lengthp));
	}

	/*
	 * Otherwise, continue search with parent's s/w defined properties...
	 * NOTE: Using `dip' in following call increments the level.
	 */

	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
	    name, valuep, (uint_t *)lengthp));
}
4686 
4687 /*
4688  * External property functions used by other parts of the kernel...
4689  */
4690 
4691 /*
4692  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4693  */
4694 
4695 int
4696 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4697     caddr_t valuep, int *lengthp)
4698 {
4699 	_NOTE(ARGUNUSED(type))
4700 	dev_info_t *devi;
4701 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4702 	int error;
4703 
4704 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4705 		return (DDI_PROP_NOT_FOUND);
4706 
4707 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4708 	ddi_release_devi(devi);
4709 	return (error);
4710 }
4711 
4712 /*
4713  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4714  */
4715 
4716 int
4717 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4718     caddr_t valuep, int *lengthp)
4719 {
4720 	_NOTE(ARGUNUSED(type))
4721 	dev_info_t *devi;
4722 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4723 	int error;
4724 
4725 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4726 		return (DDI_PROP_NOT_FOUND);
4727 
4728 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4729 	ddi_release_devi(devi);
4730 	return (error);
4731 }
4732 
4733 /*
4734  * e_ddi_getprop:	See comments for ddi_getprop.
4735  */
4736 int
4737 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4738 {
4739 	_NOTE(ARGUNUSED(type))
4740 	dev_info_t *devi;
4741 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4742 	int	propvalue = defvalue;
4743 	int	proplength = sizeof (int);
4744 	int	error;
4745 
4746 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4747 		return (defvalue);
4748 
4749 	error = cdev_prop_op(dev, devi, prop_op,
4750 	    flags, name, (caddr_t)&propvalue, &proplength);
4751 	ddi_release_devi(devi);
4752 
4753 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4754 		propvalue = 1;
4755 
4756 	return (propvalue);
4757 }
4758 
4759 /*
4760  * e_ddi_getprop_int64:
4761  *
4762  * This is a typed interfaces, but predates typed properties. With the
4763  * introduction of typed properties the framework tries to ensure
4764  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4765  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4766  * typed interface invokes legacy (non-typed) interfaces:
4767  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4768  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4769  * this type of lookup as a single operation we invoke the legacy
4770  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4771  * framework ddi_prop_op(9F) implementation is expected to check for
4772  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4773  * (currently TYPE_INT64).
4774  */
4775 int64_t
4776 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4777     int flags, int64_t defvalue)
4778 {
4779 	_NOTE(ARGUNUSED(type))
4780 	dev_info_t	*devi;
4781 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4782 	int64_t		propvalue = defvalue;
4783 	int		proplength = sizeof (propvalue);
4784 	int		error;
4785 
4786 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4787 		return (defvalue);
4788 
4789 	error = cdev_prop_op(dev, devi, prop_op, flags |
4790 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4791 	ddi_release_devi(devi);
4792 
4793 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4794 		propvalue = 1;
4795 
4796 	return (propvalue);
4797 }
4798 
4799 /*
4800  * e_ddi_getproplen:	See comments for ddi_getproplen.
4801  */
4802 int
4803 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4804 {
4805 	_NOTE(ARGUNUSED(type))
4806 	dev_info_t *devi;
4807 	ddi_prop_op_t prop_op = PROP_LEN;
4808 	int error;
4809 
4810 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4811 		return (DDI_PROP_NOT_FOUND);
4812 
4813 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4814 	ddi_release_devi(devi);
4815 	return (error);
4816 }
4817 
4818 /*
4819  * Routines to get at elements of the dev_info structure
4820  */
4821 
4822 /*
4823  * ddi_binding_name: Return the driver binding name of the devinfo node
4824  *		This is the name the OS used to bind the node to a driver.
4825  */
4826 char *
4827 ddi_binding_name(dev_info_t *dip)
4828 {
4829 	return (DEVI(dip)->devi_binding_name);
4830 }
4831 
4832 /*
4833  * ddi_driver_major: Return the major number of the driver that
4834  *		the supplied devinfo is bound to (-1 if none)
4835  */
4836 major_t
4837 ddi_driver_major(dev_info_t *devi)
4838 {
4839 	return (DEVI(devi)->devi_major);
4840 }
4841 
4842 /*
4843  * ddi_driver_name: Return the normalized driver name. this is the
4844  *		actual driver name
4845  */
4846 const char *
4847 ddi_driver_name(dev_info_t *devi)
4848 {
4849 	major_t major;
4850 
4851 	if ((major = ddi_driver_major(devi)) != (major_t)-1)
4852 		return (ddi_major_to_name(major));
4853 
4854 	return (ddi_node_name(devi));
4855 }
4856 
4857 /*
4858  * i_ddi_set_binding_name:	Set binding name.
4859  *
4860  *	Set the binding name to the given name.
4861  *	This routine is for use by the ddi implementation, not by drivers.
4862  */
4863 void
4864 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4865 {
4866 	DEVI(dip)->devi_binding_name = name;
4867 
4868 }
4869 
4870 /*
4871  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4872  * the implementation has used to bind the node to a driver.
4873  */
4874 char *
4875 ddi_get_name(dev_info_t *dip)
4876 {
4877 	return (DEVI(dip)->devi_binding_name);
4878 }
4879 
4880 /*
4881  * ddi_node_name: Return the name property of the devinfo node
4882  *		This may differ from ddi_binding_name if the node name
4883  *		does not define a binding to a driver (i.e. generic names).
4884  */
4885 char *
4886 ddi_node_name(dev_info_t *dip)
4887 {
4888 	return (DEVI(dip)->devi_node_name);
4889 }
4890 
4891 
4892 /*
4893  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4894  */
4895 int
4896 ddi_get_nodeid(dev_info_t *dip)
4897 {
4898 	return (DEVI(dip)->devi_nodeid);
4899 }
4900 
4901 int
4902 ddi_get_instance(dev_info_t *dip)
4903 {
4904 	return (DEVI(dip)->devi_instance);
4905 }
4906 
4907 struct dev_ops *
4908 ddi_get_driver(dev_info_t *dip)
4909 {
4910 	return (DEVI(dip)->devi_ops);
4911 }
4912 
4913 void
4914 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4915 {
4916 	DEVI(dip)->devi_ops = devo;
4917 }
4918 
4919 /*
4920  * ddi_set_driver_private/ddi_get_driver_private:
4921  * Get/set device driver private data in devinfo.
4922  */
4923 void
4924 ddi_set_driver_private(dev_info_t *dip, void *data)
4925 {
4926 	DEVI(dip)->devi_driver_data = data;
4927 }
4928 
4929 void *
4930 ddi_get_driver_private(dev_info_t *dip)
4931 {
4932 	return (DEVI(dip)->devi_driver_data);
4933 }
4934 
4935 /*
4936  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4937  */
4938 
4939 dev_info_t *
4940 ddi_get_parent(dev_info_t *dip)
4941 {
4942 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4943 }
4944 
4945 dev_info_t *
4946 ddi_get_child(dev_info_t *dip)
4947 {
4948 	return ((dev_info_t *)DEVI(dip)->devi_child);
4949 }
4950 
4951 dev_info_t *
4952 ddi_get_next_sibling(dev_info_t *dip)
4953 {
4954 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4955 }
4956 
4957 dev_info_t *
4958 ddi_get_next(dev_info_t *dip)
4959 {
4960 	return ((dev_info_t *)DEVI(dip)->devi_next);
4961 }
4962 
4963 void
4964 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4965 {
4966 	DEVI(dip)->devi_next = DEVI(nextdip);
4967 }
4968 
4969 /*
4970  * ddi_root_node:		Return root node of devinfo tree
4971  */
4972 
dev_info_t *
ddi_root_node(void)
{
	/* top_devinfo is defined by the devinfo framework elsewhere. */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
4980 
4981 /*
4982  * Miscellaneous functions:
4983  */
4984 
4985 /*
4986  * Implementation specific hooks
4987  */
4988 
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	/* Let the parent nexus report the device first. */
	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		/* If the NOSLEEP allocation fails, the message is skipped. */
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
}
5009 
5010 /*
5011  * ddi_ctlops() is described in the assembler not to buy a new register
5012  * window when it's called and can reduce cost in climbing the device tree
5013  * without using the tail call optimization.
5014  */
5015 int
5016 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5017 {
5018 	int ret;
5019 
5020 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5021 	    (void *)&rnumber, (void *)result);
5022 
5023 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5024 }
5025 
5026 int
5027 ddi_dev_nregs(dev_info_t *dev, int *result)
5028 {
5029 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5030 }
5031 
5032 int
5033 ddi_dev_is_sid(dev_info_t *d)
5034 {
5035 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5036 }
5037 
5038 int
5039 ddi_slaveonly(dev_info_t *d)
5040 {
5041 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5042 }
5043 
5044 int
5045 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5046 {
5047 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5048 }
5049 
5050 int
5051 ddi_streams_driver(dev_info_t *dip)
5052 {
5053 	if ((i_ddi_node_state(dip) >= DS_ATTACHED) &&
5054 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5055 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5056 		return (DDI_SUCCESS);
5057 	return (DDI_FAILURE);
5058 }
5059 
5060 /*
5061  * callback free list
5062  */
5063 
/* L2 pool size; one of nc_low/med/high, chosen in impl_ddi_callback_init(). */
static int ncallbacks;
static int nc_low = 170;
static int nc_med = 512;
static int nc_high = 2048;
/* Static L2 callback pool and the head of its free list. */
static struct ddi_callback *callbackq;
static struct ddi_callback *callbackqfree;

/*
 * set/run callback lists
 */
struct	cbstats	{
	kstat_named_t	cb_asked;
	kstat_named_t	cb_new;
	kstat_named_t	cb_run;
	kstat_named_t	cb_delete;
	kstat_named_t	cb_maxreq;
	kstat_named_t	cb_maxlist;
	kstat_named_t	cb_alloc;
	kstat_named_t	cb_runouts;
	kstat_named_t	cb_L2;
	kstat_named_t	cb_grow;
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};

/* Shorthand accessors for the kstat counter values above. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the L2 free list, and cbstats. */
static kmutex_t ddi_callback_mutex;
5110 
5111 /*
5112  * callbacks are handled using a L1/L2 cache. The L1 cache
5113  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5114  * we can't get callbacks from the L1 cache [because pageout is doing
5115  * I/O at the time freemem is 0], we allocate callbacks out of the
5116  * L2 cache. The L2 cache is static and depends on the memory size.
5117  * [We might also count the number of devices at probe time and
5118  * allocate one structure per device and adjust for deferred attach]
5119  */
void
impl_ddi_callback_init(void)
{
	int	i;
	uint_t	physmegs;
	kstat_t	*ksp;

	/* Scale the static L2 callback pool with physical memory size. */
	physmegs = physmem >> (20 - PAGESHIFT);
	if (physmegs < 48) {
		ncallbacks = nc_low;
	} else if (physmegs < 128) {
		ncallbacks = nc_med;
	} else {
		ncallbacks = nc_high;
	}

	/*
	 * init free list
	 */
	callbackq = kmem_zalloc(
	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
	for (i = 0; i < ncallbacks-1; i++)
		callbackq[i].c_nfree = &callbackq[i+1];
	callbackqfree = callbackq;

	/* init kstats */
	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
		ksp->ks_data = (void *) &cbstats;
		kstat_install(ksp);
	}

}
5153 
/*
 * callback_insert: queue a (funcp, arg) callback request on *listid.
 * Caller must hold ddi_callback_mutex (both callers in this file do).
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* If (funcp, arg) is already queued, just bump its count. */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	/*
	 * Allocation order: kmem (L1), then the static L2 pool, then a
	 * last-ditch KM_PANIC allocation (counted as a "grow").
	 */
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		new = callbackqfree;
		if (new == NULL) {
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append at the tail (marker) or start a new list. */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5197 
5198 void
5199 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5200 {
5201 	mutex_enter(&ddi_callback_mutex);
5202 	cbstats.nc_asked++;
5203 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5204 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5205 	(void) callback_insert(funcp, arg, listid, 1);
5206 	mutex_exit(&ddi_callback_mutex);
5207 }
5208 
/*
 * real_callback_run: softcall target that drains the callback list at
 * *Queue, re-queueing any callback whose resource runs out again.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* First pass only: total up the outstanding request count. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Detach the head entry; recycle it to the L2 pool or kmem. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/*
		 * Run the callback up to `count' times; a zero return means
		 * the resource ran out again, so re-queue the remainder.
		 */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
					count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5269 
void
ddi_run_callback(uintptr_t *listid)
{
	/* Run the queued callbacks via softcall() rather than directly. */
	softcall(real_callback_run, listid);
}
5275 
5276 dev_info_t *
5277 nodevinfo(dev_t dev, int otyp)
5278 {
5279 	_NOTE(ARGUNUSED(dev, otyp))
5280 	return ((dev_info_t *)0);
5281 }
5282 
5283 /*
5284  * A driver should support its own getinfo(9E) entry point. This function
5285  * is provided as a convenience for ON drivers that don't expect their
5286  * getinfo(9E) entry point to be called. A driver that uses this must not
5287  * call ddi_create_minor_node.
5288  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* Unconditionally fails; see the usage constraints noted above. */
	return (DDI_FAILURE);
}
5295 
5296 /*
5297  * A driver should support its own getinfo(9E) entry point. This function
5298  * is provided as a convenience for ON drivers that where the minor number
5299  * is the instance. Drivers that do not have 1:1 mapping must implement
5300  * their own getinfo(9E) function.
5301  */
5302 int
5303 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5304     void *arg, void **result)
5305 {
5306 	_NOTE(ARGUNUSED(dip))
5307 	int	instance;
5308 
5309 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5310 		return (DDI_FAILURE);
5311 
5312 	instance = getminor((dev_t)(uintptr_t)arg);
5313 	*result = (void *)(uintptr_t)instance;
5314 	return (DDI_SUCCESS);
5315 }
5316 
/* Attach-style stub entry point that always fails. */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5323 
/* No-op DMA stub: mapping is never possible. */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}

/* No-op DMA stub: every attribute set is rejected. */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}

/* No-op DMA stub: handle free always fails. */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* No-op DMA stub: binding is never possible. */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}

/* No-op DMA stub: unbind always fails. */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}

/* No-op DMA stub: flush always fails. */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}

/* No-op DMA stub: window operations always fail. */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}

/* No-op DMA stub: control operations always fail. */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5391 
/* Generic empty function for ops-table slots that need one. */
void
ddivoid(void)
{}

/* Poll stub for drivers that do not support chpoll. */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5403 
/* Return the credentials of the current context via CRED(). */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}

/* Return the current value of the kernel's lbolt counter. */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5415 
5416 time_t
5417 ddi_get_time(void)
5418 {
5419 	time_t	now;
5420 
5421 	if ((now = gethrestime_sec()) == 0) {
5422 		timestruc_t ts;
5423 		mutex_enter(&tod_lock);
5424 		ts = tod_get();
5425 		mutex_exit(&tod_lock);
5426 		return (ts.tv_sec);
5427 	} else {
5428 		return (now);
5429 	}
5430 }
5431 
/* Return the process ID of the current thread's process. */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}

/* Return the current kernel thread's t_did identifier. */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5443 
5444 /*
5445  * This function returns B_TRUE if the caller can reasonably expect that a call
5446  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5447  * by user-level signal.  If it returns B_FALSE, then the caller should use
5448  * other means to make certain that the wait will not hang "forever."
5449  *
5450  * It does not check the signal mask, nor for reception of any particular
5451  * signal.
5452  *
5453  * Currently, a thread can receive a signal if it's not a kernel thread and it
5454  * is not in the middle of exit(2) tear-down.  Threads that are in that
5455  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5456  * cv_timedwait, and qwait_sig to qwait.
5457  */
5458 boolean_t
5459 ddi_can_receive_sig(void)
5460 {
5461 	proc_t *pp;
5462 
5463 	if (curthread->t_proc_flag & TP_LWPEXIT)
5464 		return (B_FALSE);
5465 	if ((pp = ttoproc(curthread)) == NULL)
5466 		return (B_FALSE);
5467 	return (pp->p_as != &kas);
5468 }
5469 
5470 /*
5471  * Swap bytes in 16-bit [half-]words
5472  */
5473 void
5474 swab(void *src, void *dst, size_t nbytes)
5475 {
5476 	uchar_t *pf = (uchar_t *)src;
5477 	uchar_t *pt = (uchar_t *)dst;
5478 	uchar_t tmp;
5479 	int nshorts;
5480 
5481 	nshorts = nbytes >> 1;
5482 
5483 	while (--nshorts >= 0) {
5484 		tmp = *pf++;
5485 		*pt++ = *pf++;
5486 		*pt++ = tmp;
5487 	}
5488 }
5489 
5490 static void
5491 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5492 {
5493 	struct ddi_minor_data *dp;
5494 
5495 	mutex_enter(&(DEVI(ddip)->devi_lock));
5496 	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);
5497 
5498 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5499 		DEVI(ddip)->devi_minor = dmdp;
5500 	} else {
5501 		while (dp->next != (struct ddi_minor_data *)NULL)
5502 			dp = dp->next;
5503 		dp->next = dmdp;
5504 	}
5505 
5506 	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
5507 	mutex_exit(&(DEVI(ddip)->devi_lock));
5508 }
5509 
5510 /*
5511  * Part of the obsolete SunCluster DDI Hooks.
5512  * Keep for binary compatibility
5513  */
minor_t
ddi_getiminor(dev_t dev)
{
	/* no cluster translation anymore: just the ordinary minor number */
	return (getminor(dev));
}
5519 
/*
 * Post an EC_DEVFS/ESC_DEVFS_MINOR_CREATE sysevent announcing creation of
 * a minor node under 'dip'.  Attributes carried: the devinfo pathname and
 * (when non-NULL) the minor name.  On any failure the operator is warned
 * to run devfsadm; the return value is always DDI_SUCCESS.
 */
static int
i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
{
	int se_flag;
	int kmem_flag;
	int se_err;
	char *pathname;
	sysevent_t *ev = NULL;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/* determine interrupt context */
	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;

	i_ddi_di_cache_invalidate(kmem_flag);

#ifdef DEBUG
	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
		    "interrupt level by driver %s",
		    ddi_driver_name(dip));
	}
#endif /* DEBUG */

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
	if (ev == NULL) {
		goto fail;
	}

	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
	if (pathname == NULL) {
		sysevent_free(ev);
		goto fail;
	}

	/* first attribute: the full devinfo pathname of the node */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, se_flag) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		goto fail;
	}
	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, se_flag) != 0) {
			sysevent_free_attr(ev_attr_list);
			sysevent_free(ev);
			goto fail;
		}
	}

	/* once attached, the attribute list is owned (and freed) via 'ev' */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
		sysevent_free(ev);
		goto fail;
	}

	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
		/* SE_NO_TRANSPORT: syseventd unavailable; warn with detail */
		if (se_err == SE_NO_TRANSPORT) {
			cmn_err(CE_WARN, "/devices or /dev may not be current "
			    "for driver %s (%s). Run devfsadm -i %s",
			    ddi_driver_name(dip), "syseventd not responding",
			    ddi_driver_name(dip));
		} else {
			sysevent_free(ev);
			goto fail;
		}
	}

	sysevent_free(ev);
	return (DDI_SUCCESS);
fail:
	cmn_err(CE_WARN, "/devices or /dev may not be current "
	    "for driver %s. Run devfsadm -i %s",
	    ddi_driver_name(dip), ddi_driver_name(dip));
	return (DDI_SUCCESS);
}
5608 
5609 /*
5610  * failing to remove a minor node is not of interest
5611  * therefore we do not generate an error message
5612  */
/*
 * Post an EC_DEVFS/ESC_DEVFS_MINOR_REMOVE sysevent for removal of a minor
 * node.  Always returns DDI_SUCCESS; a removal that cannot be logged is
 * deliberately ignored (see comment above).
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate(KM_SLEEP);

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* first attribute: the full devinfo pathname of the node */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	/* attach transfers list ownership to 'ev'; on failure free it here */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5679 
5680 /*
5681  * Derive the device class of the node.
5682  * Device class names aren't defined yet. Until this is done we use
5683  * devfs event subclass names as device class names.
5684  */
5685 static int
5686 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5687 {
5688 	int rv = DDI_SUCCESS;
5689 
5690 	if (i_ddi_devi_class(dip) == NULL) {
5691 		if (strncmp(node_type, DDI_NT_BLOCK,
5692 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5693 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5694 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5695 		    strcmp(node_type, DDI_NT_FD) != 0) {
5696 
5697 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5698 
5699 		} else if (strncmp(node_type, DDI_NT_NET,
5700 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5701 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5702 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5703 
5704 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5705 		}
5706 	}
5707 
5708 	return (rv);
5709 }
5710 
5711 /*
5712  * Check compliance with PSARC 2003/375:
5713  *
5714  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5715  * exceed IFNAMSIZ (16) characters in length.
5716  */
5717 static boolean_t
5718 verify_name(char *name)
5719 {
5720 	size_t	len = strlen(name);
5721 	char	*cp;
5722 
5723 	if (len == 0 || len > IFNAMSIZ)
5724 		return (B_FALSE);
5725 
5726 	for (cp = name; *cp != '\0'; cp++) {
5727 		if (!isalnum(*cp) && *cp != '_')
5728 			return (B_FALSE);
5729 	}
5730 
5731 	return (B_TRUE);
5732 }
5733 
5734 /*
5735  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5736  *				attach it to the given devinfo node.
5737  */
5738 
/*
 * Common worker for all ddi_create_*_minor_node() variants: validates the
 * arguments, allocates and fills in a ddi_minor_data structure, links it
 * onto 'dip', and logs a sysevent plus dacf matching for the new node.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are legal */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != (major_t)-1);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive and record the node's device class (disk/network) */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	/* clone devices become aliases under the clone driver's major */
	if (flag & CLONE_DEV) {
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip))) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5852 
/*
 * Create an ordinary (DDM_MINOR) minor node with no privilege data.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5860 
/*
 * Like ddi_create_minor_node(), but attaches read/write privilege names
 * and a privileged access mode to the node.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5869 
/*
 * Create a DDM_DEFAULT minor node (the node used when a device is opened
 * by the default name) with no privilege data.
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5877 
5878 /*
5879  * Internal (non-ddi) routine for drivers to export names known
5880  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5881  * but not exported externally to /dev
5882  */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	/* DDM_INTERNAL_PATH with node type "internal": not exported to /dev */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
5890 
/*
 * Remove the minor node named 'name' from 'dip', or every minor node if
 * 'name' is NULL.  For each removed node: log a removal sysevent, free
 * the name string, release any device privilege, clear dacf client data,
 * and free the ddi_minor_data structure itself.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	/* dmdp_prev tracks the link to patch when dmdp is unlinked */
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				(void) i_log_devfs_minor_remove(dip,
				    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
5937 
5938 
/*
 * Return non-zero if the system is currently panicking.
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
5944 
5945 
5946 /*
5947  * Find first bit set in a mask (returned counting from 1 up)
5948  */
5949 
int
ddi_ffs(long mask)
{
	/* thin wrapper over the kernel's ffs() */
	extern int ffs(long mask);
	return (ffs(mask));
}
5956 
5957 /*
5958  * Find last bit set. Take mask and clear
5959  * all but the most significant bit, and
5960  * then let ffs do the rest of the work.
5961  *
5962  * Algorithm courtesy of Steve Chessin.
5963  */
5964 
int
ddi_fls(long mask)
{
	extern int ffs(long);

	/*
	 * Strip the lowest set bit repeatedly until only the most
	 * significant set bit remains, then hand that to ffs().
	 * A zero mask falls straight through to ffs(0).
	 */
	for (;;) {
		long stripped = mask & (mask - 1);

		if (stripped == 0)
			break;
		mask = stripped;
	}
	return (ffs(mask));
}
5979 
5980 /*
5981  * The next five routines comprise generic storage management utilities
5982  * for driver soft state structures (in "the old days," this was done
5983  * with a statically sized array - big systems and dynamic loading
5984  * and unloading make heap allocation more attractive)
5985  */
5986 
5987 /*
5988  * Allocate a set of pointers to 'n_items' objects of size 'size'
5989  * bytes.  Each pointer is initialized to nil.
5990  *
5991  * The 'size' and 'n_items' values are stashed in the opaque
5992  * handle returned to the caller.
5993  *
5994  * This implementation interprets 'set of pointers' to mean 'array
5995  * of pointers' but note that nothing in the interface definition
5996  * precludes an implementation that uses, for example, a linked list.
5997  * However there should be a small efficiency gain from using an array
5998  * at lookup time.
5999  *
6000  * NOTE	As an optimization, we make our growable array allocations in
6001  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6002  *	gives us anyway.  It should save us some free/realloc's ..
6003  *
6004  *	As a further optimization, we make the growable array start out
6005  *	with MIN_N_ITEMS in it.
6006  */
6007 
6008 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6009 
6010 int
6011 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6012 {
6013 	struct i_ddi_soft_state *ss;
6014 
6015 	if (state_p == NULL || *state_p != NULL || size == 0)
6016 		return (EINVAL);
6017 
6018 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6019 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6020 	ss->size = size;
6021 
6022 	if (n_items < MIN_N_ITEMS)
6023 		ss->n_items = MIN_N_ITEMS;
6024 	else {
6025 		int bitlog;
6026 
6027 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6028 			bitlog--;
6029 		ss->n_items = 1 << bitlog;
6030 	}
6031 
6032 	ASSERT(ss->n_items >= n_items);
6033 
6034 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6035 
6036 	*state_p = ss;
6037 
6038 	return (0);
6039 }
6040 
6041 
6042 /*
6043  * Allocate a state structure of size 'size' to be associated
6044  * with item 'item'.
6045  *
6046  * In this implementation, the array is extended to
6047  * allow the requested offset, if needed.
6048  */
/*
 * Returns DDI_SUCCESS, or DDI_FAILURE when the handle is bad, 'item' is
 * negative, or the slot is already occupied.  May sleep (KM_SLEEP).
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *new_element;

	if ((ss = state) == NULL || item < 0)
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void	**new_array;
		size_t	new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6141 
6142 
6143 /*
6144  * Fetch a pointer to the allocated soft state structure.
6145  *
6146  * This is designed to be cheap.
6147  *
6148  * There's an argument that there should be more checking for
6149  * nil pointers and out of bounds on the array.. but we do a lot
6150  * of that in the alloc/free routines.
6151  *
6152  * An array has the convenience that we don't need to lock read-access
6153  * to it c.f. a linked list.  However our "expanding array" strategy
6154  * means that we should hold a readers lock on the i_ddi_soft_state
6155  * structure.
6156  *
6157  * However, from a performance viewpoint, we need to do it without
6158  * any locks at all -- this also makes it a leaf routine.  The algorithm
6159  * is 'lock-free' because we only discard the pointer arrays at
6160  * ddi_soft_state_fini() time.
6161  */
void *
ddi_get_soft_state(void *state, int item)
{
	struct i_ddi_soft_state *ss = state;

	ASSERT(ss != NULL && item >= 0);

	/* deliberately unlocked: arrays are only discarded at fini time */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}
6173 
6174 /*
6175  * Free the state structure corresponding to 'item.'   Freeing an
6176  * element that has either gone or was never allocated is not
6177  * considered an error.  Note that we free the state structure, but
6178  * we don't shrink our pointer array, or discard 'dirty' arrays,
6179  * since even a few pointers don't really waste too much memory.
6180  *
6181  * Passing an item number that is out of bounds, or a null pointer will
6182  * provoke an error message.
6183  */
void
ddi_soft_state_free(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if ((ss = state) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	/* detach the element under the lock; free it after dropping it */
	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* ss->size appears fixed after init, so the unlocked read is safe */
	if (element)
		kmem_free(element, ss->size);
}
6218 
6219 
6220 /*
6221  * Free the entire set of pointers, and any
6222  * soft state structures contained therein.
6223  *
6224  * Note that we don't grab the ss->lock mutex, even though
6225  * we're inspecting the various fields of the data structure.
6226  *
6227  * There is an implicit assumption that this routine will
6228  * never run concurrently with any of the above on this
6229  * particular state structure i.e. by the time the driver
6230  * calls this routine, there should be no other threads
6231  * running in the driver.
6232  */
void
ddi_soft_state_fini(void **state_p)
{
	struct i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL || (ss = *state_p) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	/* leave the caller's handle NULL so reuse requires a fresh init */
	*state_p = NULL;
}
6272 
6273 
6274 /*
6275  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'
6276  * If name is NULL, this frees the devi_addr entry, if any.
6277  */
6278 void
6279 ddi_set_name_addr(dev_info_t *dip, char *name)
6280 {
6281 	char *oldname = DEVI(dip)->devi_addr;
6282 
6283 	DEVI(dip)->devi_addr = i_ddi_strdup(name, KM_SLEEP);
6284 	if (oldname) {
6285 		kmem_free(oldname, strlen(oldname) + 1);
6286 	}
6287 }
6288 
/*
 * Return the devi_addr (unit address) of 'dip'; NULL if none is set.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6294 
/*
 * Attach parent-private data to 'dip'.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6300 
/*
 * Return the parent-private data previously set on 'dip'.
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6306 
6307 /*
6308  * ddi_name_to_major: Returns the major number of a module given its name.
6309  */
major_t
ddi_name_to_major(char *name)
{
	/* thin wrapper over modctl's name-to-major binding table */
	return (mod_name_to_major(name));
}
6315 
6316 /*
6317  * ddi_major_to_name: Returns the module name bound to a major number.
6318  */
char *
ddi_major_to_name(major_t major)
{
	/* thin wrapper over modctl's major-to-name binding table */
	return (mod_major_to_name(major));
}
6324 
6325 /*
6326  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6327  * pointed at by 'name.'  A devinfo node is named as a result of calling
6328  * ddi_initchild().
6329  *
6330  * Note: the driver must be held before calling this function!
6331  */
6332 char *
6333 ddi_deviname(dev_info_t *dip, char *name)
6334 {
6335 	char *addrname;
6336 	char none = '\0';
6337 
6338 	if (dip == ddi_root_node()) {
6339 		*name = '\0';
6340 		return (name);
6341 	}
6342 
6343 	if (i_ddi_node_state(dip) < DS_INITIALIZED) {
6344 		addrname = &none;
6345 	} else {
6346 		addrname = ddi_get_name_addr(dip);
6347 	}
6348 
6349 	if (*addrname == '\0') {
6350 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6351 	} else {
6352 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6353 	}
6354 
6355 	return (name);
6356 }
6357 
6358 /*
6359  * Spits out the name of device node, typically name@addr, for a given node,
6360  * using the driver name, not the nodename.
6361  *
6362  * Used by match_parent. Not to be used elsewhere.
6363  */
6364 char *
6365 i_ddi_parname(dev_info_t *dip, char *name)
6366 {
6367 	char *addrname;
6368 
6369 	if (dip == ddi_root_node()) {
6370 		*name = '\0';
6371 		return (name);
6372 	}
6373 
6374 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6375 
6376 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6377 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6378 	else
6379 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6380 	return (name);
6381 }
6382 
6383 static char *
6384 pathname_work(dev_info_t *dip, char *path)
6385 {
6386 	char *bp;
6387 
6388 	if (dip == ddi_root_node()) {
6389 		*path = '\0';
6390 		return (path);
6391 	}
6392 	(void) pathname_work(ddi_get_parent(dip), path);
6393 	bp = path + strlen(path);
6394 	(void) ddi_deviname(dip, bp);
6395 	return (path);
6396 }
6397 
/*
 * Build the full devinfo pathname of 'dip' into 'path' (the caller must
 * supply a buffer large enough, conventionally MAXPATHLEN) and return it.
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6403 
6404 /*
6405  * Given a dev_t, return the pathname of the corresponding device in the
6406  * buffer pointed at by "path."  The buffer is assumed to be large enough
6407  * to hold the pathname of the device (MAXPATHLEN).
6408  *
6409  * The pathname of a device is the pathname of the devinfo node to which
6410  * the device "belongs," concatenated with the character ':' and the name
6411  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6412  * just the pathname of the devinfo node is returned without driving attach
6413  * of that node.  For a non-zero spec_type, an attach is performed and a
6414  * search of the minor list occurs.
6415  *
6416  * It is possible that the path associated with the dev_t is not
6417  * currently available in the devinfo tree.  In order to have a
6418  * dev_t, a device must have been discovered before, which means
6419  * that the path is always in the instance tree.  The one exception
6420  * to this is if the dev_t is associated with a pseudo driver, in
6421  * which case the device must exist on the pseudo branch of the
6422  * devinfo tree as a result of parsing .conf files.
6423  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	if (major >= devcnt)
		goto fail;
	/* clone devices encode the real driver's major in the minor number */
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		mutex_enter(&(DEVI(dip)->devi_lock));
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);
		/* hold dropped before failing so the fail path stays simple */
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6477 
6478 /*
6479  * Given a major number and an instance, return the path.
6480  * This interface does NOT drive attach.
6481  */
6482 int
6483 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6484 {
6485 	dev_info_t	*dip;
6486 
6487 	/* look for the major/instance in the instance tree */
6488 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6489 	    path) != DDI_SUCCESS) {
6490 		/* not in instance tree, look in 'pseudo' branch */
6491 		if ((dip = ddi_hold_devi_by_instance(major,
6492 		    instance, E_DDI_HOLD_DEVI_NOATTACH)) == NULL) {
6493 			*path = 0;
6494 			return (DDI_FAILURE);
6495 		}
6496 		(void) ddi_pathname(dip, path);
6497 		ddi_release_devi(dip);
6498 	}
6499 	ASSERT(strlen(path) < MAXPATHLEN);
6500 	return (DDI_SUCCESS);
6501 }
6502 
6503 
6504 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6505 
6506 /*
6507  * Given the dip for a network interface return the ppa for that interface.
6508  *
6509  * In all cases except GLD v0 drivers, the ppa == instance.
6510  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6511  * So for these drivers when the attach routine calls gld_register(),
6512  * the GLD framework creates an integer property called "gld_driver_ppa"
6513  * that can be queried here.
6514  *
6515  * The only time this function is used is when a system is booting over nfs.
6516  * In this case the system has to resolve the pathname of the boot device
6517  * to it's ppa.
6518  */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/*
	 * Look up the GLD v0 ppa property on this node only (no parent
	 * or PROM inheritance); if absent, fall back to the instance
	 * number, which is the ppa for all other drivers.
	 */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
			GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
6526 
6527 /*
6528  * i_ddi_devi_set_ppa() should only be called from gld_register()
6529  * and only for GLD v0 drivers
6530  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* Record the ppa as a software-only integer property on the node. */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6536 
6537 
6538 /*
6539  * Private DDI Console bell functions.
6540  */
6541 void
6542 ddi_ring_console_bell(clock_t duration)
6543 {
6544 	if (ddi_console_bell_func != NULL)
6545 		(*ddi_console_bell_func)(duration);
6546 }
6547 
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	/* Install (or clear, if NULL) the console bell handler. */
	ddi_console_bell_func = bellfunc;
}
6553 
6554 int
6555 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6556 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6557 {
6558 	int (*funcp)() = ddi_dma_allochdl;
6559 	ddi_dma_attr_t dma_attr;
6560 	struct bus_ops *bop;
6561 
6562 	if (attr == (ddi_dma_attr_t *)0)
6563 		return (DDI_DMA_BADATTR);
6564 
6565 	dma_attr = *attr;
6566 
6567 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6568 	if (bop && bop->bus_dma_allochdl)
6569 		funcp = bop->bus_dma_allochdl;
6570 
6571 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6572 }
6573 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/*
	 * NOTE(review): HD is a macro defined earlier in this file that
	 * appears to derive the rdip from a variable literally named 'h',
	 * so the local name below must remain 'h' — confirm before rename.
	 */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6580 
6581 static uintptr_t dma_mem_list_id = 0;
6582 
6583 
int
ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
	ddi_device_acc_attr_t *accattrp, uint_t xfermodes,
	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
	size_t *real_length, ddi_acc_handle_t *handlep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip = hp->dmai_rdip;
	ddi_acc_hdl_t *ap;
	ddi_dma_attr_t *attrp = &hp->dmai_attr;
	uint_t sleepflag;
	int (*fp)(caddr_t);
	int rval;

	/*
	 * Translate the DDI wait policy into a kmem flag for the access
	 * handle allocation; an arbitrary caller callback passes through.
	 */
	if (waitfp == DDI_DMA_SLEEP)
		fp = (int (*)())KM_SLEEP;
	else if (waitfp == DDI_DMA_DONTWAIT)
		fp = (int (*)())KM_NOSLEEP;
	else
		fp = waitfp;
	*handlep = impl_acc_hdl_alloc(fp, arg);
	if (*handlep == NULL)
		return (DDI_FAILURE);

	/*
	 * initialize the common elements of data access handle
	 */
	ap = impl_acc_hdl_get(*handlep);
	ap->ah_vers = VERS_ACCHDL;
	ap->ah_dip = dip;
	ap->ah_offset = 0;
	ap->ah_len = 0;
	ap->ah_xfermodes = xfermodes;
	ap->ah_acc = *accattrp;

	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
	if (xfermodes == DDI_DMA_CONSISTENT) {
		/* Consistent mode: exact length, real_length == length. */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 0,
			    accattrp, kaddrp, NULL, ap);
		*real_length = length;
	} else {
		/* Non-consistent mode: allocator reports the real length. */
		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag, 1,
			    accattrp, kaddrp, real_length, ap);
	}
	if (rval == DDI_SUCCESS) {
		ap->ah_len = (off_t)(*real_length);
		ap->ah_addr = *kaddrp;
	} else {
		/*
		 * Allocation failed: undo the access handle and, if the
		 * caller supplied a real callback (not SLEEP/DONTWAIT),
		 * queue it so the caller can be notified to retry.
		 */
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
		}
		rval = DDI_FAILURE;
	}
	return (rval);
}
6641 
6642 void
6643 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6644 {
6645 	ddi_acc_hdl_t *ap;
6646 
6647 	ap = impl_acc_hdl_get(*handlep);
6648 	ASSERT(ap);
6649 
6650 	if (ap->ah_xfermodes == DDI_DMA_CONSISTENT) {
6651 		i_ddi_mem_free((caddr_t)ap->ah_addr, 0);
6652 	} else {
6653 		i_ddi_mem_free((caddr_t)ap->ah_addr, 1);
6654 	}
6655 
6656 	/*
6657 	 * free the handle
6658 	 */
6659 	impl_acc_hdl_free(*handlep);
6660 	*handlep = (ddi_acc_handle_t)NULL;
6661 
6662 	if (dma_mem_list_id != 0) {
6663 		ddi_run_callback(&dma_mem_list_id);
6664 	}
6665 }
6666 
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* Build a DMA request describing the buf's memory object. */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if ((bp->b_flags & (B_PAGEIO|B_REMAPPED)) == B_PAGEIO) {
		/* Page I/O (and not remapped): describe as a page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* Otherwise describe the object by virtual address. */
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if ((bp->b_flags & (B_SHADOW|B_REMAPPED)) == B_SHADOW) {
			/* Shadow buf: pass the shadow list as private data. */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
				(bp->b_flags & (B_PHYS | B_REMAPPED))?
				DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if (bp->b_proc == NULL || bp->b_proc->p_as == &kas ||
		    (bp->b_flags & B_REMAPPED) != 0) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* Dispatch the bind via the bind handler cached on the devinfo. */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6721 
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* A zero-length bind cannot be mapped. */
	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* Build a DMA request describing the virtual address range. */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* Dispatch the bind via the bind handler cached on the devinfo. */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6749 
6750 void
6751 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
6752 {
6753 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6754 	ddi_dma_cookie_t *cp;
6755 
6756 	cp = hp->dmai_cookie;
6757 	ASSERT(cp);
6758 
6759 	cookiep->dmac_notused = cp->dmac_notused;
6760 	cookiep->dmac_type = cp->dmac_type;
6761 	cookiep->dmac_address = cp->dmac_address;
6762 	cookiep->dmac_size = cp->dmac_size;
6763 	hp->dmai_cookie++;
6764 }
6765 
6766 int
6767 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
6768 {
6769 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6770 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
6771 		return (DDI_FAILURE);
6772 	} else {
6773 		*nwinp = hp->dmai_nwin;
6774 		return (DDI_SUCCESS);
6775 	}
6776 }
6777 
6778 int
6779 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
6780 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6781 {
6782 	int (*funcp)() = ddi_dma_win;
6783 	struct bus_ops *bop;
6784 
6785 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
6786 	if (bop && bop->bus_dma_win)
6787 		funcp = bop->bus_dma_win;
6788 
6789 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
6790 }
6791 
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	/*
	 * Request 64-bit SBus operation through the DMA mctl interface;
	 * the burst sizes are passed by reference.  (HD derives the dip
	 * from the parameter 'h'.)
	 */
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
		&burstsizes, 0, 0));
}
6798 
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	/* Default fault check: report the handle's recorded fault state. */
	return (hp->dmai_fault);
}
6804 
6805 int
6806 ddi_check_dma_handle(ddi_dma_handle_t handle)
6807 {
6808 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6809 	int (*check)(ddi_dma_impl_t *);
6810 
6811 	if ((check = hp->dmai_fault_check) == NULL)
6812 		check = i_ddi_dma_fault_check;
6813 
6814 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
6815 }
6816 
6817 void
6818 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
6819 {
6820 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6821 	void (*notify)(ddi_dma_impl_t *);
6822 
6823 	if (!hp->dmai_fault) {
6824 		hp->dmai_fault = 1;
6825 		if ((notify = hp->dmai_fault_notify) != NULL)
6826 			(*notify)(hp);
6827 	}
6828 }
6829 
6830 void
6831 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
6832 {
6833 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6834 	void (*notify)(ddi_dma_impl_t *);
6835 
6836 	if (hp->dmai_fault) {
6837 		hp->dmai_fault = 0;
6838 		if ((notify = hp->dmai_fault_notify) != NULL)
6839 			(*notify)(hp);
6840 	}
6841 }
6842 
6843 /*
6844  * register mapping routines.
6845  */
6846 int
6847 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
6848 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
6849 	ddi_acc_handle_t *handle)
6850 {
6851 	ddi_map_req_t mr;
6852 	ddi_acc_hdl_t *hp;
6853 	int result;
6854 
6855 	/*
6856 	 * Allocate and initialize the common elements of data access handle.
6857 	 */
6858 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
6859 	hp = impl_acc_hdl_get(*handle);
6860 	hp->ah_vers = VERS_ACCHDL;
6861 	hp->ah_dip = dip;
6862 	hp->ah_rnumber = rnumber;
6863 	hp->ah_offset = offset;
6864 	hp->ah_len = len;
6865 	hp->ah_acc = *accattrp;
6866 
6867 	/*
6868 	 * Set up the mapping request and call to parent.
6869 	 */
6870 	mr.map_op = DDI_MO_MAP_LOCKED;
6871 	mr.map_type = DDI_MT_RNUMBER;
6872 	mr.map_obj.rnumber = rnumber;
6873 	mr.map_prot = PROT_READ | PROT_WRITE;
6874 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
6875 	mr.map_handlep = hp;
6876 	mr.map_vers = DDI_MAP_VERSION;
6877 	result = ddi_map(dip, &mr, offset, len, addrp);
6878 
6879 	/*
6880 	 * check for end result
6881 	 */
6882 	if (result != DDI_SUCCESS) {
6883 		impl_acc_hdl_free(*handle);
6884 		*handle = (ddi_acc_handle_t)NULL;
6885 	} else {
6886 		hp->ah_addr = *addrp;
6887 	}
6888 
6889 	return (result);
6890 }
6891 
6892 void
6893 ddi_regs_map_free(ddi_acc_handle_t *handlep)
6894 {
6895 	ddi_map_req_t mr;
6896 	ddi_acc_hdl_t *hp;
6897 
6898 	hp = impl_acc_hdl_get(*handlep);
6899 	ASSERT(hp);
6900 
6901 	mr.map_op = DDI_MO_UNMAP;
6902 	mr.map_type = DDI_MT_RNUMBER;
6903 	mr.map_obj.rnumber = hp->ah_rnumber;
6904 	mr.map_prot = PROT_READ | PROT_WRITE;
6905 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
6906 	mr.map_handlep = hp;
6907 	mr.map_vers = DDI_MAP_VERSION;
6908 
6909 	/*
6910 	 * Call my parent to unmap my regs.
6911 	 */
6912 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
6913 		hp->ah_len, &hp->ah_addr);
6914 	/*
6915 	 * free the handle
6916 	 */
6917 	impl_acc_hdl_free(*handlep);
6918 	*handlep = (ddi_acc_handle_t)NULL;
6919 }
6920 
6921 int
6922 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
6923 	ssize_t dev_advcnt, uint_t dev_datasz)
6924 {
6925 	uint8_t *b;
6926 	uint16_t *w;
6927 	uint32_t *l;
6928 	uint64_t *ll;
6929 
6930 	/* check for total byte count is multiple of data transfer size */
6931 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
6932 		return (DDI_FAILURE);
6933 
6934 	switch (dev_datasz) {
6935 	case DDI_DATA_SZ01_ACC:
6936 		for (b = (uint8_t *)dev_addr;
6937 			bytecount != 0; bytecount -= 1, b += dev_advcnt)
6938 			ddi_put8(handle, b, 0);
6939 		break;
6940 	case DDI_DATA_SZ02_ACC:
6941 		for (w = (uint16_t *)dev_addr;
6942 			bytecount != 0; bytecount -= 2, w += dev_advcnt)
6943 			ddi_put16(handle, w, 0);
6944 		break;
6945 	case DDI_DATA_SZ04_ACC:
6946 		for (l = (uint32_t *)dev_addr;
6947 			bytecount != 0; bytecount -= 4, l += dev_advcnt)
6948 			ddi_put32(handle, l, 0);
6949 		break;
6950 	case DDI_DATA_SZ08_ACC:
6951 		for (ll = (uint64_t *)dev_addr;
6952 			bytecount != 0; bytecount -= 8, ll += dev_advcnt)
6953 			ddi_put64(handle, ll, 0x0ll);
6954 		break;
6955 	default:
6956 		return (DDI_FAILURE);
6957 	}
6958 	return (DDI_SUCCESS);
6959 }
6960 
6961 int
6962 ddi_device_copy(
6963 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
6964 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
6965 	size_t bytecount, uint_t dev_datasz)
6966 {
6967 	uint8_t *b_src, *b_dst;
6968 	uint16_t *w_src, *w_dst;
6969 	uint32_t *l_src, *l_dst;
6970 	uint64_t *ll_src, *ll_dst;
6971 
6972 	/* check for total byte count is multiple of data transfer size */
6973 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
6974 		return (DDI_FAILURE);
6975 
6976 	switch (dev_datasz) {
6977 	case DDI_DATA_SZ01_ACC:
6978 		b_src = (uint8_t *)src_addr;
6979 		b_dst = (uint8_t *)dest_addr;
6980 
6981 		for (; bytecount != 0; bytecount -= 1) {
6982 			ddi_put8(dest_handle, b_dst,
6983 				ddi_get8(src_handle, b_src));
6984 			b_dst += dest_advcnt;
6985 			b_src += src_advcnt;
6986 		}
6987 		break;
6988 	case DDI_DATA_SZ02_ACC:
6989 		w_src = (uint16_t *)src_addr;
6990 		w_dst = (uint16_t *)dest_addr;
6991 
6992 		for (; bytecount != 0; bytecount -= 2) {
6993 			ddi_put16(dest_handle, w_dst,
6994 				ddi_get16(src_handle, w_src));
6995 			w_dst += dest_advcnt;
6996 			w_src += src_advcnt;
6997 		}
6998 		break;
6999 	case DDI_DATA_SZ04_ACC:
7000 		l_src = (uint32_t *)src_addr;
7001 		l_dst = (uint32_t *)dest_addr;
7002 
7003 		for (; bytecount != 0; bytecount -= 4) {
7004 			ddi_put32(dest_handle, l_dst,
7005 				ddi_get32(src_handle, l_src));
7006 			l_dst += dest_advcnt;
7007 			l_src += src_advcnt;
7008 		}
7009 		break;
7010 	case DDI_DATA_SZ08_ACC:
7011 		ll_src = (uint64_t *)src_addr;
7012 		ll_dst = (uint64_t *)dest_addr;
7013 
7014 		for (; bytecount != 0; bytecount -= 8) {
7015 			ddi_put64(dest_handle, ll_dst,
7016 				ddi_get64(src_handle, ll_src));
7017 			ll_dst += dest_advcnt;
7018 			ll_src += src_advcnt;
7019 		}
7020 		break;
7021 	default:
7022 		return (DDI_FAILURE);
7023 	}
7024 	return (DDI_SUCCESS);
7025 }
7026 
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))

/*
 * Byte-order reversal for 16-, 32- and 64-bit quantities.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	/* Exchange the two bytes. */
	return ((value << 8) | (value >> 8));
}

uint32_t
ddi_swap32(uint32_t value)
{
	/* Reverse all four bytes. */
	return ((value << 24) | ((value & 0xff00) << 8) |
	    ((value >> 8) & 0xff00) | (value >> 24));
}

uint64_t
ddi_swap64(uint64_t value)
{
	uint32_t lo = (uint32_t)value;
	uint32_t hi = (uint32_t)(value >> 32);

	/* Reverse the bytes of each half, then exchange the halves. */
	return (((uint64_t)ddi_swap32(lo) << 32) | ddi_swap32(hi));
}
7056 
7057 /*
7058  * Convert a binding name to a driver name.
7059  * A binding name is the name used to determine the driver for a
7060  * device - it may be either an alias for the driver or the name
7061  * of the driver itself.
7062  */
7063 char *
7064 i_binding_to_drv_name(char *bname)
7065 {
7066 	major_t major_no;
7067 
7068 	ASSERT(bname != NULL);
7069 
7070 	if ((major_no = ddi_name_to_major(bname)) == -1)
7071 		return (NULL);
7072 	return (ddi_major_to_name(major_no));
7073 }
7074 
7075 /*
7076  * Search for minor name that has specified dev_t and spec_type.
7077  * If spec_type is zero then any dev_t match works.  Since we
7078  * are returning a pointer to the minor name string, we require the
7079  * caller to do the locking.
7080  */
char *
i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
{
	struct ddi_minor_data	*dmdp;

	/*
	 * The did layered driver currently intentionally returns a
	 * devinfo ptr for an underlying sd instance based on a did
	 * dev_t. In this case it is not an error.
	 *
	 * The did layered driver is associated with Sun Cluster.
	 */
	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
		(strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
	/* Caller must hold devi_lock; the returned name stays locked data. */
	ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock)));

	/*
	 * Walk the minor list for a node of a real minor type whose dev_t
	 * matches; spec_type is only compared when the caller supplied a
	 * char/block designation.
	 */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type == DDM_MINOR) ||
		    (dmdp->type == DDM_INTERNAL_PATH) ||
		    (dmdp->type == DDM_DEFAULT)) &&
		    (dmdp->ddm_dev == dev) &&
		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
		    (dmdp->ddm_spec_type == spec_type)))
			return (dmdp->ddm_name);
	}

	/* No matching minor node. */
	return (NULL);
}
7109 
7110 /*
7111  * Find the devt and spectype of the specified minor_name.
7112  * Return DDI_FAILURE if minor_name not found. Since we are
7113  * returning everything via arguments we can do the locking.
7114  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == (major_t)-1)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* Not a STREAMS driver: the clone device cannot open it. */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* Clone dev_t encodes the target driver's major as minor. */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* Normal node: search the minor list under the node lock. */
	ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock)));
	mutex_enter(&(DEVI(dip)->devi_lock));

	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* Skip aliases/other types and non-matching names. */
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_FAILURE);
}
7174 
extern char	hw_serial[];	/* hostid string; consumed by ddi_devid_init() */
static kmutex_t devid_gen_mutex;	/* protects devid_gen_number */
static short	devid_gen_number;	/* generation count for DEVID_FAB ids */
7178 
#ifdef DEBUG

/* Tunables to deliberately corrupt registered devids for testing. */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* When non-zero, trace devid-layer dev_t lookups to the console. */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* Non-DEBUG build: devid tracing compiles away entirely. */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7197 
7198 
7199 #ifdef	DEBUG
7200 
7201 static void
7202 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7203 {
7204 	int i;
7205 
7206 	cmn_err(CE_CONT, "%s:\n", msg);
7207 	for (i = 0; i < ndevs; i++) {
7208 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7209 	}
7210 }
7211 
7212 static void
7213 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7214 {
7215 	int i;
7216 
7217 	cmn_err(CE_CONT, "%s:\n", msg);
7218 	for (i = 0; i < npaths; i++) {
7219 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7220 	}
7221 }
7222 
7223 static void
7224 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7225 {
7226 	int i;
7227 
7228 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7229 	for (i = 0; i < ndevs; i++) {
7230 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7231 	}
7232 }
7233 
7234 #endif	/* DEBUG */
7235 
7236 /*
7237  * Register device id into DDI framework.
7238  * Must be called when device is attached.
7239  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	/* Need a node that is actually bound to a driver. */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
			ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* The property holds its own copy; the string form can go. */
	ddi_devid_str_free(devid_str);

#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif	/* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}
7306 
7307 int
7308 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7309 {
7310 	int rval;
7311 
7312 	rval = i_ddi_devid_register(dip, devid);
7313 	if (rval == DDI_SUCCESS) {
7314 		/*
7315 		 * Register devid in devid-to-path cache
7316 		 */
7317 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7318 			mutex_enter(&DEVI(dip)->devi_lock);
7319 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7320 			mutex_exit(&DEVI(dip)->devi_lock);
7321 		} else {
7322 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7323 				ddi_driver_name(dip), ddi_get_instance(dip));
7324 		}
7325 	} else {
7326 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7327 			ddi_driver_name(dip), ddi_get_instance(dip));
7328 	}
7329 	return (rval);
7330 }
7331 
7332 /*
7333  * Remove (unregister) device id from DDI framework.
7334  * Must be called when device is detached.
7335  */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif	/* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}
7350 
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* Clear the registered-devid flag under the node lock. */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	/* Drop the cache entry, then remove the devid property itself. */
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7360 
7361 /*
7362  * Allocate and initialize a device id.
7363  */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	/* Validate the id/nbytes combination for the requested type. */
	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* These types require caller-supplied id bytes. */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* Fabricated ids are generated below; no caller id allowed. */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* Room for hostid + timestamp + generation number. */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* NOTE(review): with KM_SLEEP this should not return NULL. */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	/* Fill in the fixed devid header fields. */
	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		/* Non-fabricated types: copy the caller-supplied bytes. */
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7477 
int
i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
{
	char		*devidstr;

	ASSERT(dev != DDI_DEV_T_NONE);

	/* look up the property, devt specific first */
	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
		/* Retry as a wildcard lookup unless the caller already did. */
		if ((dev == DDI_DEV_T_ANY) ||
		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
			DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
			DDI_PROP_SUCCESS)) {
				return (DDI_FAILURE);
		}
	}

	/* convert to binary form */
	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
		ddi_prop_free(devidstr);
		return (DDI_FAILURE);
	}
	ddi_prop_free(devidstr);
	return (DDI_SUCCESS);
}
7504 
7505 /*
7506  * Return a copy of the device id for dev_t
7507  */
7508 int
7509 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7510 {
7511 	dev_info_t	*dip;
7512 	int		rval;
7513 
7514 	/* get the dip */
7515 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7516 		return (DDI_FAILURE);
7517 
7518 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7519 
7520 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7521 	return (rval);
7522 }
7523 
7524 /*
7525  * Return a copy of the minor name for dev_t and spec_type
7526  */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Probe once under the lock to learn the current name's length. */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		/* Minor node disappeared while unlocked - give up. */
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		/* Name changed size while unlocked - reallocate and retry. */
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}
7579 
int
ddi_lyr_devid_to_devlist(
	ddi_devid_t	devid,
	char		*minor_name,
	int		*retndevs,
	dev_t		**retdevs)
{
	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* First try the devid-to-dev_t cache directly. */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
			*retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	/* Cache miss: drive discovery to (re)populate the cache. */
	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	/* Retry the cache lookup after discovery. */
	if (e_devid_cache_to_devt_list(devid, minor_name,
	    retndevs, retdevs) == DDI_SUCCESS) {
		ASSERT(*retndevs > 0);
		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
			*retndevs, *retdevs);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
7611 
7612 void
7613 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7614 {
7615 	kmem_free(devlist, sizeof (dev_t) * ndevs);
7616 }
7617 
7618 /*
7619  * Note: This will need to be fixed if we ever allow processes to
7620  * have more than one data model per exec.
7621  */
7622 model_t
7623 ddi_mmap_get_model(void)
7624 {
7625 	return (get_udatamodel());
7626 }
7627 
7628 model_t
7629 ddi_model_convert_from(model_t model)
7630 {
7631 	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
7632 }
7633 
7634 /*
7635  * ddi interfaces managing storage and retrieval of eventcookies.
7636  */
7637 
7638 /*
7639  * Invoke bus nexus driver's implementation of the
7640  * (*bus_remove_eventcall)() interface to remove a registered
7641  * callback handler for "event".
7642  */
7643 int
7644 ddi_remove_event_handler(ddi_callback_id_t id)
7645 {
7646 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
7647 	dev_info_t *ddip;
7648 
7649 	ASSERT(cb);
7650 	if (!cb) {
7651 		return (DDI_FAILURE);
7652 	}
7653 
7654 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
7655 	return (ndi_busop_remove_eventcall(ddip, id));
7656 }
7657 
7658 /*
7659  * Invoke bus nexus driver's implementation of the
7660  * (*bus_add_eventcall)() interface to register a callback handler
7661  * for "event".
7662  */
7663 int
7664 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
7665     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
7666     void *arg, ddi_callback_id_t *id)
7667 {
7668 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
7669 }
7670 
7671 
7672 /*
7673  * Return a handle for event "name" by calling up the device tree
7674  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
7675  * by a bus nexus or top of dev_info tree is reached.
7676  */
7677 int
7678 ddi_get_eventcookie(dev_info_t *dip, char *name,
7679     ddi_eventcookie_t *event_cookiep)
7680 {
7681 	return (ndi_busop_get_eventcookie(dip, dip,
7682 	    name, event_cookiep));
7683 }
7684 
7685 /*
7686  * single thread access to dev_info node and set state
7687  */
7688 void
7689 i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
7690 {
7691 	if (!has_lock)
7692 		mutex_enter(&(DEVI(dip)->devi_lock));
7693 
7694 	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));
7695 
7696 	/*
7697 	 * wait until state(s) have been changed
7698 	 */
7699 	while ((DEVI(dip)->devi_state & w_mask) != 0) {
7700 		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
7701 	}
7702 	DEVI(dip)->devi_state |= s_mask;
7703 
7704 	if (!has_lock)
7705 		mutex_exit(&(DEVI(dip)->devi_lock));
7706 }
7707 
7708 void
7709 i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
7710 {
7711 	if (!has_lock)
7712 		mutex_enter(&(DEVI(dip)->devi_lock));
7713 
7714 	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));
7715 
7716 	/*
7717 	 * clear the state(s) and wakeup any threads waiting
7718 	 * for state change
7719 	 */
7720 	DEVI(dip)->devi_state &= ~c_mask;
7721 	cv_broadcast(&(DEVI(dip)->devi_cv));
7722 
7723 	if (!has_lock)
7724 		mutex_exit(&(DEVI(dip)->devi_lock));
7725 }
7726 
7727 /*
7728  * This procedure is provided as the general callback function when
7729  * umem_lockmemory calls as_add_callback for long term memory locking.
7730  * When as_unmap, as_setprot, or as_free encounter segments which have
7731  * locked memory, this callback will be invoked.
7732  */
7733 void
7734 umem_lock_undo(struct as *as, void *arg, uint_t event)
7735 {
7736 	_NOTE(ARGUNUSED(as, event))
7737 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
7738 
7739 	/*
7740 	 * Call the cleanup function.  Decrement the cookie reference
7741 	 * count, if it goes to zero, return the memory for the cookie.
7742 	 * The i_ddi_umem_unlock for this cookie may or may not have been
7743 	 * called already.  It is the responsibility of the caller of
7744 	 * umem_lockmemory to handle the case of the cleanup routine
7745 	 * being called after a ddi_umem_unlock for the cookie
7746 	 * was called.
7747 	 */
7748 
7749 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
7750 
7751 	/* remove the cookie if reference goes to zero */
7752 	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
7753 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
7754 	}
7755 }
7756 
7757 /*
7758  * The following two Consolidation Private routines provide generic
7759  * interfaces to increase/decrease the amount of device-locked memory.
7760  *
7761  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
7762  * must be called every time i_ddi_incr_locked_memory() is called.
7763  */
7764 int
7765 /* ARGSUSED */
7766 i_ddi_incr_locked_memory(proc_t *procp, task_t *taskp,
7767     kproject_t *projectp, zone_t *zonep, rctl_qty_t inc)
7768 {
7769 	kproject_t *projp;
7770 
7771 	ASSERT(procp);
7772 	ASSERT(mutex_owned(&procp->p_lock));
7773 
7774 	projp = procp->p_task->tk_proj;
7775 	mutex_enter(&umem_devlockmem_rctl_lock);
7776 	/*
7777 	 * Test if the requested memory can be locked without exceeding the
7778 	 * limits.
7779 	 */
7780 	if (rctl_test(rc_project_devlockmem, projp->kpj_rctls,
7781 	    procp, inc, RCA_SAFE) & RCT_DENY) {
7782 		mutex_exit(&umem_devlockmem_rctl_lock);
7783 		return (ENOMEM);
7784 	}
7785 	projp->kpj_data.kpd_devlockmem += inc;
7786 	mutex_exit(&umem_devlockmem_rctl_lock);
7787 	/*
7788 	 * Grab a hold on the project.
7789 	 */
7790 	(void) project_hold(projp);
7791 
7792 	return (0);
7793 }
7794 
7795 /*
7796  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
7797  * must be called every time i_ddi_decr_locked_memory() is called.
7798  */
7799 /* ARGSUSED */
7800 void
7801 i_ddi_decr_locked_memory(proc_t *procp, task_t *taskp,
7802     kproject_t *projectp, zone_t *zonep, rctl_qty_t dec)
7803 {
7804 	ASSERT(projectp);
7805 
7806 	mutex_enter(&umem_devlockmem_rctl_lock);
7807 	projectp->kpj_data.kpd_devlockmem -= dec;
7808 	mutex_exit(&umem_devlockmem_rctl_lock);
7809 
7810 	/*
7811 	 * Release the project pointer reference accquired in
7812 	 * i_ddi_incr_locked_memory().
7813 	 */
7814 	(void) project_rele(projectp);
7815 }
7816 
7817 /*
7818  * This routine checks if the max-device-locked-memory resource ctl is
7819  * exceeded, if not increments it, grabs a hold on the project.
7820  * Returns 0 if successful otherwise returns error code
7821  */
7822 static int
7823 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
7824 {
7825 	proc_t		*procp;
7826 	int		ret;
7827 
7828 	ASSERT(cookie);
7829 	procp = cookie->procp;
7830 	ASSERT(procp);
7831 
7832 	mutex_enter(&procp->p_lock);
7833 
7834 	if ((ret = i_ddi_incr_locked_memory(procp, NULL,
7835 		NULL, NULL, cookie->size)) != 0) {
7836 		mutex_exit(&procp->p_lock);
7837 		return (ret);
7838 	}
7839 
7840 	/*
7841 	 * save the project pointer in the
7842 	 * umem cookie, project pointer already
7843 	 * hold in i_ddi_incr_locked_memory
7844 	 */
7845 	cookie->lockmem_proj = (void *)procp->p_task->tk_proj;
7846 	mutex_exit(&procp->p_lock);
7847 
7848 	return (0);
7849 }
7850 
7851 /*
7852  * Decrements the max-device-locked-memory resource ctl and releases
7853  * the hold on the project that was acquired during umem_incr_devlockmem
7854  */
7855 static void
7856 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
7857 {
7858 	kproject_t	*projp;
7859 
7860 	if (!cookie->lockmem_proj)
7861 		return;
7862 
7863 	projp = (kproject_t *)cookie->lockmem_proj;
7864 	i_ddi_decr_locked_memory(NULL, NULL, projp, NULL, cookie->size);
7865 
7866 	cookie->lockmem_proj = NULL;
7867 }
7868 
7869 /*
7870  * A consolidation private function which is essentially equivalent to
7871  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
7872  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
7873  * the ops_vector is valid.
7874  *
7875  * Lock the virtual address range in the current process and create a
7876  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
7877  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
7878  * to user space.
7879  *
7880  * Note: The resource control accounting currently uses a full charge model
7881  * in other words attempts to lock the same/overlapping areas of memory
7882  * will deduct the full size of the buffer from the projects running
7883  * counter for the device locked memory.
7884  *
7885  * addr, size should be PAGESIZE aligned
7886  *
7887  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
7888  *	identifies whether the locked memory will be read or written or both
7889  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
7890  * be maintained for an indefinitely long period (essentially permanent),
7891  * rather than for what would be required for a typical I/O completion.
7892  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
7893  * if the memory pertains to a regular file which is mapped MAP_SHARED.
7894  * This is to prevent a deadlock if a file truncation is attempted after
7895  * after the locking is done.
7896  *
7897  * Returns 0 on success
7898  *	EINVAL - for invalid parameters
7899  *	EPERM, ENOMEM and other error codes returned by as_pagelock
7900  *	ENOMEM - is returned if the current request to lock memory exceeds
7901  *		project.max-device-locked-memory resource control value.
7902  *      EFAULT - memory pertains to a regular file mapped shared and
7903  *		and DDI_UMEMLOCK_LONGTERM flag is set
7904  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
7905  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment or
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		/* walk every segment overlapping [addr, addr + len) */
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
						addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umme_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
						addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
					addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	/* error is 0 here unless as_add_callback above failed */
	return (error);
}
8073 
8074 /*
8075  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8076  * the cookie.  Called from i_ddi_umem_unlock_thread.
8077  */
8078 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;	/* disposition of a possible umem_lock_undo callback */

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that is was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and, and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * max-device-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8140 
8141 /*
8142  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8143  *
8144  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8145  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8146  * via calls to ddi_umem_unlock.
8147  */
8148 
8149 static void
8150 i_ddi_umem_unlock_thread(void)
8151 {
8152 	struct ddi_umem_cookie	*ret_cookie;
8153 	callb_cpr_t	cprinfo;
8154 
8155 	/* process the ddi_umem_unlock list */
8156 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8157 	    callb_generic_cpr, "unlock_thread");
8158 	for (;;) {
8159 		mutex_enter(&ddi_umem_unlock_mutex);
8160 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8161 			ret_cookie = ddi_umem_unlock_head;
8162 			/* take if off the list */
8163 			if ((ddi_umem_unlock_head =
8164 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8165 				ddi_umem_unlock_tail = NULL;
8166 			}
8167 			mutex_exit(&ddi_umem_unlock_mutex);
8168 			/* unlock the pages in this cookie */
8169 			(void) i_ddi_umem_unlock(ret_cookie);
8170 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8171 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8172 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8173 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8174 			mutex_exit(&ddi_umem_unlock_mutex);
8175 		}
8176 	}
8177 	/* ddi_umem_unlock_thread does not exit */
8178 	/* NOTREACHED */
8179 }
8180 
8181 /*
8182  * Start the thread that will process the ddi_umem_unlock list if it is
8183  * not already started (i_ddi_umem_unlock_thread).
8184  */
8185 static void
8186 i_ddi_umem_unlock_thread_start(void)
8187 {
8188 	mutex_enter(&ddi_umem_unlock_mutex);
8189 	if (ddi_umem_unlock_thread == NULL) {
8190 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8191 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8192 		    TS_RUN, minclsyspri);
8193 	}
8194 	mutex_exit(&ddi_umem_unlock_mutex);
8195 }
8196 
8197 /*
8198  * Lock the virtual address range in the current process and create a
8199  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8200  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8201  * to user space.
8202  *
8203  * Note: The resource control accounting currently uses a full charge model
8204  * in other words attempts to lock the same/overlapping areas of memory
8205  * will deduct the full size of the buffer from the projects running
8206  * counter for the device locked memory. This applies to umem_lockmemory too.
8207  *
8208  * addr, size should be PAGESIZE aligned
8209  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8210  *	identifies whether the locked memory will be read or written or both
8211  *
8212  * Returns 0 on success
8213  *	EINVAL - for invalid parameters
8214  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8215  *	ENOMEM - is returned if the current request to lock memory exceeds
8216  *		project.max-device-locked-memory resource control value.
8217  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8218  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}
	/*
	 * umem_incr_devlockmem stashes the project ptr into the
	 * cookie. This is needed during unlock since that can
	 * happen in a non-USER context
	 */
	ASSERT(p->lockmem_proj);

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;	/* only i_ddi_umem_unlock uses this cookie */

	*cookie = (ddi_umem_cookie_t)p;
	return (error);	/* 0: the as_pagelock above succeeded */
}
8310 
8311 /*
8312  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8313  * unlocked by i_ddi_umem_unlock_thread.
8314  */
8315 
8316 void
8317 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8318 {
8319 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8320 
8321 	ASSERT(p->type == UMEM_LOCKED);
8322 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8323 	ASSERT(ddi_umem_unlock_thread != NULL);
8324 
8325 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8326 	mutex_enter(&ddi_umem_unlock_mutex);
8327 	if (ddi_umem_unlock_head == NULL) {
8328 		ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8329 		cv_broadcast(&ddi_umem_unlock_cv);
8330 	} else {
8331 		ddi_umem_unlock_tail->unl_forw = p;
8332 		ddi_umem_unlock_tail = p;
8333 	}
8334 	mutex_exit(&ddi_umem_unlock_mutex);
8335 }
8336 
8337 /*
8338  * Create a buf structure from a ddi_umem_cookie
8339  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8340  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8341  * off, len - identifies the portion of the memory represented by the cookie
8342  *		that the buf points to.
8343  *	NOTE: off, len need to follow the alignment/size restrictions of the
8344  *		device (dev) that this buf will be passed to. Some devices
8345  *		will accept unrestricted alignment/size, whereas others (such as
8346  *		st) require some block-size alignment/size. It is the caller's
8347  *		responsibility to ensure that the alignment/size restrictions
8348  *		are met (we cannot assert as we do not know the restrictions)
8349  *
8350  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8351  *		the flags used in ddi_umem_lock
8352  *
8353  * The following three arguments are used to initialize fields in the
8354  * buf structure and are uninterpreted by this routine.
8355  *
8356  * dev
8357  * blkno
8358  * iodone
8359  *
8360  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8361  *
8362  * Returns a buf structure pointer on success (to be freed by freerbuf)
8363  *	NULL on any parameter error or memory alloc failure
8364  *
8365  */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	/*
	 * NOTE(review): this check looks redundant given the one above
	 * unless (off + len) wrapped around -- confirm the intent.
	 */
	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
		(p->procp == NULL) : (p->procp != NULL));

	/* may return NULL when sleepflag is DDI_UMEM_NOSLEEP */
	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* a locked-page cookie carries a shadow page list */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
8427 
8428 /*
8429  * Fault-handling and related routines
8430  */
8431 
8432 ddi_devstate_t
8433 ddi_get_devstate(dev_info_t *dip)
8434 {
8435 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8436 		return (DDI_DEVSTATE_OFFLINE);
8437 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8438 		return (DDI_DEVSTATE_DOWN);
8439 	else if (DEVI_IS_BUS_QUIESCED(dip))
8440 		return (DDI_DEVSTATE_QUIESCED);
8441 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8442 		return (DDI_DEVSTATE_DEGRADED);
8443 	else
8444 		return (DDI_DEVSTATE_UP);
8445 }
8446 
8447 void
8448 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8449 	ddi_fault_location_t location, const char *message)
8450 {
8451 	struct ddi_fault_event_data fd;
8452 	ddi_eventcookie_t ec;
8453 
8454 	/*
8455 	 * Assemble all the information into a fault-event-data structure
8456 	 */
8457 	fd.f_dip = dip;
8458 	fd.f_impact = impact;
8459 	fd.f_location = location;
8460 	fd.f_message = message;
8461 	fd.f_oldstate = ddi_get_devstate(dip);
8462 
8463 	/*
8464 	 * Get eventcookie from defining parent.
8465 	 */
8466 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8467 	    DDI_SUCCESS)
8468 		return;
8469 
8470 	(void) ndi_post_event(dip, dip, ec, &fd);
8471 }
8472 
8473 char *
8474 i_ddi_devi_class(dev_info_t *dip)
8475 {
8476 	return (DEVI(dip)->devi_device_class);
8477 }
8478 
8479 int
8480 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8481 {
8482 	struct dev_info *devi = DEVI(dip);
8483 
8484 	mutex_enter(&devi->devi_lock);
8485 
8486 	if (devi->devi_device_class)
8487 		kmem_free(devi->devi_device_class,
8488 		    strlen(devi->devi_device_class) + 1);
8489 
8490 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8491 	    != NULL) {
8492 		mutex_exit(&devi->devi_lock);
8493 		return (DDI_SUCCESS);
8494 	}
8495 
8496 	mutex_exit(&devi->devi_lock);
8497 
8498 	return (DDI_FAILURE);
8499 }
8500 
8501 
8502 /*
8503  * Task Queues DDI interfaces.
8504  */
8505 
8506 /* ARGSUSED */
8507 ddi_taskq_t *
8508 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8509     pri_t pri, uint_t cflags)
8510 {
8511 	char full_name[TASKQ_NAMELEN];
8512 	const char *tq_name;
8513 	int nodeid = 0;
8514 
8515 	if (dip == NULL)
8516 		tq_name = name;
8517 	else {
8518 		nodeid = ddi_get_instance(dip);
8519 
8520 		if (name == NULL)
8521 			name = "tq";
8522 
8523 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8524 		    ddi_driver_name(dip), name);
8525 
8526 		tq_name = full_name;
8527 	}
8528 
8529 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8530 		    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8531 		    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8532 }
8533 
8534 void
8535 ddi_taskq_destroy(ddi_taskq_t *tq)
8536 {
8537 	taskq_destroy((taskq_t *)tq);
8538 }
8539 
8540 int
8541 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8542     void *arg, uint_t dflags)
8543 {
8544 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8545 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8546 
8547 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8548 }
8549 
8550 void
8551 ddi_taskq_wait(ddi_taskq_t *tq)
8552 {
8553 	taskq_wait((taskq_t *)tq);
8554 }
8555 
8556 void
8557 ddi_taskq_suspend(ddi_taskq_t *tq)
8558 {
8559 	taskq_suspend((taskq_t *)tq);
8560 }
8561 
8562 boolean_t
8563 ddi_taskq_suspended(ddi_taskq_t *tq)
8564 {
8565 	return (taskq_suspended((taskq_t *)tq));
8566 }
8567 
8568 void
8569 ddi_taskq_resume(ddi_taskq_t *tq)
8570 {
8571 	taskq_resume((taskq_t *)tq);
8572 }
8573 
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;	/* no trailing digits seen yet */
	char		c;

	l = strlen(ifname);
	/*
	 * Scan backwards from the end of the name.  On the first
	 * non-digit character, copy the leading l characters (the
	 * alphanumeric name part) into alnum and convert the digit
	 * tail into num.
	 */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/*
	 * Fail if the name was all digits (l == 0) or had no digit
	 * tail at all (nonum still set).
	 */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	*nump = num;
	return (DDI_SUCCESS);
}
8603