xref: /titanic_51/usr/src/uts/common/os/sunddi.c (revision 10144ea86a21f583d4eec553d1a18da7544ba6de)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/note.h>
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/poll.h>
37 #include <sys/mman.h>
38 #include <sys/kmem.h>
39 #include <sys/model.h>
40 #include <sys/file.h>
41 #include <sys/proc.h>
42 #include <sys/open.h>
43 #include <sys/user.h>
44 #include <sys/t_lock.h>
45 #include <sys/vm.h>
46 #include <sys/stat.h>
47 #include <vm/hat.h>
48 #include <vm/seg.h>
49 #include <vm/seg_vn.h>
50 #include <vm/seg_dev.h>
51 #include <vm/as.h>
52 #include <sys/cmn_err.h>
53 #include <sys/cpuvar.h>
54 #include <sys/debug.h>
55 #include <sys/autoconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/esunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/kstat.h>
60 #include <sys/conf.h>
61 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
62 #include <sys/ndi_impldefs.h>	/* include prototypes */
63 #include <sys/hwconf.h>
64 #include <sys/pathname.h>
65 #include <sys/modctl.h>
66 #include <sys/epm.h>
67 #include <sys/devctl.h>
68 #include <sys/callb.h>
69 #include <sys/cladm.h>
70 #include <sys/sysevent.h>
71 #include <sys/dacf_impl.h>
72 #include <sys/ddidevmap.h>
73 #include <sys/bootconf.h>
74 #include <sys/disp.h>
75 #include <sys/atomic.h>
76 #include <sys/promif.h>
77 #include <sys/instance.h>
78 #include <sys/sysevent/eventdefs.h>
79 #include <sys/task.h>
80 #include <sys/project.h>
81 #include <sys/taskq.h>
82 #include <sys/devpolicy.h>
83 #include <sys/ctype.h>
84 #include <net/if.h>
85 #include <sys/rctl.h>
86 
/* Kernel globals defined elsewhere (scheduler priority, locked-memory rctls). */
extern	pri_t	minclsyspri;

extern	rctl_hndl_t rc_project_locked_mem;
extern	rctl_hndl_t rc_zone_locked_mem;

#ifdef DEBUG
/* DEBUG-only tunable; its consumers are not visible in this part of the file. */
static int sunddi_debug = 0;
#endif /* DEBUG */

/* ddi_umem_unlock miscellaneous */

static	void	i_ddi_umem_unlock_thread_start(void);

static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
static	kthread_t	*ddi_umem_unlock_thread;
/*
 * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
 */
static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
108 
109 
110 /*
111  * DDI(Sun) Function and flag definitions:
112  */
113 
114 #if defined(__x86)
115 /*
116  * Used to indicate which entries were chosen from a range.
117  */
118 char	*chosen_reg = "chosen-reg";
119 #endif
120 
121 /*
122  * Function used to ring system console bell
123  */
124 void (*ddi_console_bell_func)(clock_t duration);
125 
126 /*
127  * Creating register mappings and handling interrupts:
128  */
129 
130 /*
131  * Generic ddi_map: Call parent to fulfill request...
132  */
133 
134 int
135 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
136     off_t len, caddr_t *addrp)
137 {
138 	dev_info_t *pdip;
139 
140 	ASSERT(dp);
141 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
142 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
143 	    dp, mp, offset, len, addrp));
144 }
145 
146 /*
147  * ddi_apply_range: (Called by nexi only.)
148  * Apply ranges in parent node dp, to child regspec rp...
149  */
150 
151 int
152 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
153 {
154 	return (i_ddi_apply_range(dp, rdip, rp));
155 }
156 
/*
 * ddi_map_regs: map register set "rnumber" of dip into kernel virtual
 * address space, returning the mapped address in *kaddrp.  The request
 * is ultimately satisfied by an ancestor nexus via ddi_map().
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked, read/write kernel mapping request for rnumber. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
220 
221 void
222 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
223     off_t len)
224 {
225 	ddi_map_req_t mr;
226 
227 	mr.map_op = DDI_MO_UNMAP;
228 	mr.map_type = DDI_MT_RNUMBER;
229 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
230 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
231 	mr.map_obj.rnumber = rnumber;
232 	mr.map_handlep = NULL;
233 	mr.map_vers = DDI_MAP_VERSION;
234 
235 	/*
236 	 * Call my parent to unmap my regs.
237 	 */
238 
239 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
240 	*kaddrp = (caddr_t)0;
241 #if defined(__x86)
242 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
243 #endif
244 }
245 
246 int
247 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
248 	off_t offset, off_t len, caddr_t *vaddrp)
249 {
250 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
251 }
252 
253 /*
254  * nullbusmap:	The/DDI default bus_map entry point for nexi
255  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
256  *		with no HAT/MMU layer to be programmed at this level.
257  *
258  *		If the call is to map by rnumber, return an error,
259  *		otherwise pass anything else up the tree to my parent.
260  */
261 int
262 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
263 	off_t offset, off_t len, caddr_t *vaddrp)
264 {
265 	_NOTE(ARGUNUSED(rdip))
266 	if (mp->map_type == DDI_MT_RNUMBER)
267 		return (DDI_ME_UNSUPPORTED);
268 
269 	return (ddi_map(dip, mp, offset, len, vaddrp));
270 }
271 
272 /*
273  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
274  *			   Only for use by nexi using the reg/range paradigm.
275  */
276 struct regspec *
277 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
278 {
279 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
280 }
281 
282 
283 /*
284  * Note that we allow the dip to be nil because we may be called
285  * prior even to the instantiation of the devinfo tree itself - all
286  * regular leaf and nexus drivers should always use a non-nil dip!
287  *
288  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
289  * simply get a synchronous fault as soon as we touch a missing address.
290  *
291  * Poke is rather more carefully handled because we might poke to a write
292  * buffer, "succeed", then only find some time later that we got an
293  * asynchronous fault that indicated that the address we were writing to
294  * was not really backed by hardware.
295  */
296 
297 static int
298 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
299     void *addr, void *value_p)
300 {
301 	union {
302 		uint64_t	u64;
303 		uint32_t	u32;
304 		uint16_t	u16;
305 		uint8_t		u8;
306 	} peekpoke_value;
307 
308 	peekpoke_ctlops_t peekpoke_args;
309 	uint64_t dummy_result;
310 	int rval;
311 
312 	/* Note: size is assumed to be correct;  it is not checked. */
313 	peekpoke_args.size = size;
314 	peekpoke_args.dev_addr = (uintptr_t)addr;
315 	peekpoke_args.handle = NULL;
316 	peekpoke_args.repcount = 1;
317 	peekpoke_args.flags = 0;
318 
319 	if (cmd == DDI_CTLOPS_POKE) {
320 		switch (size) {
321 		case sizeof (uint8_t):
322 			peekpoke_value.u8 = *(uint8_t *)value_p;
323 			break;
324 		case sizeof (uint16_t):
325 			peekpoke_value.u16 = *(uint16_t *)value_p;
326 			break;
327 		case sizeof (uint32_t):
328 			peekpoke_value.u32 = *(uint32_t *)value_p;
329 			break;
330 		case sizeof (uint64_t):
331 			peekpoke_value.u64 = *(uint64_t *)value_p;
332 			break;
333 		}
334 	}
335 
336 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
337 
338 	if (devi != NULL)
339 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
340 		    &dummy_result);
341 	else
342 		rval = peekpoke_mem(cmd, &peekpoke_args);
343 
344 	/*
345 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
346 	 */
347 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
348 		switch (size) {
349 		case sizeof (uint8_t):
350 			*(uint8_t *)value_p = peekpoke_value.u8;
351 			break;
352 		case sizeof (uint16_t):
353 			*(uint16_t *)value_p = peekpoke_value.u16;
354 			break;
355 		case sizeof (uint32_t):
356 			*(uint32_t *)value_p = peekpoke_value.u32;
357 			break;
358 		case sizeof (uint64_t):
359 			*(uint64_t *)value_p = peekpoke_value.u64;
360 			break;
361 		}
362 	}
363 
364 	return (rval);
365 }
366 
367 /*
368  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
369  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
370  */
371 int
372 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
373 {
374 	switch (size) {
375 	case sizeof (uint8_t):
376 	case sizeof (uint16_t):
377 	case sizeof (uint32_t):
378 	case sizeof (uint64_t):
379 		break;
380 	default:
381 		return (DDI_FAILURE);
382 	}
383 
384 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
385 }
386 
387 int
388 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
389 {
390 	switch (size) {
391 	case sizeof (uint8_t):
392 	case sizeof (uint16_t):
393 	case sizeof (uint32_t):
394 	case sizeof (uint64_t):
395 		break;
396 	default:
397 		return (DDI_FAILURE);
398 	}
399 
400 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
401 }
402 
/* Fixed-size cautious-read wrappers; see ddi_peek(9F). */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
430 
431 
432 /*
433  * We need to separate the old interfaces from the new ones and leave them
434  * in here for a while. Previous versions of the OS defined the new interfaces
435  * to the old interfaces. This way we can fix things up so that we can
436  * eventually remove these interfaces.
437  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
438  * or earlier will actually have a reference to ddi_peekc in the binary.
439  */
#ifdef _ILP32
/*
 * Obsolete ILP32-only aliases for ddi_peek8/16/32/64, kept for binary
 * compatibility with pre-S10 consumers (see comment above).
 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */
469 
/* Fixed-size cautious-write wrappers; see ddi_poke(9F). */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
493 
494 /*
495  * We need to separate the old interfaces from the new ones and leave them
496  * in here for a while. Previous versions of the OS defined the new interfaces
497  * to the old interfaces. This way we can fix things up so that we can
498  * eventually remove these interfaces.
499  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
500  * or earlier will actually have a reference to ddi_pokec in the binary.
501  */
#ifdef _ILP32
/*
 * Obsolete ILP32-only aliases for ddi_poke8/16/32/64, kept for binary
 * compatibility with pre-S10 consumers (see comment above).
 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */
527 
528 /*
529  * ddi_peekpokeio() is used primarily by the mem drivers for moving
530  * data to and from uio structures via peek and poke.  Note that we
531  * use "internal" routines ddi_peek and ddi_poke to make this go
532  * slightly faster, avoiding the call overhead ..
533  */
/*
 * Move "len" bytes between device address "addr" and the caller's uio,
 * using cautious peek/poke accesses of at most "xfersize" bytes each.
 * Returns DDI_FAILURE on the first failed access or uio transfer.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Never transfer more than the native word size at a time. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd address or odd residual length: go byte-at-a-time. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest access (up to xfersize) that both
			 * the address and the remaining length are aligned
			 * to; fall back to 16 bits otherwise.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			/* Stage through ibuffer between device and uio. */
			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
609 
610 /*
611  * These routines are used by drivers that do layered ioctls
612  * On sparc, they're implemented in assembler to avoid spilling
613  * register windows in the common (copyin) case ..
614  */
615 #if !defined(__sparc)
616 int
617 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
618 {
619 	if (flags & FKIOCTL)
620 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
621 	return (copyin(buf, kernbuf, size));
622 }
623 
624 int
625 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
626 {
627 	if (flags & FKIOCTL)
628 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
629 	return (copyout(buf, kernbuf, size));
630 }
631 #endif	/* !__sparc */
632 
633 /*
634  * Conversions in nexus pagesize units.  We don't duplicate the
635  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
636  * routines anyway.
637  */
/* Bytes-to-pages conversion via the parent's DDI_CTLOPS_BTOP ctlop. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* As ddi_btop(), but via DDI_CTLOPS_BTOPR (the rounding variant). */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Pages-to-bytes conversion via the parent's DDI_CTLOPS_PTOB ctlop. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
664 
/*
 * Raise the processor interrupt priority via spl7(); the returned value
 * is the previous level, to be handed back to ddi_exit_critical().
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Restore the interrupt priority level saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
676 
677 /*
678  * Nexus ctlops punter
679  */
680 
681 #if !defined(__sparc)
682 /*
683  * Request bus_ctl parent to handle a bus_ctl request
684  *
685  * (The sparc version is in sparc_ddi.s)
686  */
687 int
688 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
689 {
690 	int (*fp)();
691 
692 	if (!d || !r)
693 		return (DDI_FAILURE);
694 
695 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
696 		return (DDI_FAILURE);
697 
698 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
699 	return ((*fp)(d, r, op, a, v));
700 }
701 
702 #endif
703 
704 /*
705  * DMA/DVMA setup
706  */
707 
#if defined(__sparc)
/*
 * Default DMA limits, substituted by the ddi_dma_*_setup() routines
 * when a caller passes a NULL limits pointer.
 */
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/*
	 * 0x86 in the top byte.  Parenthesized explicitly: the original
	 * "0x86<<24+0" relied on '+' binding tighter than '<<' (so the
	 * shift count was 24+0 == 24), which read misleadingly.
	 */
	((uint_t)0x86 << 24),	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
734 
/*
 * ddi_dma_setup: old-style DMA mapping setup from a fully-formed
 * ddi_dma_req.  Returns the status of the underlying bus_dma_map call.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Substitute the default limits when the caller supplied none. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	/* On x86 the caller must supply DMA limits. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
767 
/*
 * ddi_dma_addr_setup: build a DMA request for "len" bytes at virtual
 * address "addr" in address space "as" and map it.  A NULL limits
 * pointer selects standard_limits.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* A zero-length range can never be mapped. */
	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
808 
/*
 * ddi_dma_buf_setup: build a DMA request from a struct buf and map it.
 * Handles page-list (B_PAGEIO), shadow-page (B_SHADOW) and plain
 * virtual-address buffers.  A NULL limits pointer selects
 * standard_limits.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Buffer is described by a page list, not a mapping. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
873 
874 #if !defined(__sparc)
875 /*
876  * Request bus_dma_ctl parent to fiddle with a dma request.
877  *
878  * (The sparc version is in sparc_subr.s)
879  */
880 int
881 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
882     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
883     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
884 {
885 	int (*fp)();
886 
887 	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
888 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
889 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
890 }
891 #endif
892 
893 /*
894  * For all DMA control functions, call the DMA control
895  * routine and return status.
896  *
897  * Just plain assume that the parent is to be called.
898  * If a nexus driver or a thread outside the framework
899  * of a nexus driver or a leaf driver calls these functions,
900  * it is up to them to deal with the fact that the parent's
901  * bus_dma_ctl function will be the first one called.
902  */
903 
904 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
905 
/*
 * Thin wrappers that funnel the old-style DMA control operations
 * through ddi_dma_mctl(); the HD macro (above) expands to the
 * handle's requester dip.
 */
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	/* Reporting the current window requires a partial mapping. */
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	/* A window handle is treated as a DMA handle for the mctl call. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}
956 
957 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
958 /*
959  * This routine is Obsolete and should be removed from ALL architectures
960  * in a future release of Solaris.
961  *
962  * It is deliberately NOT ported to amd64; please fix the code that
963  * depends on this routine to use ddi_dma_nextcookie(9F).
964  *
965  * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
966  * is a side effect to some other cleanup), we're still not going to support
967  * this interface on x64.
968  */
/*
 * Convert a DMA segment to a cookie via DDI_DMA_SEGTOC; obsolete — see
 * the comment above.
 */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	/* A segment handle is treated as a DMA handle for the mctl call. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
978 #endif	/* (__i386 && !__amd64) || __sparc */
979 
980 #if !defined(__sparc)
981 
982 /*
983  * The SPARC versions of these routines are done in assembler to
984  * save register windows, so they're in sparc_subr.s.
985  */
986 
987 int
988 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
989 	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
990 {
991 	dev_info_t	*hdip;
992 	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
993 	    ddi_dma_handle_t *);
994 
995 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
996 
997 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
998 	return ((*funcp)(hdip, rdip, dmareqp, handlep));
999 }
1000 
1001 int
1002 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1003     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1004 {
1005 	dev_info_t	*hdip;
1006 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
1007 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
1008 
1009 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1010 
1011 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
1012 	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
1013 }
1014 
1015 int
1016 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
1017 {
1018 	dev_info_t	*hdip;
1019 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1020 
1021 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1022 
1023 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
1024 	return ((*funcp)(hdip, rdip, handlep));
1025 }
1026 
1027 int
1028 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1029     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1030     ddi_dma_cookie_t *cp, uint_t *ccountp)
1031 {
1032 	dev_info_t	*hdip;
1033 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1034 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
1035 
1036 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
1037 
1038 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
1039 	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
1040 }
1041 
1042 int
1043 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1044     ddi_dma_handle_t handle)
1045 {
1046 	dev_info_t	*hdip;
1047 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1048 
1049 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1050 
1051 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
1052 	return ((*funcp)(hdip, rdip, handle));
1053 }
1054 
1055 
1056 int
1057 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1058     ddi_dma_handle_t handle, off_t off, size_t len,
1059     uint_t cache_flags)
1060 {
1061 	dev_info_t	*hdip;
1062 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1063 	    off_t, size_t, uint_t);
1064 
1065 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1066 
1067 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1068 	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
1069 }
1070 
1071 int
1072 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1073     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1074     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1075 {
1076 	dev_info_t	*hdip;
1077 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1078 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1079 
1080 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1081 
1082 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
1083 	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
1084 	    cookiep, ccountp));
1085 }
1086 
1087 int
1088 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1089 {
1090 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1091 	dev_info_t *hdip, *dip;
1092 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1093 		size_t, uint_t);
1094 
1095 	/*
1096 	 * the DMA nexus driver will set DMP_NOSYNC if the
1097 	 * platform does not require any sync operation. For
1098 	 * example if the memory is uncached or consistent
1099 	 * and without any I/O write buffers involved.
1100 	 */
1101 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1102 		return (DDI_SUCCESS);
1103 
1104 	dip = hp->dmai_rdip;
1105 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1106 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1107 	return ((*funcp)(hdip, dip, h, o, l, whom));
1108 }
1109 
/*
 * Unbind the mappings of a bound DMA handle, dispatching to the nexus
 * recorded for unbind operations on the handle's requesting dip.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): unlike the other dispatchers in this file, the
	 * function pointer comes from the requester dip's cached
	 * devi_bus_dma_unbindfunc rather than from hdip's bus_ops --
	 * presumably an equivalent cached value; confirm before
	 * "simplifying" this to match the others.
	 */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1122 
1123 #endif	/* !__sparc */
1124 
1125 int
1126 ddi_dma_free(ddi_dma_handle_t h)
1127 {
1128 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
1129 }
1130 
1131 int
1132 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1133 {
1134 	ddi_dma_lim_t defalt;
1135 	size_t size = len;
1136 
1137 	if (!limp) {
1138 		defalt = standard_limits;
1139 		limp = &defalt;
1140 	}
1141 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1142 	    iopbp, NULL, NULL));
1143 }
1144 
1145 void
1146 ddi_iopb_free(caddr_t iopb)
1147 {
1148 	i_ddi_mem_free(iopb, NULL);
1149 }
1150 
1151 int
1152 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1153 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1154 {
1155 	ddi_dma_lim_t defalt;
1156 	size_t size = length;
1157 
1158 	if (!limits) {
1159 		defalt = standard_limits;
1160 		limits = &defalt;
1161 	}
1162 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1163 	    1, 0, kaddrp, real_length, NULL));
1164 }
1165 
1166 void
1167 ddi_mem_free(caddr_t kaddr)
1168 {
1169 	i_ddi_mem_free(kaddr, NULL);
1170 }
1171 
1172 /*
1173  * DMA attributes, alignment, burst sizes, and transfer minimums
1174  */
1175 int
1176 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1177 {
1178 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1179 
1180 	if (attrp == NULL)
1181 		return (DDI_FAILURE);
1182 	*attrp = dimp->dmai_attr;
1183 	return (DDI_SUCCESS);
1184 }
1185 
1186 int
1187 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1188 {
1189 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1190 
1191 	if (!dimp)
1192 		return (0);
1193 	else
1194 		return (dimp->dmai_burstsizes);
1195 }
1196 
1197 int
1198 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1199 {
1200 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1201 
1202 	if (!dimp || !alignment || !mineffect)
1203 		return (DDI_FAILURE);
1204 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1205 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1206 	} else {
1207 		if (dimp->dmai_burstsizes & 0xff0000) {
1208 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1209 		} else {
1210 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1211 		}
1212 	}
1213 	*mineffect = dimp->dmai_minxfer;
1214 	return (DDI_SUCCESS);
1215 }
1216 
1217 int
1218 ddi_iomin(dev_info_t *a, int i, int stream)
1219 {
1220 	int r;
1221 
1222 	/*
1223 	 * Make sure that the initial value is sane
1224 	 */
1225 	if (i & (i - 1))
1226 		return (0);
1227 	if (i == 0)
1228 		i = (stream) ? 4 : 1;
1229 
1230 	r = ddi_ctlops(a, a,
1231 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1232 	if (r != DDI_SUCCESS || (i & (i - 1)))
1233 		return (0);
1234 	return (i);
1235 }
1236 
1237 /*
1238  * Given two DMA attribute structures, apply the attributes
1239  * of one to the other, following the rules of attributes
1240  * and the wishes of the caller.
1241  *
1242  * The rules of DMA attribute structures are that you cannot
1243  * make things *less* restrictive as you apply one set
1244  * of attributes to another.
1245  *
1246  */
1247 void
1248 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
1249 {
1250 	attr->dma_attr_addr_lo =
1251 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
1252 	attr->dma_attr_addr_hi =
1253 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
1254 	attr->dma_attr_count_max =
1255 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
1256 	attr->dma_attr_align =
1257 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
1258 	attr->dma_attr_burstsizes =
1259 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
1260 	attr->dma_attr_minxfer =
1261 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
1262 	attr->dma_attr_maxxfer =
1263 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
1264 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
1265 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
1266 	    (uint_t)mod->dma_attr_sgllen);
1267 	attr->dma_attr_granular =
1268 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
1269 }
1270 
1271 /*
1272  * mmap/segmap interface:
1273  */
1274 
1275 /*
1276  * ddi_segmap:		setup the default segment driver. Calls the drivers
1277  *			XXmmap routine to validate the range to be mapped.
1278  *			Return ENXIO of the range is not valid.  Create
1279  *			a seg_dev segment that contains all of the
1280  *			necessary information and will reference the
1281  *			default segment driver routines. It returns zero
1282  *			on success or non-zero on failure.
1283  */
1284 int
1285 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
1286     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
1287 {
1288 	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
1289 	    off_t, uint_t, uint_t, uint_t, struct cred *);
1290 
1291 	return (spec_segmap(dev, offset, asp, addrp, len,
1292 	    prot, maxprot, flags, credp));
1293 }
1294 
1295 /*
1296  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1297  *			drivers. Allows each successive parent to resolve
1298  *			address translations and add its mappings to the
1299  *			mapping list supplied in the page structure. It
1300  *			returns zero on success	or non-zero on failure.
1301  */
1302 
1303 int
1304 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
1305     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
1306 {
1307 	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
1308 }
1309 
1310 /*
1311  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1312  *	Invokes platform specific DDI to determine whether attributes specified
1313  *	in attr(9s) are	valid for the region of memory that will be made
1314  *	available for direct access to user process via the mmap(2) system call.
1315  */
/*
 * Returns 0 when the region is mappable with the given attributes,
 * -1 on any failure; *hat_flags receives the framework's hat flags.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 * DDI_MO_MAP_HANDLE asks for validation of the handle's
	 * attributes rather than an actual mapping.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * Note: ah_hat_flags is copied out before the ddi_map() result
	 * is checked, i.e. even when the call failed.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1378 
1379 
1380 /*
1381  * Property functions:	 See also, ddipropdefs.h.
1382  *
1383  * These functions are the framework for the property functions,
1384  * i.e. they support software defined properties.  All implementation
1385  * specific property handling (i.e.: self-identifying devices and
1386  * PROM defined properties are handled in the implementation specific
1387  * functions (defined in ddi_implfuncs.h).
1388  */
1389 
1390 /*
1391  * nopropop:	Shouldn't be called, right?
1392  */
1393 int
1394 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1395     char *name, caddr_t valuep, int *lengthp)
1396 {
1397 	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
1398 	return (DDI_PROP_NOT_FOUND);
1399 }
1400 
1401 #ifdef	DDI_PROP_DEBUG
1402 int ddi_prop_debug_flag = 0;
1403 
1404 int
1405 ddi_prop_debug(int enable)
1406 {
1407 	int prev = ddi_prop_debug_flag;
1408 
1409 	if ((enable != 0) || (prev != 0))
1410 		printf("ddi_prop_debug: debugging %s\n",
1411 		    enable ? "enabled" : "disabled");
1412 	ddi_prop_debug_flag = enable;
1413 	return (prev);
1414 }
1415 
1416 #endif	/* DDI_PROP_DEBUG */
1417 
1418 /*
1419  * Search a property list for a match, if found return pointer
1420  * to matching prop struct, else return NULL.
1421  */
1422 
1423 ddi_prop_t *
1424 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1425 {
1426 	ddi_prop_t	*propp;
1427 
1428 	/*
1429 	 * find the property in child's devinfo:
1430 	 * Search order defined by this search function is first matching
1431 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1432 	 * dev == propp->prop_dev, name == propp->name, and the correct
1433 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1434 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1435 	 */
1436 	if (dev == DDI_DEV_T_NONE)
1437 		dev = DDI_DEV_T_ANY;
1438 
1439 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1440 
1441 		if (!DDI_STRSAME(propp->prop_name, name))
1442 			continue;
1443 
1444 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1445 			continue;
1446 
1447 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1448 			continue;
1449 
1450 		return (propp);
1451 	}
1452 
1453 	return ((ddi_prop_t *)0);
1454 }
1455 
1456 /*
1457  * Search for property within devnames structures
1458  */
/*
 * Look up `name' on the global (per-driver) property list in the
 * devnames entry for dev's major number.  The list is walked under
 * dn_lock; returns the matching ddi_prop_t or NULL.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/*
	 * NOTE(review): dn_global_prop_ptr is tested here before
	 * dn_lock is taken -- presumably safe because the pointer is
	 * only rechecked for the fast "no list" exit; confirm.
	 */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/* LDI_DEV_T_ANY in flags wildcards the dev match. */
		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* Requested type bits must intersect the property's. */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1505 
1506 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1507 
1508 /*
1509  * ddi_prop_search_global:
1510  *	Search the global property list within devnames
1511  *	for the named property.  Return the encoded value.
1512  */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/*
	 * The returned buffer is owned by the caller, who must free it
	 * with kmem_free(*valuep, *lengthp).
	 */
	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 *
	 * NOTE(review): propp is dereferenced here after dn_lock was
	 * dropped inside i_ddi_search_global_prop() -- presumably
	 * global properties are never freed once created; confirm.
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1544 
1545 /*
1546  * ddi_prop_search_common:	Lookup and return the encoded value
1547  */
/*
 * Searches the software property lists of `dip' (driver, system,
 * driver-global, then hardware lists) under devi_lock.  When nothing
 * is found locally, the lock is dropped and the search ascends the
 * devinfo tree through each parent's bus_prop_op, stopping at the
 * root node, at DDI_PROP_DONTPASS, or when a result is produced.
 * For sleeping PROP_LEN_AND_VAL_ALLOC requests the lock is dropped
 * for the allocation and the whole search is retried.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* buffer from a prior pass */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		/* Not found on this node; drop state before ascending. */
		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}
1775 
1776 
1777 /*
1778  * ddi_prop_op: The basic property operator for drivers.
1779  *
1780  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1781  *
1782  *	prop_op			valuep
1783  *	------			------
1784  *
1785  *	PROP_LEN		<unused>
1786  *
1787  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1788  *
1789  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1790  *				address of allocated buffer, if successful)
1791  */
1792 int
1793 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1794     char *name, caddr_t valuep, int *lengthp)
1795 {
1796 	int	i;
1797 
1798 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1799 
1800 	/*
1801 	 * If this was originally an LDI prop lookup then we bail here.
1802 	 * The reason is that the LDI property lookup interfaces first call
1803 	 * a drivers prop_op() entry point to allow it to override
1804 	 * properties.  But if we've made it here, then the driver hasn't
1805 	 * overriden any properties.  We don't want to continue with the
1806 	 * property search here because we don't have any type inforamtion.
1807 	 * When we return failure, the LDI interfaces will then proceed to
1808 	 * call the typed property interfaces to look up the property.
1809 	 */
1810 	if (mod_flags & DDI_PROP_DYNAMIC)
1811 		return (DDI_PROP_NOT_FOUND);
1812 
1813 	/*
1814 	 * check for pre-typed property consumer asking for typed property:
1815 	 * see e_ddi_getprop_int64.
1816 	 */
1817 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1818 		mod_flags |= DDI_PROP_TYPE_INT64;
1819 	mod_flags |= DDI_PROP_TYPE_ANY;
1820 
1821 	i = ddi_prop_search_common(dev, dip, prop_op,
1822 		mod_flags, name, valuep, (uint_t *)lengthp);
1823 	if (i == DDI_PROP_FOUND_1275)
1824 		return (DDI_PROP_SUCCESS);
1825 	return (i);
1826 }
1827 
1828 /*
1829  * ddi_prop_op_nblocks: The basic property operator for drivers that maintain
1830  * size in number of DEV_BSIZE blocks.  Provides a dynamic property
1831  * implementation for size oriented properties based on nblocks64 values passed
1832  * in by the driver.  Fallback to ddi_prop_op if the nblocks64 is too large.
1833  * This interface should not be used with a nblocks64 that represents the
1834  * driver's idea of how to represent unknown, if nblocks is unknown use
1835  * ddi_prop_op.
1836  */
1837 int
1838 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1839     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1840 {
1841 	uint64_t size64;
1842 
1843 	/*
1844 	 * There is no point in supporting nblocks64 values that don't have
1845 	 * an accurate uint64_t byte count representation.
1846 	 */
1847 	if (nblocks64 >= (UINT64_MAX >> DEV_BSHIFT))
1848 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1849 		    name, valuep, lengthp));
1850 
1851 	size64 = nblocks64 << DEV_BSHIFT;
1852 	return (ddi_prop_op_size(dev, dip, prop_op, mod_flags,
1853 	    name, valuep, lengthp, size64));
1854 }
1855 
1856 /*
1857  * ddi_prop_op_size: The basic property operator for drivers that maintain size
1858  * in bytes. Provides a of dynamic property implementation for size oriented
1859  * properties based on size64 values passed in by the driver.  Fallback to
1860  * ddi_prop_op if the size64 is too large. This interface should not be used
1861  * with a size64 that represents the driver's idea of how to represent unknown,
1862  * if size is unknown use ddi_prop_op.
1863  *
1864  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1865  * integers. While the most likely interface to request them ([bc]devi_size)
1866  * is declared int (signed) there is no enforcement of this, which means we
1867  * can't enforce limitations here without risking regression.
1868  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = lbtodb(size64);

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * "Nblocks"/"Size" are 64-bit dynamic properties; the legacy
	 * "nblocks"/"size" names are served as 32-bit values only when
	 * the value fits, otherwise the request falls through to the
	 * regular property search.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	/* the length of the property and the request must match */
	if (callers_length != *lengthp)
		return (DDI_PROP_INVAL_ARG);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	return (DDI_PROP_SUCCESS);
}
1934 
1935 /*
1936  * Variable length props...
1937  */
1938 
1939 /*
1940  * ddi_getlongprop:	Get variable length property len+val into a buffer
1941  *		allocated by property provider via kmem_alloc. Requester
1942  *		is responsible for freeing returned property via kmem_free.
1943  *
1944  *	Arguments:
1945  *
1946  *	dev_t:	Input:	dev_t of property.
1947  *	dip:	Input:	dev_info_t pointer of child.
1948  *	flags:	Input:	Possible flag modifiers are:
1949  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
1950  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
1951  *	name:	Input:	name of property.
1952  *	valuep:	Output:	Addr of callers buffer pointer.
1953  *	lengthp:Output:	*lengthp will contain prop length on exit.
1954  *
1955  *	Possible Returns:
1956  *
1957  *		DDI_PROP_SUCCESS:	Prop found and returned.
1958  *		DDI_PROP_NOT_FOUND:	Prop not found
1959  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
1960  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
1961  */
1962 
1963 int
1964 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
1965     char *name, caddr_t valuep, int *lengthp)
1966 {
1967 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
1968 	    flags, name, valuep, lengthp));
1969 }
1970 
1971 /*
1972  *
1973  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
1974  *				buffer. (no memory allocation by provider).
1975  *
1976  *	dev_t:	Input:	dev_t of property.
1977  *	dip:	Input:	dev_info_t pointer of child.
1978  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
1979  *	name:	Input:	name of property
1980  *	valuep:	Input:	ptr to callers buffer.
1981  *	lengthp:I/O:	ptr to length of callers buffer on entry,
1982  *			actual length of property on exit.
1983  *
1984  *	Possible returns:
1985  *
1986  *		DDI_PROP_SUCCESS	Prop found and returned
1987  *		DDI_PROP_NOT_FOUND	Prop not found
1988  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
1989  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
1990  *					no value returned, but actual prop
1991  *					length returned in *lengthp
1992  *
1993  */
1994 
1995 int
1996 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
1997     char *name, caddr_t valuep, int *lengthp)
1998 {
1999 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2000 	    flags, name, valuep, lengthp));
2001 }
2002 
2003 /*
2004  * Integer/boolean sized props.
2005  *
2006  * Call is value only... returns found boolean or int sized prop value or
2007  * defvalue if prop not found or is wrong length or is explicitly undefined.
2008  * Only flag is DDI_PROP_DONTPASS...
2009  *
2010  * By convention, this interface returns boolean (0) sized properties
2011  * as value (int)1.
2012  *
2013  * This never returns an error, if property not found or specifically
2014  * undefined, the input `defvalue' is returned.
2015  */
2016 
2017 int
2018 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2019 {
2020 	int	propvalue = defvalue;
2021 	int	proplength = sizeof (int);
2022 	int	error;
2023 
2024 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2025 	    flags, name, (caddr_t)&propvalue, &proplength);
2026 
2027 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2028 		propvalue = 1;
2029 
2030 	return (propvalue);
2031 }
2032 
2033 /*
2034  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2035  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2036  */
2037 
2038 int
2039 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2040 {
2041 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2042 }
2043 
2044 /*
2045  * Allocate a struct prop_driver_data, along with 'size' bytes
2046  * for decoded property data.  This structure is freed by
2047  * calling ddi_prop_free(9F).
2048  */
2049 static void *
2050 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2051 {
2052 	struct prop_driver_data *pdd;
2053 
2054 	/*
2055 	 * Allocate a structure with enough memory to store the decoded data.
2056 	 */
2057 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2058 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2059 	pdd->pdd_prop_free = prop_free;
2060 
2061 	/*
2062 	 * Return a pointer to the location to put the decoded data.
2063 	 */
2064 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2065 }
2066 
2067 /*
2068  * Allocated the memory needed to store the encoded data in the property
2069  * handle.
2070  */
static int
ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
{
	/*
	 * If size is zero, then set data to NULL and size to 0.  This
	 * is a boolean property.
	 */
	if (size == 0) {
		ph->ph_size = 0;
		ph->ph_data = NULL;
		ph->ph_cur_pos = NULL;
		ph->ph_save_pos = NULL;
	} else {
		/*
		 * NOTE(review): this is an equality test (==), not a
		 * flag mask -- presumably ph_flags carries only the
		 * sleep/nosleep flag at this point; confirm before
		 * adding other flag bits.
		 */
		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
			if (ph->ph_data == NULL)
				return (DDI_PROP_NO_MEMORY);
		} else
			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
		ph->ph_size = size;
		ph->ph_cur_pos = ph->ph_data;
		ph->ph_save_pos = ph->ph_data;
	}
	return (DDI_PROP_SUCCESS);
}
2096 
2097 /*
2098  * Free the space allocated by the lookup routines.  Each lookup routine
2099  * returns a pointer to the decoded data to the driver.  The driver then
2100  * passes this pointer back to us.  This data actually lives in a struct
2101  * prop_driver_data.  We use negative indexing to find the beginning of
2102  * the structure and then free the entire structure using the size and
2103  * the free routine stored in the structure.
2104  */
2105 void
2106 ddi_prop_free(void *datap)
2107 {
2108 	struct prop_driver_data *pdd;
2109 
2110 	/*
2111 	 * Get the structure
2112 	 */
2113 	pdd = (struct prop_driver_data *)
2114 		((caddr_t)datap - sizeof (struct prop_driver_data));
2115 	/*
2116 	 * Call the free routine to free it
2117 	 */
2118 	(*pdd->pdd_prop_free)(pdd);
2119 }
2120 
2121 /*
2122  * Free the data associated with an array of ints,
2123  * allocated with ddi_prop_decode_alloc().
2124  */
2125 static void
2126 ddi_prop_free_ints(struct prop_driver_data *pdd)
2127 {
2128 	kmem_free(pdd, pdd->pdd_size);
2129 }
2130 
2131 /*
2132  * Free a single string property or a single string contained within
2133  * the argv style return value of an array of strings.
2134  */
2135 static void
2136 ddi_prop_free_string(struct prop_driver_data *pdd)
2137 {
2138 	kmem_free(pdd, pdd->pdd_size);
2139 
2140 }
2141 
2142 /*
2143  * Free an array of strings.
2144  */
2145 static void
2146 ddi_prop_free_strings(struct prop_driver_data *pdd)
2147 {
2148 	kmem_free(pdd, pdd->pdd_size);
2149 }
2150 
2151 /*
2152  * Free the data associated with an array of bytes.
2153  */
2154 static void
2155 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2156 {
2157 	kmem_free(pdd, pdd->pdd_size);
2158 }
2159 
2160 /*
2161  * Reset the current location pointer in the property handle to the
2162  * beginning of the data.
2163  */
2164 void
2165 ddi_prop_reset_pos(prop_handle_t *ph)
2166 {
2167 	ph->ph_cur_pos = ph->ph_data;
2168 	ph->ph_save_pos = ph->ph_data;
2169 }
2170 
2171 /*
2172  * Restore the current location pointer in the property handle to the
2173  * saved position.
2174  */
2175 void
2176 ddi_prop_save_pos(prop_handle_t *ph)
2177 {
2178 	ph->ph_save_pos = ph->ph_cur_pos;
2179 }
2180 
2181 /*
2182  * Save the location that the current location pointer is pointing to..
2183  */
2184 void
2185 ddi_prop_restore_pos(prop_handle_t *ph)
2186 {
2187 	ph->ph_cur_pos = ph->ph_save_pos;
2188 }
2189 
2190 /*
2191  * Property encode/decode functions
2192  */
2193 
2194 /*
2195  * Decode a single integer property
2196  */
2197 static int
2198 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2199 {
2200 	int	i;
2201 	int	tmp;
2202 
2203 	/*
2204 	 * If there is nothing to decode return an error
2205 	 */
2206 	if (ph->ph_size == 0)
2207 		return (DDI_PROP_END_OF_DATA);
2208 
2209 	/*
2210 	 * Decode the property as a single integer and return it
2211 	 * in data if we were able to decode it.
2212 	 */
2213 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2214 	if (i < DDI_PROP_RESULT_OK) {
2215 		switch (i) {
2216 		case DDI_PROP_RESULT_EOF:
2217 			return (DDI_PROP_END_OF_DATA);
2218 
2219 		case DDI_PROP_RESULT_ERROR:
2220 			return (DDI_PROP_CANNOT_DECODE);
2221 		}
2222 	}
2223 
2224 	*(int *)data = tmp;
2225 	*nelements = 1;
2226 	return (DDI_PROP_SUCCESS);
2227 }
2228 
2229 /*
2230  * Decode a single 64 bit integer property
2231  */
2232 static int
2233 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2234 {
2235 	int	i;
2236 	int64_t	tmp;
2237 
2238 	/*
2239 	 * If there is nothing to decode return an error
2240 	 */
2241 	if (ph->ph_size == 0)
2242 		return (DDI_PROP_END_OF_DATA);
2243 
2244 	/*
2245 	 * Decode the property as a single integer and return it
2246 	 * in data if we were able to decode it.
2247 	 */
2248 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2249 	if (i < DDI_PROP_RESULT_OK) {
2250 		switch (i) {
2251 		case DDI_PROP_RESULT_EOF:
2252 			return (DDI_PROP_END_OF_DATA);
2253 
2254 		case DDI_PROP_RESULT_ERROR:
2255 			return (DDI_PROP_CANNOT_DECODE);
2256 		}
2257 	}
2258 
2259 	*(int64_t *)data = tmp;
2260 	*nelements = 1;
2261 	return (DDI_PROP_SUCCESS);
2262 }
2263 
2264 /*
2265  * Decode an array of integers property
2266  */
2267 static int
2268 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2269 {
2270 	int	i;
2271 	int	cnt = 0;
2272 	int	*tmp;
2273 	int	*intp;
2274 	int	n;
2275 
2276 	/*
2277 	 * Figure out how many array elements there are by going through the
2278 	 * data without decoding it first and counting.
2279 	 */
2280 	for (;;) {
2281 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2282 		if (i < 0)
2283 			break;
2284 		cnt++;
2285 	}
2286 
2287 	/*
2288 	 * If there are no elements return an error
2289 	 */
2290 	if (cnt == 0)
2291 		return (DDI_PROP_END_OF_DATA);
2292 
2293 	/*
2294 	 * If we cannot skip through the data, we cannot decode it
2295 	 */
2296 	if (i == DDI_PROP_RESULT_ERROR)
2297 		return (DDI_PROP_CANNOT_DECODE);
2298 
2299 	/*
2300 	 * Reset the data pointer to the beginning of the encoded data
2301 	 */
2302 	ddi_prop_reset_pos(ph);
2303 
2304 	/*
2305 	 * Allocated memory to store the decoded value in.
2306 	 */
2307 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2308 		ddi_prop_free_ints);
2309 
2310 	/*
2311 	 * Decode each element and place it in the space we just allocated
2312 	 */
2313 	tmp = intp;
2314 	for (n = 0; n < cnt; n++, tmp++) {
2315 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2316 		if (i < DDI_PROP_RESULT_OK) {
2317 			/*
2318 			 * Free the space we just allocated
2319 			 * and return an error.
2320 			 */
2321 			ddi_prop_free(intp);
2322 			switch (i) {
2323 			case DDI_PROP_RESULT_EOF:
2324 				return (DDI_PROP_END_OF_DATA);
2325 
2326 			case DDI_PROP_RESULT_ERROR:
2327 				return (DDI_PROP_CANNOT_DECODE);
2328 			}
2329 		}
2330 	}
2331 
2332 	*nelements = cnt;
2333 	*(int **)data = intp;
2334 
2335 	return (DDI_PROP_SUCCESS);
2336 }
2337 
2338 /*
2339  * Decode a 64 bit integer array property
2340  */
2341 static int
2342 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2343 {
2344 	int	i;
2345 	int	n;
2346 	int	cnt = 0;
2347 	int64_t	*tmp;
2348 	int64_t	*intp;
2349 
2350 	/*
2351 	 * Count the number of array elements by going
2352 	 * through the data without decoding it.
2353 	 */
2354 	for (;;) {
2355 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2356 		if (i < 0)
2357 			break;
2358 		cnt++;
2359 	}
2360 
2361 	/*
2362 	 * If there are no elements return an error
2363 	 */
2364 	if (cnt == 0)
2365 		return (DDI_PROP_END_OF_DATA);
2366 
2367 	/*
2368 	 * If we cannot skip through the data, we cannot decode it
2369 	 */
2370 	if (i == DDI_PROP_RESULT_ERROR)
2371 		return (DDI_PROP_CANNOT_DECODE);
2372 
2373 	/*
2374 	 * Reset the data pointer to the beginning of the encoded data
2375 	 */
2376 	ddi_prop_reset_pos(ph);
2377 
2378 	/*
2379 	 * Allocate memory to store the decoded value.
2380 	 */
2381 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2382 		ddi_prop_free_ints);
2383 
2384 	/*
2385 	 * Decode each element and place it in the space allocated
2386 	 */
2387 	tmp = intp;
2388 	for (n = 0; n < cnt; n++, tmp++) {
2389 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2390 		if (i < DDI_PROP_RESULT_OK) {
2391 			/*
2392 			 * Free the space we just allocated
2393 			 * and return an error.
2394 			 */
2395 			ddi_prop_free(intp);
2396 			switch (i) {
2397 			case DDI_PROP_RESULT_EOF:
2398 				return (DDI_PROP_END_OF_DATA);
2399 
2400 			case DDI_PROP_RESULT_ERROR:
2401 				return (DDI_PROP_CANNOT_DECODE);
2402 			}
2403 		}
2404 	}
2405 
2406 	*nelements = cnt;
2407 	*(int64_t **)data = intp;
2408 
2409 	return (DDI_PROP_SUCCESS);
2410 }
2411 
2412 /*
2413  * Encode an array of integers property (Can be one element)
2414  */
2415 int
2416 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2417 {
2418 	int	i;
2419 	int	*tmp;
2420 	int	cnt;
2421 	int	size;
2422 
2423 	/*
2424 	 * If there is no data, we cannot do anything
2425 	 */
2426 	if (nelements == 0)
2427 		return (DDI_PROP_CANNOT_ENCODE);
2428 
2429 	/*
2430 	 * Get the size of an encoded int.
2431 	 */
2432 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2433 
2434 	if (size < DDI_PROP_RESULT_OK) {
2435 		switch (size) {
2436 		case DDI_PROP_RESULT_EOF:
2437 			return (DDI_PROP_END_OF_DATA);
2438 
2439 		case DDI_PROP_RESULT_ERROR:
2440 			return (DDI_PROP_CANNOT_ENCODE);
2441 		}
2442 	}
2443 
2444 	/*
2445 	 * Allocate space in the handle to store the encoded int.
2446 	 */
2447 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2448 		DDI_PROP_SUCCESS)
2449 		return (DDI_PROP_NO_MEMORY);
2450 
2451 	/*
2452 	 * Encode the array of ints.
2453 	 */
2454 	tmp = (int *)data;
2455 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2456 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2457 		if (i < DDI_PROP_RESULT_OK) {
2458 			switch (i) {
2459 			case DDI_PROP_RESULT_EOF:
2460 				return (DDI_PROP_END_OF_DATA);
2461 
2462 			case DDI_PROP_RESULT_ERROR:
2463 				return (DDI_PROP_CANNOT_ENCODE);
2464 			}
2465 		}
2466 	}
2467 
2468 	return (DDI_PROP_SUCCESS);
2469 }
2470 
2471 
2472 /*
2473  * Encode a 64 bit integer array property
2474  */
2475 int
2476 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2477 {
2478 	int i;
2479 	int cnt;
2480 	int size;
2481 	int64_t *tmp;
2482 
2483 	/*
2484 	 * If there is no data, we cannot do anything
2485 	 */
2486 	if (nelements == 0)
2487 		return (DDI_PROP_CANNOT_ENCODE);
2488 
2489 	/*
2490 	 * Get the size of an encoded 64 bit int.
2491 	 */
2492 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2493 
2494 	if (size < DDI_PROP_RESULT_OK) {
2495 		switch (size) {
2496 		case DDI_PROP_RESULT_EOF:
2497 			return (DDI_PROP_END_OF_DATA);
2498 
2499 		case DDI_PROP_RESULT_ERROR:
2500 			return (DDI_PROP_CANNOT_ENCODE);
2501 		}
2502 	}
2503 
2504 	/*
2505 	 * Allocate space in the handle to store the encoded int.
2506 	 */
2507 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2508 	    DDI_PROP_SUCCESS)
2509 		return (DDI_PROP_NO_MEMORY);
2510 
2511 	/*
2512 	 * Encode the array of ints.
2513 	 */
2514 	tmp = (int64_t *)data;
2515 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2516 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2517 		if (i < DDI_PROP_RESULT_OK) {
2518 			switch (i) {
2519 			case DDI_PROP_RESULT_EOF:
2520 				return (DDI_PROP_END_OF_DATA);
2521 
2522 			case DDI_PROP_RESULT_ERROR:
2523 				return (DDI_PROP_CANNOT_ENCODE);
2524 			}
2525 		}
2526 	}
2527 
2528 	return (DDI_PROP_SUCCESS);
2529 }
2530 
2531 /*
2532  * Decode a single string property
2533  */
2534 static int
2535 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2536 {
2537 	char		*tmp;
2538 	char		*str;
2539 	int		i;
2540 	int		size;
2541 
2542 	/*
2543 	 * If there is nothing to decode return an error
2544 	 */
2545 	if (ph->ph_size == 0)
2546 		return (DDI_PROP_END_OF_DATA);
2547 
2548 	/*
2549 	 * Get the decoded size of the encoded string.
2550 	 */
2551 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2552 	if (size < DDI_PROP_RESULT_OK) {
2553 		switch (size) {
2554 		case DDI_PROP_RESULT_EOF:
2555 			return (DDI_PROP_END_OF_DATA);
2556 
2557 		case DDI_PROP_RESULT_ERROR:
2558 			return (DDI_PROP_CANNOT_DECODE);
2559 		}
2560 	}
2561 
2562 	/*
2563 	 * Allocated memory to store the decoded value in.
2564 	 */
2565 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2566 
2567 	ddi_prop_reset_pos(ph);
2568 
2569 	/*
2570 	 * Decode the str and place it in the space we just allocated
2571 	 */
2572 	tmp = str;
2573 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2574 	if (i < DDI_PROP_RESULT_OK) {
2575 		/*
2576 		 * Free the space we just allocated
2577 		 * and return an error.
2578 		 */
2579 		ddi_prop_free(str);
2580 		switch (i) {
2581 		case DDI_PROP_RESULT_EOF:
2582 			return (DDI_PROP_END_OF_DATA);
2583 
2584 		case DDI_PROP_RESULT_ERROR:
2585 			return (DDI_PROP_CANNOT_DECODE);
2586 		}
2587 	}
2588 
2589 	*(char **)data = str;
2590 	*nelements = 1;
2591 
2592 	return (DDI_PROP_SUCCESS);
2593 }
2594 
2595 /*
2596  * Decode an array of strings.
2597  */
2598 int
2599 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2600 {
2601 	int		cnt = 0;
2602 	char		**strs;
2603 	char		**tmp;
2604 	char		*ptr;
2605 	int		i;
2606 	int		n;
2607 	int		size;
2608 	size_t		nbytes;
2609 
2610 	/*
2611 	 * Figure out how many array elements there are by going through the
2612 	 * data without decoding it first and counting.
2613 	 */
2614 	for (;;) {
2615 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2616 		if (i < 0)
2617 			break;
2618 		cnt++;
2619 	}
2620 
2621 	/*
2622 	 * If there are no elements return an error
2623 	 */
2624 	if (cnt == 0)
2625 		return (DDI_PROP_END_OF_DATA);
2626 
2627 	/*
2628 	 * If we cannot skip through the data, we cannot decode it
2629 	 */
2630 	if (i == DDI_PROP_RESULT_ERROR)
2631 		return (DDI_PROP_CANNOT_DECODE);
2632 
2633 	/*
2634 	 * Reset the data pointer to the beginning of the encoded data
2635 	 */
2636 	ddi_prop_reset_pos(ph);
2637 
2638 	/*
2639 	 * Figure out how much memory we need for the sum total
2640 	 */
2641 	nbytes = (cnt + 1) * sizeof (char *);
2642 
2643 	for (n = 0; n < cnt; n++) {
2644 		/*
2645 		 * Get the decoded size of the current encoded string.
2646 		 */
2647 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2648 		if (size < DDI_PROP_RESULT_OK) {
2649 			switch (size) {
2650 			case DDI_PROP_RESULT_EOF:
2651 				return (DDI_PROP_END_OF_DATA);
2652 
2653 			case DDI_PROP_RESULT_ERROR:
2654 				return (DDI_PROP_CANNOT_DECODE);
2655 			}
2656 		}
2657 
2658 		nbytes += size;
2659 	}
2660 
2661 	/*
2662 	 * Allocate memory in which to store the decoded strings.
2663 	 */
2664 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2665 
2666 	/*
2667 	 * Set up pointers for each string by figuring out yet
2668 	 * again how long each string is.
2669 	 */
2670 	ddi_prop_reset_pos(ph);
2671 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2672 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2673 		/*
2674 		 * Get the decoded size of the current encoded string.
2675 		 */
2676 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2677 		if (size < DDI_PROP_RESULT_OK) {
2678 			ddi_prop_free(strs);
2679 			switch (size) {
2680 			case DDI_PROP_RESULT_EOF:
2681 				return (DDI_PROP_END_OF_DATA);
2682 
2683 			case DDI_PROP_RESULT_ERROR:
2684 				return (DDI_PROP_CANNOT_DECODE);
2685 			}
2686 		}
2687 
2688 		*tmp = ptr;
2689 		ptr += size;
2690 	}
2691 
2692 	/*
2693 	 * String array is terminated by a NULL
2694 	 */
2695 	*tmp = NULL;
2696 
2697 	/*
2698 	 * Finally, we can decode each string
2699 	 */
2700 	ddi_prop_reset_pos(ph);
2701 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2702 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2703 		if (i < DDI_PROP_RESULT_OK) {
2704 			/*
2705 			 * Free the space we just allocated
2706 			 * and return an error
2707 			 */
2708 			ddi_prop_free(strs);
2709 			switch (i) {
2710 			case DDI_PROP_RESULT_EOF:
2711 				return (DDI_PROP_END_OF_DATA);
2712 
2713 			case DDI_PROP_RESULT_ERROR:
2714 				return (DDI_PROP_CANNOT_DECODE);
2715 			}
2716 		}
2717 	}
2718 
2719 	*(char ***)data = strs;
2720 	*nelements = cnt;
2721 
2722 	return (DDI_PROP_SUCCESS);
2723 }
2724 
2725 /*
2726  * Encode a string.
2727  */
2728 int
2729 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2730 {
2731 	char		**tmp;
2732 	int		size;
2733 	int		i;
2734 
2735 	/*
2736 	 * If there is no data, we cannot do anything
2737 	 */
2738 	if (nelements == 0)
2739 		return (DDI_PROP_CANNOT_ENCODE);
2740 
2741 	/*
2742 	 * Get the size of the encoded string.
2743 	 */
2744 	tmp = (char **)data;
2745 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2746 	if (size < DDI_PROP_RESULT_OK) {
2747 		switch (size) {
2748 		case DDI_PROP_RESULT_EOF:
2749 			return (DDI_PROP_END_OF_DATA);
2750 
2751 		case DDI_PROP_RESULT_ERROR:
2752 			return (DDI_PROP_CANNOT_ENCODE);
2753 		}
2754 	}
2755 
2756 	/*
2757 	 * Allocate space in the handle to store the encoded string.
2758 	 */
2759 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2760 		return (DDI_PROP_NO_MEMORY);
2761 
2762 	ddi_prop_reset_pos(ph);
2763 
2764 	/*
2765 	 * Encode the string.
2766 	 */
2767 	tmp = (char **)data;
2768 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2769 	if (i < DDI_PROP_RESULT_OK) {
2770 		switch (i) {
2771 		case DDI_PROP_RESULT_EOF:
2772 			return (DDI_PROP_END_OF_DATA);
2773 
2774 		case DDI_PROP_RESULT_ERROR:
2775 			return (DDI_PROP_CANNOT_ENCODE);
2776 		}
2777 	}
2778 
2779 	return (DDI_PROP_SUCCESS);
2780 }
2781 
2782 
2783 /*
2784  * Encode an array of strings.
2785  */
2786 int
2787 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2788 {
2789 	int		cnt = 0;
2790 	char		**tmp;
2791 	int		size;
2792 	uint_t		total_size;
2793 	int		i;
2794 
2795 	/*
2796 	 * If there is no data, we cannot do anything
2797 	 */
2798 	if (nelements == 0)
2799 		return (DDI_PROP_CANNOT_ENCODE);
2800 
2801 	/*
2802 	 * Get the total size required to encode all the strings.
2803 	 */
2804 	total_size = 0;
2805 	tmp = (char **)data;
2806 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2807 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2808 		if (size < DDI_PROP_RESULT_OK) {
2809 			switch (size) {
2810 			case DDI_PROP_RESULT_EOF:
2811 				return (DDI_PROP_END_OF_DATA);
2812 
2813 			case DDI_PROP_RESULT_ERROR:
2814 				return (DDI_PROP_CANNOT_ENCODE);
2815 			}
2816 		}
2817 		total_size += (uint_t)size;
2818 	}
2819 
2820 	/*
2821 	 * Allocate space in the handle to store the encoded strings.
2822 	 */
2823 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2824 		return (DDI_PROP_NO_MEMORY);
2825 
2826 	ddi_prop_reset_pos(ph);
2827 
2828 	/*
2829 	 * Encode the array of strings.
2830 	 */
2831 	tmp = (char **)data;
2832 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2833 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2834 		if (i < DDI_PROP_RESULT_OK) {
2835 			switch (i) {
2836 			case DDI_PROP_RESULT_EOF:
2837 				return (DDI_PROP_END_OF_DATA);
2838 
2839 			case DDI_PROP_RESULT_ERROR:
2840 				return (DDI_PROP_CANNOT_ENCODE);
2841 			}
2842 		}
2843 	}
2844 
2845 	return (DDI_PROP_SUCCESS);
2846 }
2847 
2848 
2849 /*
2850  * Decode an array of bytes.
2851  */
2852 static int
2853 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2854 {
2855 	uchar_t		*tmp;
2856 	int		nbytes;
2857 	int		i;
2858 
2859 	/*
2860 	 * If there are no elements return an error
2861 	 */
2862 	if (ph->ph_size == 0)
2863 		return (DDI_PROP_END_OF_DATA);
2864 
2865 	/*
2866 	 * Get the size of the encoded array of bytes.
2867 	 */
2868 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2869 		data, ph->ph_size);
2870 	if (nbytes < DDI_PROP_RESULT_OK) {
2871 		switch (nbytes) {
2872 		case DDI_PROP_RESULT_EOF:
2873 			return (DDI_PROP_END_OF_DATA);
2874 
2875 		case DDI_PROP_RESULT_ERROR:
2876 			return (DDI_PROP_CANNOT_DECODE);
2877 		}
2878 	}
2879 
2880 	/*
2881 	 * Allocated memory to store the decoded value in.
2882 	 */
2883 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2884 
2885 	/*
2886 	 * Decode each element and place it in the space we just allocated
2887 	 */
2888 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2889 	if (i < DDI_PROP_RESULT_OK) {
2890 		/*
2891 		 * Free the space we just allocated
2892 		 * and return an error
2893 		 */
2894 		ddi_prop_free(tmp);
2895 		switch (i) {
2896 		case DDI_PROP_RESULT_EOF:
2897 			return (DDI_PROP_END_OF_DATA);
2898 
2899 		case DDI_PROP_RESULT_ERROR:
2900 			return (DDI_PROP_CANNOT_DECODE);
2901 		}
2902 	}
2903 
2904 	*(uchar_t **)data = tmp;
2905 	*nelements = nbytes;
2906 
2907 	return (DDI_PROP_SUCCESS);
2908 }
2909 
2910 /*
2911  * Encode an array of bytes.
2912  */
2913 int
2914 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2915 {
2916 	int		size;
2917 	int		i;
2918 
2919 	/*
2920 	 * If there are no elements, then this is a boolean property,
2921 	 * so just create a property handle with no data and return.
2922 	 */
2923 	if (nelements == 0) {
2924 		(void) ddi_prop_encode_alloc(ph, 0);
2925 		return (DDI_PROP_SUCCESS);
2926 	}
2927 
2928 	/*
2929 	 * Get the size of the encoded array of bytes.
2930 	 */
2931 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2932 		nelements);
2933 	if (size < DDI_PROP_RESULT_OK) {
2934 		switch (size) {
2935 		case DDI_PROP_RESULT_EOF:
2936 			return (DDI_PROP_END_OF_DATA);
2937 
2938 		case DDI_PROP_RESULT_ERROR:
2939 			return (DDI_PROP_CANNOT_DECODE);
2940 		}
2941 	}
2942 
2943 	/*
2944 	 * Allocate space in the handle to store the encoded bytes.
2945 	 */
2946 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2947 		return (DDI_PROP_NO_MEMORY);
2948 
2949 	/*
2950 	 * Encode the array of bytes.
2951 	 */
2952 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
2953 		nelements);
2954 	if (i < DDI_PROP_RESULT_OK) {
2955 		switch (i) {
2956 		case DDI_PROP_RESULT_EOF:
2957 			return (DDI_PROP_END_OF_DATA);
2958 
2959 		case DDI_PROP_RESULT_ERROR:
2960 			return (DDI_PROP_CANNOT_ENCODE);
2961 		}
2962 	}
2963 
2964 	return (DDI_PROP_SUCCESS);
2965 }
2966 
2967 /*
2968  * OBP 1275 integer, string and byte operators.
2969  *
2970  * DDI_PROP_CMD_DECODE:
2971  *
2972  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
2973  *	DDI_PROP_RESULT_EOF:		end of data
2974  *	DDI_PROP_OK:			data was decoded
2975  *
2976  * DDI_PROP_CMD_ENCODE:
2977  *
2978  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
2979  *	DDI_PROP_RESULT_EOF:		end of data
2980  *	DDI_PROP_OK:			data was encoded
2981  *
2982  * DDI_PROP_CMD_SKIP:
2983  *
2984  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
2985  *	DDI_PROP_RESULT_EOF:		end of data
2986  *	DDI_PROP_OK:			data was skipped
2987  *
2988  * DDI_PROP_CMD_GET_ESIZE:
2989  *
2990  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
2991  *	DDI_PROP_RESULT_EOF:		end of data
2992  *	> 0:				the encoded size
2993  *
2994  * DDI_PROP_CMD_GET_DSIZE:
2995  *
2996  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
2997  *	DDI_PROP_RESULT_EOF:		end of data
2998  *	> 0:				the decoded size
2999  */
3000 
3001 /*
3002  * OBP 1275 integer operator
3003  *
3004  * OBP properties are a byte stream of data, so integers may not be
3005  * properly aligned.  Therefore we need to copy them one byte at a time.
3006  */
3007 int
3008 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
3009 {
3010 	int	i;
3011 
3012 	switch (cmd) {
3013 	case DDI_PROP_CMD_DECODE:
3014 		/*
3015 		 * Check that there is encoded data
3016 		 */
3017 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3018 			return (DDI_PROP_RESULT_ERROR);
3019 		if (ph->ph_flags & PH_FROM_PROM) {
3020 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
3021 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3022 				ph->ph_size - i))
3023 				return (DDI_PROP_RESULT_ERROR);
3024 		} else {
3025 			if (ph->ph_size < sizeof (int) ||
3026 			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3027 				ph->ph_size - sizeof (int))))
3028 			return (DDI_PROP_RESULT_ERROR);
3029 		}
3030 
3031 		/*
3032 		 * Copy the integer, using the implementation-specific
3033 		 * copy function if the property is coming from the PROM.
3034 		 */
3035 		if (ph->ph_flags & PH_FROM_PROM) {
3036 			*data = impl_ddi_prop_int_from_prom(
3037 				(uchar_t *)ph->ph_cur_pos,
3038 				(ph->ph_size < PROP_1275_INT_SIZE) ?
3039 				ph->ph_size : PROP_1275_INT_SIZE);
3040 		} else {
3041 			bcopy(ph->ph_cur_pos, data, sizeof (int));
3042 		}
3043 
3044 		/*
3045 		 * Move the current location to the start of the next
3046 		 * bit of undecoded data.
3047 		 */
3048 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3049 			PROP_1275_INT_SIZE;
3050 		return (DDI_PROP_RESULT_OK);
3051 
3052 	case DDI_PROP_CMD_ENCODE:
3053 		/*
3054 		 * Check that there is room to encoded the data
3055 		 */
3056 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3057 			ph->ph_size < PROP_1275_INT_SIZE ||
3058 			((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3059 				ph->ph_size - sizeof (int))))
3060 			return (DDI_PROP_RESULT_ERROR);
3061 
3062 		/*
3063 		 * Encode the integer into the byte stream one byte at a
3064 		 * time.
3065 		 */
3066 		bcopy(data, ph->ph_cur_pos, sizeof (int));
3067 
3068 		/*
3069 		 * Move the current location to the start of the next bit of
3070 		 * space where we can store encoded data.
3071 		 */
3072 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3073 		return (DDI_PROP_RESULT_OK);
3074 
3075 	case DDI_PROP_CMD_SKIP:
3076 		/*
3077 		 * Check that there is encoded data
3078 		 */
3079 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3080 				ph->ph_size < PROP_1275_INT_SIZE)
3081 			return (DDI_PROP_RESULT_ERROR);
3082 
3083 
3084 		if ((caddr_t)ph->ph_cur_pos ==
3085 				(caddr_t)ph->ph_data + ph->ph_size) {
3086 			return (DDI_PROP_RESULT_EOF);
3087 		} else if ((caddr_t)ph->ph_cur_pos >
3088 				(caddr_t)ph->ph_data + ph->ph_size) {
3089 			return (DDI_PROP_RESULT_EOF);
3090 		}
3091 
3092 		/*
3093 		 * Move the current location to the start of the next bit of
3094 		 * undecoded data.
3095 		 */
3096 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3097 		return (DDI_PROP_RESULT_OK);
3098 
3099 	case DDI_PROP_CMD_GET_ESIZE:
3100 		/*
3101 		 * Return the size of an encoded integer on OBP
3102 		 */
3103 		return (PROP_1275_INT_SIZE);
3104 
3105 	case DDI_PROP_CMD_GET_DSIZE:
3106 		/*
3107 		 * Return the size of a decoded integer on the system.
3108 		 */
3109 		return (sizeof (int));
3110 
3111 	default:
3112 #ifdef DEBUG
3113 		panic("ddi_prop_1275_int: %x impossible", cmd);
3114 		/*NOTREACHED*/
3115 #else
3116 		return (DDI_PROP_RESULT_ERROR);
3117 #endif	/* DEBUG */
3118 	}
3119 }
3120 
3121 /*
3122  * 64 bit integer operator.
3123  *
3124  * This is an extension, defined by Sun, to the 1275 integer
3125  * operator.  This routine handles the encoding/decoding of
3126  * 64 bit integer properties.
3127  */
3128 int
3129 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
3130 {
3131 
3132 	switch (cmd) {
3133 	case DDI_PROP_CMD_DECODE:
3134 		/*
3135 		 * Check that there is encoded data
3136 		 */
3137 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3138 			return (DDI_PROP_RESULT_ERROR);
3139 		if (ph->ph_flags & PH_FROM_PROM) {
3140 			return (DDI_PROP_RESULT_ERROR);
3141 		} else {
3142 			if (ph->ph_size < sizeof (int64_t) ||
3143 			    ((int64_t *)ph->ph_cur_pos >
3144 			    ((int64_t *)ph->ph_data +
3145 			    ph->ph_size - sizeof (int64_t))))
3146 				return (DDI_PROP_RESULT_ERROR);
3147 		}
3148 		/*
3149 		 * Copy the integer, using the implementation-specific
3150 		 * copy function if the property is coming from the PROM.
3151 		 */
3152 		if (ph->ph_flags & PH_FROM_PROM) {
3153 			return (DDI_PROP_RESULT_ERROR);
3154 		} else {
3155 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
3156 		}
3157 
3158 		/*
3159 		 * Move the current location to the start of the next
3160 		 * bit of undecoded data.
3161 		 */
3162 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3163 		    sizeof (int64_t);
3164 			return (DDI_PROP_RESULT_OK);
3165 
3166 	case DDI_PROP_CMD_ENCODE:
3167 		/*
3168 		 * Check that there is room to encoded the data
3169 		 */
3170 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3171 		    ph->ph_size < sizeof (int64_t) ||
3172 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
3173 		    ph->ph_size - sizeof (int64_t))))
3174 			return (DDI_PROP_RESULT_ERROR);
3175 
3176 		/*
3177 		 * Encode the integer into the byte stream one byte at a
3178 		 * time.
3179 		 */
3180 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
3181 
3182 		/*
3183 		 * Move the current location to the start of the next bit of
3184 		 * space where we can store encoded data.
3185 		 */
3186 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3187 		    sizeof (int64_t);
3188 		return (DDI_PROP_RESULT_OK);
3189 
3190 	case DDI_PROP_CMD_SKIP:
3191 		/*
3192 		 * Check that there is encoded data
3193 		 */
3194 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3195 		    ph->ph_size < sizeof (int64_t))
3196 			return (DDI_PROP_RESULT_ERROR);
3197 
3198 		if ((caddr_t)ph->ph_cur_pos ==
3199 		    (caddr_t)ph->ph_data + ph->ph_size) {
3200 			return (DDI_PROP_RESULT_EOF);
3201 		} else if ((caddr_t)ph->ph_cur_pos >
3202 		    (caddr_t)ph->ph_data + ph->ph_size) {
3203 			return (DDI_PROP_RESULT_EOF);
3204 		}
3205 
3206 		/*
3207 		 * Move the current location to the start of
3208 		 * the next bit of undecoded data.
3209 		 */
3210 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3211 		    sizeof (int64_t);
3212 			return (DDI_PROP_RESULT_OK);
3213 
3214 	case DDI_PROP_CMD_GET_ESIZE:
3215 		/*
3216 		 * Return the size of an encoded integer on OBP
3217 		 */
3218 		return (sizeof (int64_t));
3219 
3220 	case DDI_PROP_CMD_GET_DSIZE:
3221 		/*
3222 		 * Return the size of a decoded integer on the system.
3223 		 */
3224 		return (sizeof (int64_t));
3225 
3226 	default:
3227 #ifdef DEBUG
3228 		panic("ddi_prop_int64_op: %x impossible", cmd);
3229 		/*NOTREACHED*/
3230 #else
3231 		return (DDI_PROP_RESULT_ERROR);
3232 #endif  /* DEBUG */
3233 	}
3234 }
3235 
3236 /*
3237  * OBP 1275 string operator.
3238  *
3239  * OBP strings are NULL terminated.
3240  */
3241 int
3242 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3243 {
3244 	int	n;
3245 	char	*p;
3246 	char	*end;
3247 
3248 	switch (cmd) {
3249 	case DDI_PROP_CMD_DECODE:
3250 		/*
3251 		 * Check that there is encoded data
3252 		 */
3253 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3254 			return (DDI_PROP_RESULT_ERROR);
3255 		}
3256 
3257 		/*
3258 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3259 		 * how to NULL terminate result.
3260 		 */
3261 		p = (char *)ph->ph_cur_pos;
3262 		end = (char *)ph->ph_data + ph->ph_size;
3263 		if (p >= end)
3264 			return (DDI_PROP_RESULT_EOF);
3265 
3266 		while (p < end) {
3267 			*data++ = *p;
3268 			if (*p++ == 0) {	/* NULL from OBP */
3269 				ph->ph_cur_pos = p;
3270 				return (DDI_PROP_RESULT_OK);
3271 			}
3272 		}
3273 
3274 		/*
3275 		 * If OBP did not NULL terminate string, which happens
3276 		 * (at least) for 'true'/'false' boolean values, account for
3277 		 * the space and store null termination on decode.
3278 		 */
3279 		ph->ph_cur_pos = p;
3280 		*data = 0;
3281 		return (DDI_PROP_RESULT_OK);
3282 
3283 	case DDI_PROP_CMD_ENCODE:
3284 		/*
3285 		 * Check that there is room to encoded the data
3286 		 */
3287 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3288 			return (DDI_PROP_RESULT_ERROR);
3289 		}
3290 
3291 		n = strlen(data) + 1;
3292 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3293 				ph->ph_size - n)) {
3294 			return (DDI_PROP_RESULT_ERROR);
3295 		}
3296 
3297 		/*
3298 		 * Copy the NULL terminated string
3299 		 */
3300 		bcopy(data, ph->ph_cur_pos, n);
3301 
3302 		/*
3303 		 * Move the current location to the start of the next bit of
3304 		 * space where we can store encoded data.
3305 		 */
3306 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3307 		return (DDI_PROP_RESULT_OK);
3308 
3309 	case DDI_PROP_CMD_SKIP:
3310 		/*
3311 		 * Check that there is encoded data
3312 		 */
3313 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3314 			return (DDI_PROP_RESULT_ERROR);
3315 		}
3316 
3317 		/*
3318 		 * Return the string length plus one for the NULL
3319 		 * We know the size of the property, we need to
3320 		 * ensure that the string is properly formatted,
3321 		 * since we may be looking up random OBP data.
3322 		 */
3323 		p = (char *)ph->ph_cur_pos;
3324 		end = (char *)ph->ph_data + ph->ph_size;
3325 		if (p >= end)
3326 			return (DDI_PROP_RESULT_EOF);
3327 
3328 		while (p < end) {
3329 			if (*p++ == 0) {	/* NULL from OBP */
3330 				ph->ph_cur_pos = p;
3331 				return (DDI_PROP_RESULT_OK);
3332 			}
3333 		}
3334 
3335 		/*
3336 		 * Accommodate the fact that OBP does not always NULL
3337 		 * terminate strings.
3338 		 */
3339 		ph->ph_cur_pos = p;
3340 		return (DDI_PROP_RESULT_OK);
3341 
3342 	case DDI_PROP_CMD_GET_ESIZE:
3343 		/*
3344 		 * Return the size of the encoded string on OBP.
3345 		 */
3346 		return (strlen(data) + 1);
3347 
3348 	case DDI_PROP_CMD_GET_DSIZE:
3349 		/*
3350 		 * Return the string length plus one for the NULL.
3351 		 * We know the size of the property, we need to
3352 		 * ensure that the string is properly formatted,
3353 		 * since we may be looking up random OBP data.
3354 		 */
3355 		p = (char *)ph->ph_cur_pos;
3356 		end = (char *)ph->ph_data + ph->ph_size;
3357 		if (p >= end)
3358 			return (DDI_PROP_RESULT_EOF);
3359 
3360 		for (n = 0; p < end; n++) {
3361 			if (*p++ == 0) {	/* NULL from OBP */
3362 				ph->ph_cur_pos = p;
3363 				return (n + 1);
3364 			}
3365 		}
3366 
3367 		/*
3368 		 * If OBP did not NULL terminate string, which happens for
3369 		 * 'true'/'false' boolean values, account for the space
3370 		 * to store null termination here.
3371 		 */
3372 		ph->ph_cur_pos = p;
3373 		return (n + 1);
3374 
3375 	default:
3376 #ifdef DEBUG
3377 		panic("ddi_prop_1275_string: %x impossible", cmd);
3378 		/*NOTREACHED*/
3379 #else
3380 		return (DDI_PROP_RESULT_ERROR);
3381 #endif	/* DEBUG */
3382 	}
3383 }
3384 
3385 /*
3386  * OBP 1275 byte operator
3387  *
3388  * Caller must specify the number of bytes to get.  OBP encodes bytes
3389  * as a byte so there is a 1-to-1 translation.
3390  */
3391 int
3392 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3393 	uint_t nelements)
3394 {
3395 	switch (cmd) {
3396 	case DDI_PROP_CMD_DECODE:
3397 		/*
3398 		 * Check that there is encoded data
3399 		 */
3400 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3401 			ph->ph_size < nelements ||
3402 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3403 				ph->ph_size - nelements)))
3404 			return (DDI_PROP_RESULT_ERROR);
3405 
3406 		/*
3407 		 * Copy out the bytes
3408 		 */
3409 		bcopy(ph->ph_cur_pos, data, nelements);
3410 
3411 		/*
3412 		 * Move the current location
3413 		 */
3414 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3415 		return (DDI_PROP_RESULT_OK);
3416 
3417 	case DDI_PROP_CMD_ENCODE:
3418 		/*
3419 		 * Check that there is room to encode the data
3420 		 */
3421 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3422 			ph->ph_size < nelements ||
3423 			((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3424 				ph->ph_size - nelements)))
3425 			return (DDI_PROP_RESULT_ERROR);
3426 
3427 		/*
3428 		 * Copy in the bytes
3429 		 */
3430 		bcopy(data, ph->ph_cur_pos, nelements);
3431 
3432 		/*
3433 		 * Move the current location to the start of the next bit of
3434 		 * space where we can store encoded data.
3435 		 */
3436 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3437 		return (DDI_PROP_RESULT_OK);
3438 
3439 	case DDI_PROP_CMD_SKIP:
3440 		/*
3441 		 * Check that there is encoded data
3442 		 */
3443 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3444 				ph->ph_size < nelements)
3445 			return (DDI_PROP_RESULT_ERROR);
3446 
3447 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3448 				ph->ph_size - nelements))
3449 			return (DDI_PROP_RESULT_EOF);
3450 
3451 		/*
3452 		 * Move the current location
3453 		 */
3454 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3455 		return (DDI_PROP_RESULT_OK);
3456 
3457 	case DDI_PROP_CMD_GET_ESIZE:
3458 		/*
3459 		 * The size in bytes of the encoded size is the
3460 		 * same as the decoded size provided by the caller.
3461 		 */
3462 		return (nelements);
3463 
3464 	case DDI_PROP_CMD_GET_DSIZE:
3465 		/*
3466 		 * Just return the number of bytes specified by the caller.
3467 		 */
3468 		return (nelements);
3469 
3470 	default:
3471 #ifdef DEBUG
3472 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3473 		/*NOTREACHED*/
3474 #else
3475 		return (DDI_PROP_RESULT_ERROR);
3476 #endif	/* DEBUG */
3477 	}
3478 }
3479 
3480 /*
3481  * Used for properties that come from the OBP, hardware configuration files,
3482  * or that are created by calls to ddi_prop_update(9F).
3483  */
3484 static struct prop_handle_ops prop_1275_ops = {
3485 	ddi_prop_1275_int,
3486 	ddi_prop_1275_string,
3487 	ddi_prop_1275_bytes,
3488 	ddi_prop_int64_op
3489 };
3490 
3491 
3492 /*
3493  * Interface to create/modify a managed property on child's behalf...
3494  * Flags interpreted are:
3495  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3496  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3497  *
3498  * Use same dev_t when modifying or undefining a property.
3499  * Search for properties with DDI_DEV_T_ANY to match first named
3500  * property on the list.
3501  *
3502  * Properties are stored LIFO and subsequently will match the first
3503  * `matching' instance.
3504  */
3505 
3506 /*
3507  * ddi_prop_add:	Add a software defined property
3508  */
3509 
3510 /*
3511  * define to get a new ddi_prop_t.
3512  * km_flags are KM_SLEEP or KM_NOSLEEP.
3513  */
3514 
3515 #define	DDI_NEW_PROP_T(km_flags)	\
3516 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3517 
3518 static int
3519 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3520     char *name, caddr_t value, int length)
3521 {
3522 	ddi_prop_t	*new_propp, *propp;
3523 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3524 	int		km_flags = KM_NOSLEEP;
3525 	int		name_buf_len;
3526 
3527 	/*
3528 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3529 	 */
3530 
3531 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3532 		return (DDI_PROP_INVAL_ARG);
3533 
3534 	if (flags & DDI_PROP_CANSLEEP)
3535 		km_flags = KM_SLEEP;
3536 
3537 	if (flags & DDI_PROP_SYSTEM_DEF)
3538 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3539 	else if (flags & DDI_PROP_HW_DEF)
3540 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3541 
3542 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3543 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3544 		return (DDI_PROP_NO_MEMORY);
3545 	}
3546 
3547 	/*
3548 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3549 	 * to get the real major number for the device.  This needs to be
3550 	 * done because some drivers need to call ddi_prop_create in their
3551 	 * attach routines but they don't have a dev.  By creating the dev
3552 	 * ourself if the major number is 0, drivers will not have to know what
3553 	 * their major number.	They can just create a dev with major number
3554 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3555 	 * work by recreating the same dev that we already have, but its the
3556 	 * price you pay :-).
3557 	 *
3558 	 * This fixes bug #1098060.
3559 	 */
3560 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3561 		new_propp->prop_dev =
3562 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3563 		    getminor(dev));
3564 	} else
3565 		new_propp->prop_dev = dev;
3566 
3567 	/*
3568 	 * Allocate space for property name and copy it in...
3569 	 */
3570 
3571 	name_buf_len = strlen(name) + 1;
3572 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3573 	if (new_propp->prop_name == 0)	{
3574 		kmem_free(new_propp, sizeof (ddi_prop_t));
3575 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3576 		return (DDI_PROP_NO_MEMORY);
3577 	}
3578 	bcopy(name, new_propp->prop_name, name_buf_len);
3579 
3580 	/*
3581 	 * Set the property type
3582 	 */
3583 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3584 
3585 	/*
3586 	 * Set length and value ONLY if not an explicit property undefine:
3587 	 * NOTE: value and length are zero for explicit undefines.
3588 	 */
3589 
3590 	if (flags & DDI_PROP_UNDEF_IT) {
3591 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3592 	} else {
3593 		if ((new_propp->prop_len = length) != 0) {
3594 			new_propp->prop_val = kmem_alloc(length, km_flags);
3595 			if (new_propp->prop_val == 0)  {
3596 				kmem_free(new_propp->prop_name, name_buf_len);
3597 				kmem_free(new_propp, sizeof (ddi_prop_t));
3598 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3599 				return (DDI_PROP_NO_MEMORY);
3600 			}
3601 			bcopy(value, new_propp->prop_val, length);
3602 		}
3603 	}
3604 
3605 	/*
3606 	 * Link property into beginning of list. (Properties are LIFO order.)
3607 	 */
3608 
3609 	mutex_enter(&(DEVI(dip)->devi_lock));
3610 	propp = *list_head;
3611 	new_propp->prop_next = propp;
3612 	*list_head = new_propp;
3613 	mutex_exit(&(DEVI(dip)->devi_lock));
3614 	return (DDI_PROP_SUCCESS);
3615 }
3616 
3617 
3618 /*
3619  * ddi_prop_change:	Modify a software managed property value
3620  *
3621  *			Set new length and value if found.
3622  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3623  *			input name is the NULL string.
3624  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3625  *
3626  *			Note: an undef can be modified to be a define,
3627  *			(you can't go the other way.)
3628  */
3629 
3630 static int
3631 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3632     char *name, caddr_t value, int length)
3633 {
3634 	ddi_prop_t	*propp;
3635 	ddi_prop_t	**ppropp;
3636 	caddr_t		p = NULL;
3637 
3638 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3639 		return (DDI_PROP_INVAL_ARG);
3640 
3641 	/*
3642 	 * Preallocate buffer, even if we don't need it...
3643 	 */
3644 	if (length != 0)  {
3645 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3646 		    KM_SLEEP : KM_NOSLEEP);
3647 		if (p == NULL)	{
3648 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3649 			return (DDI_PROP_NO_MEMORY);
3650 		}
3651 	}
3652 
3653 	/*
3654 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3655 	 * number, a real dev_t value should be created based upon the dip's
3656 	 * binding driver.  See ddi_prop_add...
3657 	 */
3658 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3659 		dev = makedevice(
3660 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3661 		    getminor(dev));
3662 
3663 	/*
3664 	 * Check to see if the property exists.  If so we modify it.
3665 	 * Else we create it by calling ddi_prop_add().
3666 	 */
3667 	mutex_enter(&(DEVI(dip)->devi_lock));
3668 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3669 	if (flags & DDI_PROP_SYSTEM_DEF)
3670 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3671 	else if (flags & DDI_PROP_HW_DEF)
3672 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3673 
3674 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3675 		/*
3676 		 * Need to reallocate buffer?  If so, do it
3677 		 * carefully (reuse same space if new prop
3678 		 * is same size and non-NULL sized).
3679 		 */
3680 		if (length != 0)
3681 			bcopy(value, p, length);
3682 
3683 		if (propp->prop_len != 0)
3684 			kmem_free(propp->prop_val, propp->prop_len);
3685 
3686 		propp->prop_len = length;
3687 		propp->prop_val = p;
3688 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3689 		mutex_exit(&(DEVI(dip)->devi_lock));
3690 		return (DDI_PROP_SUCCESS);
3691 	}
3692 
3693 	mutex_exit(&(DEVI(dip)->devi_lock));
3694 	if (length != 0)
3695 		kmem_free(p, length);
3696 
3697 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3698 }
3699 
3700 /*
3701  * Common update routine used to update and encode a property.	Creates
3702  * a property handle, calls the property encode routine, figures out if
3703  * the property already exists and updates if it does.	Otherwise it
3704  * creates if it does not exist.
3705  */
3706 int
3707 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3708     char *name, void *data, uint_t nelements,
3709     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3710 {
3711 	prop_handle_t	ph;
3712 	int		rval;
3713 	uint_t		ourflags;
3714 
3715 	/*
3716 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3717 	 * return error.
3718 	 */
3719 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3720 		return (DDI_PROP_INVAL_ARG);
3721 
3722 	/*
3723 	 * Create the handle
3724 	 */
3725 	ph.ph_data = NULL;
3726 	ph.ph_cur_pos = NULL;
3727 	ph.ph_save_pos = NULL;
3728 	ph.ph_size = 0;
3729 	ph.ph_ops = &prop_1275_ops;
3730 
3731 	/*
3732 	 * ourflags:
3733 	 * For compatibility with the old interfaces.  The old interfaces
3734 	 * didn't sleep by default and slept when the flag was set.  These
3735 	 * interfaces to the opposite.	So the old interfaces now set the
3736 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3737 	 *
3738 	 * ph.ph_flags:
3739 	 * Blocked data or unblocked data allocation
3740 	 * for ph.ph_data in ddi_prop_encode_alloc()
3741 	 */
3742 	if (flags & DDI_PROP_DONTSLEEP) {
3743 		ourflags = flags;
3744 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3745 	} else {
3746 		ourflags = flags | DDI_PROP_CANSLEEP;
3747 		ph.ph_flags = DDI_PROP_CANSLEEP;
3748 	}
3749 
3750 	/*
3751 	 * Encode the data and store it in the property handle by
3752 	 * calling the prop_encode routine.
3753 	 */
3754 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3755 	    DDI_PROP_SUCCESS) {
3756 		if (rval == DDI_PROP_NO_MEMORY)
3757 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3758 		if (ph.ph_size != 0)
3759 			kmem_free(ph.ph_data, ph.ph_size);
3760 		return (rval);
3761 	}
3762 
3763 	/*
3764 	 * The old interfaces use a stacking approach to creating
3765 	 * properties.	If we are being called from the old interfaces,
3766 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3767 	 * create without checking.
3768 	 */
3769 	if (flags & DDI_PROP_STACK_CREATE) {
3770 		rval = ddi_prop_add(match_dev, dip,
3771 		    ourflags, name, ph.ph_data, ph.ph_size);
3772 	} else {
3773 		rval = ddi_prop_change(match_dev, dip,
3774 		    ourflags, name, ph.ph_data, ph.ph_size);
3775 	}
3776 
3777 	/*
3778 	 * Free the encoded data allocated in the prop_encode routine.
3779 	 */
3780 	if (ph.ph_size != 0)
3781 		kmem_free(ph.ph_data, ph.ph_size);
3782 
3783 	return (rval);
3784 }
3785 
3786 
3787 /*
3788  * ddi_prop_create:	Define a managed property:
3789  *			See above for details.
3790  */
3791 
3792 int
3793 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3794     char *name, caddr_t value, int length)
3795 {
3796 	if (!(flag & DDI_PROP_CANSLEEP)) {
3797 		flag |= DDI_PROP_DONTSLEEP;
3798 #ifdef DDI_PROP_DEBUG
3799 		if (length != 0)
3800 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3801 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3802 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3803 #endif /* DDI_PROP_DEBUG */
3804 	}
3805 	flag &= ~DDI_PROP_SYSTEM_DEF;
3806 	return (ddi_prop_update_common(dev, dip,
3807 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY), name,
3808 	    value, length, ddi_prop_fm_encode_bytes));
3809 }
3810 
3811 int
3812 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3813     char *name, caddr_t value, int length)
3814 {
3815 	if (!(flag & DDI_PROP_CANSLEEP))
3816 		flag |= DDI_PROP_DONTSLEEP;
3817 	return (ddi_prop_update_common(dev, dip,
3818 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
3819 	    DDI_PROP_TYPE_ANY),
3820 	    name, value, length, ddi_prop_fm_encode_bytes));
3821 }
3822 
3823 int
3824 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3825     char *name, caddr_t value, int length)
3826 {
3827 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3828 
3829 	/*
3830 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3831 	 * return error.
3832 	 */
3833 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3834 		return (DDI_PROP_INVAL_ARG);
3835 
3836 	if (!(flag & DDI_PROP_CANSLEEP))
3837 		flag |= DDI_PROP_DONTSLEEP;
3838 	flag &= ~DDI_PROP_SYSTEM_DEF;
3839 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3840 		return (DDI_PROP_NOT_FOUND);
3841 
3842 	return (ddi_prop_update_common(dev, dip,
3843 	    (flag | DDI_PROP_TYPE_BYTE), name,
3844 	    value, length, ddi_prop_fm_encode_bytes));
3845 }
3846 
3847 int
3848 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3849     char *name, caddr_t value, int length)
3850 {
3851 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3852 
3853 	/*
3854 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3855 	 * return error.
3856 	 */
3857 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3858 		return (DDI_PROP_INVAL_ARG);
3859 
3860 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3861 		return (DDI_PROP_NOT_FOUND);
3862 
3863 	if (!(flag & DDI_PROP_CANSLEEP))
3864 		flag |= DDI_PROP_DONTSLEEP;
3865 	return (ddi_prop_update_common(dev, dip,
3866 		(flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3867 		name, value, length, ddi_prop_fm_encode_bytes));
3868 }
3869 
3870 
3871 /*
3872  * Common lookup routine used to lookup and decode a property.
3873  * Creates a property handle, searches for the raw encoded data,
3874  * fills in the handle, and calls the property decode functions
3875  * passed in.
3876  *
3877  * This routine is not static because ddi_bus_prop_op() which lives in
3878  * ddi_impl.c calls it.  No driver should be calling this routine.
3879  */
3880 int
3881 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3882     uint_t flags, char *name, void *data, uint_t *nelements,
3883     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3884 {
3885 	int		rval;
3886 	uint_t		ourflags;
3887 	prop_handle_t	ph;
3888 
3889 	if ((match_dev == DDI_DEV_T_NONE) ||
3890 	    (name == NULL) || (strlen(name) == 0))
3891 		return (DDI_PROP_INVAL_ARG);
3892 
3893 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3894 		flags | DDI_PROP_CANSLEEP;
3895 
3896 	/*
3897 	 * Get the encoded data
3898 	 */
3899 	bzero(&ph, sizeof (prop_handle_t));
3900 
3901 	if (flags & DDI_UNBND_DLPI2) {
3902 		/*
3903 		 * For unbound dlpi style-2 devices, index into
3904 		 * the devnames' array and search the global
3905 		 * property list.
3906 		 */
3907 		ourflags &= ~DDI_UNBND_DLPI2;
3908 		rval = i_ddi_prop_search_global(match_dev,
3909 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3910 	} else {
3911 		rval = ddi_prop_search_common(match_dev, dip,
3912 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3913 		    &ph.ph_data, &ph.ph_size);
3914 
3915 	}
3916 
3917 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3918 		ASSERT(ph.ph_data == NULL);
3919 		ASSERT(ph.ph_size == 0);
3920 		return (rval);
3921 	}
3922 
3923 	/*
3924 	 * If the encoded data came from a OBP or software
3925 	 * use the 1275 OBP decode/encode routines.
3926 	 */
3927 	ph.ph_cur_pos = ph.ph_data;
3928 	ph.ph_save_pos = ph.ph_data;
3929 	ph.ph_ops = &prop_1275_ops;
3930 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3931 
3932 	rval = (*prop_decoder)(&ph, data, nelements);
3933 
3934 	/*
3935 	 * Free the encoded data
3936 	 */
3937 	if (ph.ph_size != 0)
3938 		kmem_free(ph.ph_data, ph.ph_size);
3939 
3940 	return (rval);
3941 }
3942 
3943 /*
3944  * Lookup and return an array of composite properties.  The driver must
3945  * provide the decode routine.
3946  */
3947 int
3948 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
3949     uint_t flags, char *name, void *data, uint_t *nelements,
3950     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3951 {
3952 	return (ddi_prop_lookup_common(match_dev, dip,
3953 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
3954 	    data, nelements, prop_decoder));
3955 }
3956 
3957 /*
3958  * Return 1 if a property exists (no type checking done).
3959  * Return 0 if it does not exist.
3960  */
3961 int
3962 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
3963 {
3964 	int	i;
3965 	uint_t	x = 0;
3966 
3967 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
3968 		flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
3969 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
3970 }
3971 
3972 
3973 /*
3974  * Update an array of composite properties.  The driver must
3975  * provide the encode routine.
3976  */
3977 int
3978 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
3979     char *name, void *data, uint_t nelements,
3980     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3981 {
3982 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
3983 	    name, data, nelements, prop_create));
3984 }
3985 
3986 /*
3987  * Get a single integer or boolean property and return it.
3988  * If the property does not exists, or cannot be decoded,
3989  * then return the defvalue passed in.
3990  *
3991  * This routine always succeeds.
3992  */
3993 int
3994 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
3995     char *name, int defvalue)
3996 {
3997 	int	data;
3998 	uint_t	nelements;
3999 	int	rval;
4000 
4001 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4002 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4003 #ifdef DEBUG
4004 		if (dip != NULL) {
4005 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4006 			    " 0x%x (prop = %s, node = %s%d)", flags,
4007 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4008 		}
4009 #endif /* DEBUG */
4010 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4011 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4012 	}
4013 
4014 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4015 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4016 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4017 		if (rval == DDI_PROP_END_OF_DATA)
4018 			data = 1;
4019 		else
4020 			data = defvalue;
4021 	}
4022 	return (data);
4023 }
4024 
4025 /*
4026  * Get a single 64 bit integer or boolean property and return it.
4027  * If the property does not exists, or cannot be decoded,
4028  * then return the defvalue passed in.
4029  *
4030  * This routine always succeeds.
4031  */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	/* Only the lookup-modifier flags are accepted. */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		/*
		 * NOTE(review): unlike ddi_prop_get_int(), which masks off
		 * the bad bits and continues, this returns the error code
		 * DDI_PROP_INVAL_ARG *as the property value*, contradicting
		 * the "always succeeds" contract above.  Callers cannot
		 * distinguish it from a real value -- confirm before
		 * depending on (or changing) this behavior.
		 */
		return (DDI_PROP_INVAL_ARG);
	}

	/* DDI_PROP_NOTPROM is forced: 64-bit lookups skip the PROM. */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;	/* zero-length: boolean "true" */
		else
			data = defvalue;
	}
	return (data);
}
4063 
4064 /*
4065  * Get an array of integer property
4066  */
4067 int
4068 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4069     char *name, int **data, uint_t *nelements)
4070 {
4071 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4072 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4073 #ifdef DEBUG
4074 		if (dip != NULL) {
4075 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4076 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4077 			    flags, name, ddi_driver_name(dip),
4078 			    ddi_get_instance(dip));
4079 		}
4080 #endif /* DEBUG */
4081 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4082 		LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4083 	}
4084 
4085 	return (ddi_prop_lookup_common(match_dev, dip,
4086 	    (flags | DDI_PROP_TYPE_INT), name, data,
4087 	    nelements, ddi_prop_fm_decode_ints));
4088 }
4089 
4090 /*
4091  * Get an array of 64 bit integer properties
4092  */
4093 int
4094 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4095     char *name, int64_t **data, uint_t *nelements)
4096 {
4097 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4098 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4099 #ifdef DEBUG
4100 		if (dip != NULL) {
4101 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4102 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4103 			    flags, name, ddi_driver_name(dip),
4104 			    ddi_get_instance(dip));
4105 		}
4106 #endif /* DEBUG */
4107 		return (DDI_PROP_INVAL_ARG);
4108 	}
4109 
4110 	return (ddi_prop_lookup_common(match_dev, dip,
4111 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4112 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4113 }
4114 
4115 /*
4116  * Update a single integer property.  If the property exists on the drivers
4117  * property list it updates, else it creates it.
4118  */
4119 int
4120 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4121     char *name, int data)
4122 {
4123 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4124 	    name, &data, 1, ddi_prop_fm_encode_ints));
4125 }
4126 
4127 /*
4128  * Update a single 64 bit integer property.
4129  * Update the driver property list if it exists, else create it.
4130  */
4131 int
4132 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4133     char *name, int64_t data)
4134 {
4135 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4136 	    name, &data, 1, ddi_prop_fm_encode_int64));
4137 }
4138 
4139 int
4140 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4141     char *name, int data)
4142 {
4143 	return (ddi_prop_update_common(match_dev, dip,
4144 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4145 	    name, &data, 1, ddi_prop_fm_encode_ints));
4146 }
4147 
4148 int
4149 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4150     char *name, int64_t data)
4151 {
4152 	return (ddi_prop_update_common(match_dev, dip,
4153 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4154 	    name, &data, 1, ddi_prop_fm_encode_int64));
4155 }
4156 
4157 /*
4158  * Update an array of integer property.  If the property exists on the drivers
4159  * property list it updates, else it creates it.
4160  */
4161 int
4162 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4163     char *name, int *data, uint_t nelements)
4164 {
4165 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4166 	    name, data, nelements, ddi_prop_fm_encode_ints));
4167 }
4168 
4169 /*
4170  * Update an array of 64 bit integer properties.
4171  * Update the driver property list if it exists, else create it.
4172  */
4173 int
4174 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4175     char *name, int64_t *data, uint_t nelements)
4176 {
4177 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4178 	    name, data, nelements, ddi_prop_fm_encode_int64));
4179 }
4180 
4181 int
4182 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4183     char *name, int64_t *data, uint_t nelements)
4184 {
4185 	return (ddi_prop_update_common(match_dev, dip,
4186 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4187 	    name, data, nelements, ddi_prop_fm_encode_int64));
4188 }
4189 
4190 int
4191 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4192     char *name, int *data, uint_t nelements)
4193 {
4194 	return (ddi_prop_update_common(match_dev, dip,
4195 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4196 	    name, data, nelements, ddi_prop_fm_encode_ints));
4197 }
4198 
4199 /*
4200  * Get a single string property.
4201  */
4202 int
4203 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4204     char *name, char **data)
4205 {
4206 	uint_t x;
4207 
4208 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4209 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4210 #ifdef DEBUG
4211 		if (dip != NULL) {
4212 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4213 			    "(prop = %s, node = %s%d); invalid bits ignored",
4214 			    "ddi_prop_lookup_string", flags, name,
4215 			    ddi_driver_name(dip), ddi_get_instance(dip));
4216 		}
4217 #endif /* DEBUG */
4218 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4219 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4220 	}
4221 
4222 	return (ddi_prop_lookup_common(match_dev, dip,
4223 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4224 	    &x, ddi_prop_fm_decode_string));
4225 }
4226 
4227 /*
4228  * Get an array of strings property.
4229  */
4230 int
4231 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4232     char *name, char ***data, uint_t *nelements)
4233 {
4234 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4235 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4236 #ifdef DEBUG
4237 		if (dip != NULL) {
4238 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4239 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4240 			    flags, name, ddi_driver_name(dip),
4241 			    ddi_get_instance(dip));
4242 		}
4243 #endif /* DEBUG */
4244 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4245 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4246 	}
4247 
4248 	return (ddi_prop_lookup_common(match_dev, dip,
4249 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4250 	    nelements, ddi_prop_fm_decode_strings));
4251 }
4252 
4253 /*
4254  * Update a single string property.
4255  */
4256 int
4257 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4258     char *name, char *data)
4259 {
4260 	return (ddi_prop_update_common(match_dev, dip,
4261 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4262 	    ddi_prop_fm_encode_string));
4263 }
4264 
4265 int
4266 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4267     char *name, char *data)
4268 {
4269 	return (ddi_prop_update_common(match_dev, dip,
4270 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4271 	    name, &data, 1, ddi_prop_fm_encode_string));
4272 }
4273 
4274 
4275 /*
4276  * Update an array of strings property.
4277  */
4278 int
4279 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4280     char *name, char **data, uint_t nelements)
4281 {
4282 	return (ddi_prop_update_common(match_dev, dip,
4283 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4284 	    ddi_prop_fm_encode_strings));
4285 }
4286 
4287 int
4288 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4289     char *name, char **data, uint_t nelements)
4290 {
4291 	return (ddi_prop_update_common(match_dev, dip,
4292 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4293 	    name, data, nelements,
4294 	    ddi_prop_fm_encode_strings));
4295 }
4296 
4297 
4298 /*
4299  * Get an array of bytes property.
4300  */
4301 int
4302 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4303     char *name, uchar_t **data, uint_t *nelements)
4304 {
4305 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4306 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4307 #ifdef DEBUG
4308 		if (dip != NULL) {
4309 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4310 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4311 			    flags, name, ddi_driver_name(dip),
4312 			    ddi_get_instance(dip));
4313 		}
4314 #endif /* DEBUG */
4315 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4316 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4317 	}
4318 
4319 	return (ddi_prop_lookup_common(match_dev, dip,
4320 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4321 	    nelements, ddi_prop_fm_decode_bytes));
4322 }
4323 
4324 /*
4325  * Update an array of bytes property.
4326  */
4327 int
4328 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4329     char *name, uchar_t *data, uint_t nelements)
4330 {
4331 	if (nelements == 0)
4332 		return (DDI_PROP_INVAL_ARG);
4333 
4334 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4335 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4336 }
4337 
4338 
4339 int
4340 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4341     char *name, uchar_t *data, uint_t nelements)
4342 {
4343 	if (nelements == 0)
4344 		return (DDI_PROP_INVAL_ARG);
4345 
4346 	return (ddi_prop_update_common(match_dev, dip,
4347 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4348 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4349 }
4350 
4351 
4352 /*
4353  * ddi_prop_remove_common:	Undefine a managed property:
4354  *			Input dev_t must match dev_t when defined.
4355  *			Returns DDI_PROP_NOT_FOUND, possibly.
4356  *			DDI_PROP_INVAL_ARG is also possible if dev is
4357  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4358  */
4359 int
4360 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4361 {
4362 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4363 	ddi_prop_t	*propp;
4364 	ddi_prop_t	*lastpropp = NULL;
4365 
4366 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4367 	    (strlen(name) == 0)) {
4368 		return (DDI_PROP_INVAL_ARG);
4369 	}
4370 
4371 	if (flag & DDI_PROP_SYSTEM_DEF)
4372 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4373 	else if (flag & DDI_PROP_HW_DEF)
4374 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4375 
4376 	mutex_enter(&(DEVI(dip)->devi_lock));
4377 
4378 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4379 		if (DDI_STRSAME(propp->prop_name, name) &&
4380 		    (dev == propp->prop_dev)) {
4381 			/*
4382 			 * Unlink this propp allowing for it to
4383 			 * be first in the list:
4384 			 */
4385 
4386 			if (lastpropp == NULL)
4387 				*list_head = propp->prop_next;
4388 			else
4389 				lastpropp->prop_next = propp->prop_next;
4390 
4391 			mutex_exit(&(DEVI(dip)->devi_lock));
4392 
4393 			/*
4394 			 * Free memory and return...
4395 			 */
4396 			kmem_free(propp->prop_name,
4397 			    strlen(propp->prop_name) + 1);
4398 			if (propp->prop_len != 0)
4399 				kmem_free(propp->prop_val, propp->prop_len);
4400 			kmem_free(propp, sizeof (ddi_prop_t));
4401 			return (DDI_PROP_SUCCESS);
4402 		}
4403 		lastpropp = propp;
4404 	}
4405 	mutex_exit(&(DEVI(dip)->devi_lock));
4406 	return (DDI_PROP_NOT_FOUND);
4407 }
4408 
4409 int
4410 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4411 {
4412 	return (ddi_prop_remove_common(dev, dip, name, 0));
4413 }
4414 
4415 int
4416 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4417 {
4418 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4419 }
4420 
4421 /*
4422  * e_ddi_prop_list_delete: remove a list of properties
4423  *	Note that the caller needs to provide the required protection
4424  *	(eg. devi_lock if these properties are still attached to a devi)
4425  */
4426 void
4427 e_ddi_prop_list_delete(ddi_prop_t *props)
4428 {
4429 	i_ddi_prop_list_delete(props);
4430 }
4431 
4432 /*
4433  * ddi_prop_remove_all_common:
4434  *	Used before unloading a driver to remove
4435  *	all properties. (undefines all dev_t's props.)
4436  *	Also removes `explicitly undefined' props.
4437  *	No errors possible.
4438  */
4439 void
4440 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4441 {
4442 	ddi_prop_t	**list_head;
4443 
4444 	mutex_enter(&(DEVI(dip)->devi_lock));
4445 	if (flag & DDI_PROP_SYSTEM_DEF) {
4446 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4447 	} else if (flag & DDI_PROP_HW_DEF) {
4448 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4449 	} else {
4450 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4451 	}
4452 	i_ddi_prop_list_delete(*list_head);
4453 	*list_head = NULL;
4454 	mutex_exit(&(DEVI(dip)->devi_lock));
4455 }
4456 
4457 
4458 /*
4459  * ddi_prop_remove_all:		Remove all driver prop definitions.
4460  */
4461 
4462 void
4463 ddi_prop_remove_all(dev_info_t *dip)
4464 {
4465 	ddi_prop_remove_all_common(dip, 0);
4466 }
4467 
4468 /*
4469  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4470  */
4471 
4472 void
4473 e_ddi_prop_remove_all(dev_info_t *dip)
4474 {
4475 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4476 }
4477 
4478 
4479 /*
4480  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4481  *			searches which match this property return
4482  *			the error code DDI_PROP_UNDEFINED.
4483  *
4484  *			Use ddi_prop_remove to negate effect of
4485  *			ddi_prop_undefine
4486  *
4487  *			See above for error returns.
4488  */
4489 
4490 int
4491 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4492 {
4493 	if (!(flag & DDI_PROP_CANSLEEP))
4494 		flag |= DDI_PROP_DONTSLEEP;
4495 	return (ddi_prop_update_common(dev, dip,
4496 	    (flag | DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT |
4497 	    DDI_PROP_TYPE_ANY), name, NULL, 0, ddi_prop_fm_encode_bytes));
4498 }
4499 
4500 int
4501 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4502 {
4503 	if (!(flag & DDI_PROP_CANSLEEP))
4504 		flag |= DDI_PROP_DONTSLEEP;
4505 	return (ddi_prop_update_common(dev, dip,
4506 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4507 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY),
4508 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4509 }
4510 
4511 /*
4512  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4513  *
4514  * if input dip != child_dip, then call is on behalf of child
4515  * to search PROM, do it via ddi_prop_search_common() and ascend only
4516  * if allowed.
4517  *
4518  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4519  * to search for PROM defined props only.
4520  *
4521  * Note that the PROM search is done only if the requested dev
4522  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4523  * have no associated dev, thus are automatically associated with
4524  * DDI_DEV_T_NONE.
4525  *
4526  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4527  *
4528  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4529  * that the property resides in the prom.
4530  */
4531 int
4532 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4533     ddi_prop_op_t prop_op, int mod_flags,
4534     char *name, caddr_t valuep, int *lengthp)
4535 {
4536 	int	len;
4537 	caddr_t buffer;
4538 
4539 	/*
4540 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4541 	 * look in caller's PROM if it's a self identifying device...
4542 	 *
4543 	 * Note that this is very similar to ddi_prop_op, but we
4544 	 * search the PROM instead of the s/w defined properties,
4545 	 * and we are called on by the parent driver to do this for
4546 	 * the child.
4547 	 */
4548 
4549 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4550 	    ndi_dev_is_prom_node(ch_dip) &&
4551 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4552 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4553 		if (len == -1) {
4554 			return (DDI_PROP_NOT_FOUND);
4555 		}
4556 
4557 		/*
4558 		 * If exists only request, we're done
4559 		 */
4560 		if (prop_op == PROP_EXISTS) {
4561 			return (DDI_PROP_FOUND_1275);
4562 		}
4563 
4564 		/*
4565 		 * If length only request or prop length == 0, get out
4566 		 */
4567 		if ((prop_op == PROP_LEN) || (len == 0)) {
4568 			*lengthp = len;
4569 			return (DDI_PROP_FOUND_1275);
4570 		}
4571 
4572 		/*
4573 		 * Allocate buffer if required... (either way `buffer'
4574 		 * is receiving address).
4575 		 */
4576 
4577 		switch (prop_op) {
4578 
4579 		case PROP_LEN_AND_VAL_ALLOC:
4580 
4581 			buffer = kmem_alloc((size_t)len,
4582 			    mod_flags & DDI_PROP_CANSLEEP ?
4583 			    KM_SLEEP : KM_NOSLEEP);
4584 			if (buffer == NULL) {
4585 				return (DDI_PROP_NO_MEMORY);
4586 			}
4587 			*(caddr_t *)valuep = buffer;
4588 			break;
4589 
4590 		case PROP_LEN_AND_VAL_BUF:
4591 
4592 			if (len > (*lengthp)) {
4593 				*lengthp = len;
4594 				return (DDI_PROP_BUF_TOO_SMALL);
4595 			}
4596 
4597 			buffer = valuep;
4598 			break;
4599 
4600 		default:
4601 			break;
4602 		}
4603 
4604 		/*
4605 		 * Call the PROM function to do the copy.
4606 		 */
4607 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4608 			name, buffer);
4609 
4610 		*lengthp = len; /* return the actual length to the caller */
4611 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4612 		return (DDI_PROP_FOUND_1275);
4613 	}
4614 
4615 	return (DDI_PROP_NOT_FOUND);
4616 }
4617 
4618 /*
4619  * The ddi_bus_prop_op default bus nexus prop op function.
4620  *
4621  * Code to search hardware layer (PROM), if it exists,
4622  * on behalf of child, then, if appropriate, ascend and check
4623  * my own software defined properties...
4624  */
4625 int
4626 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4627     ddi_prop_op_t prop_op, int mod_flags,
4628     char *name, caddr_t valuep, int *lengthp)
4629 {
4630 	int	error;
4631 
4632 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4633 				    name, valuep, lengthp);
4634 
4635 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4636 	    error == DDI_PROP_BUF_TOO_SMALL)
4637 		return (error);
4638 
4639 	if (error == DDI_PROP_NO_MEMORY) {
4640 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4641 		return (DDI_PROP_NO_MEMORY);
4642 	}
4643 
4644 	/*
4645 	 * Check the 'options' node as a last resort
4646 	 */
4647 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4648 		return (DDI_PROP_NOT_FOUND);
4649 
4650 	if (ch_dip == ddi_root_node())	{
4651 		/*
4652 		 * As a last resort, when we've reached
4653 		 * the top and still haven't found the
4654 		 * property, see if the desired property
4655 		 * is attached to the options node.
4656 		 *
4657 		 * The options dip is attached right after boot.
4658 		 */
4659 		ASSERT(options_dip != NULL);
4660 		/*
4661 		 * Force the "don't pass" flag to *just* see
4662 		 * what the options node has to offer.
4663 		 */
4664 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4665 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4666 		    (uint_t *)lengthp));
4667 	}
4668 
4669 	/*
4670 	 * Otherwise, continue search with parent's s/w defined properties...
4671 	 * NOTE: Using `dip' in following call increments the level.
4672 	 */
4673 
4674 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4675 	    name, valuep, (uint_t *)lengthp));
4676 }
4677 
4678 /*
4679  * External property functions used by other parts of the kernel...
4680  */
4681 
4682 /*
4683  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4684  */
4685 
4686 int
4687 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4688     caddr_t valuep, int *lengthp)
4689 {
4690 	_NOTE(ARGUNUSED(type))
4691 	dev_info_t *devi;
4692 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4693 	int error;
4694 
4695 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4696 		return (DDI_PROP_NOT_FOUND);
4697 
4698 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4699 	ddi_release_devi(devi);
4700 	return (error);
4701 }
4702 
4703 /*
4704  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4705  */
4706 
4707 int
4708 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4709     caddr_t valuep, int *lengthp)
4710 {
4711 	_NOTE(ARGUNUSED(type))
4712 	dev_info_t *devi;
4713 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4714 	int error;
4715 
4716 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4717 		return (DDI_PROP_NOT_FOUND);
4718 
4719 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4720 	ddi_release_devi(devi);
4721 	return (error);
4722 }
4723 
4724 /*
4725  * e_ddi_getprop:	See comments for ddi_getprop.
4726  */
4727 int
4728 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4729 {
4730 	_NOTE(ARGUNUSED(type))
4731 	dev_info_t *devi;
4732 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4733 	int	propvalue = defvalue;
4734 	int	proplength = sizeof (int);
4735 	int	error;
4736 
4737 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4738 		return (defvalue);
4739 
4740 	error = cdev_prop_op(dev, devi, prop_op,
4741 	    flags, name, (caddr_t)&propvalue, &proplength);
4742 	ddi_release_devi(devi);
4743 
4744 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4745 		propvalue = 1;
4746 
4747 	return (propvalue);
4748 }
4749 
4750 /*
4751  * e_ddi_getprop_int64:
4752  *
4753  * This is a typed interfaces, but predates typed properties. With the
4754  * introduction of typed properties the framework tries to ensure
4755  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4756  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4757  * typed interface invokes legacy (non-typed) interfaces:
4758  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4759  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4760  * this type of lookup as a single operation we invoke the legacy
4761  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4762  * framework ddi_prop_op(9F) implementation is expected to check for
4763  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4764  * (currently TYPE_INT64).
4765  */
4766 int64_t
4767 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4768     int flags, int64_t defvalue)
4769 {
4770 	_NOTE(ARGUNUSED(type))
4771 	dev_info_t	*devi;
4772 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4773 	int64_t		propvalue = defvalue;
4774 	int		proplength = sizeof (propvalue);
4775 	int		error;
4776 
4777 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4778 		return (defvalue);
4779 
4780 	error = cdev_prop_op(dev, devi, prop_op, flags |
4781 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4782 	ddi_release_devi(devi);
4783 
4784 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4785 		propvalue = 1;
4786 
4787 	return (propvalue);
4788 }
4789 
4790 /*
4791  * e_ddi_getproplen:	See comments for ddi_getproplen.
4792  */
4793 int
4794 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4795 {
4796 	_NOTE(ARGUNUSED(type))
4797 	dev_info_t *devi;
4798 	ddi_prop_op_t prop_op = PROP_LEN;
4799 	int error;
4800 
4801 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4802 		return (DDI_PROP_NOT_FOUND);
4803 
4804 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4805 	ddi_release_devi(devi);
4806 	return (error);
4807 }
4808 
4809 /*
4810  * Routines to get at elements of the dev_info structure
4811  */
4812 
4813 /*
4814  * ddi_binding_name: Return the driver binding name of the devinfo node
4815  *		This is the name the OS used to bind the node to a driver.
4816  */
4817 char *
4818 ddi_binding_name(dev_info_t *dip)
4819 {
4820 	return (DEVI(dip)->devi_binding_name);
4821 }
4822 
4823 /*
4824  * ddi_driver_major: Return the major number of the driver that
4825  *		the supplied devinfo is bound to (-1 if none)
4826  */
4827 major_t
4828 ddi_driver_major(dev_info_t *devi)
4829 {
4830 	return (DEVI(devi)->devi_major);
4831 }
4832 
4833 /*
4834  * ddi_driver_name: Return the normalized driver name. this is the
4835  *		actual driver name
4836  */
4837 const char *
4838 ddi_driver_name(dev_info_t *devi)
4839 {
4840 	major_t major;
4841 
4842 	if ((major = ddi_driver_major(devi)) != (major_t)-1)
4843 		return (ddi_major_to_name(major));
4844 
4845 	return (ddi_node_name(devi));
4846 }
4847 
4848 /*
4849  * i_ddi_set_binding_name:	Set binding name.
4850  *
4851  *	Set the binding name to the given name.
4852  *	This routine is for use by the ddi implementation, not by drivers.
4853  */
4854 void
4855 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4856 {
4857 	DEVI(dip)->devi_binding_name = name;
4858 
4859 }
4860 
4861 /*
4862  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4863  * the implementation has used to bind the node to a driver.
4864  */
4865 char *
4866 ddi_get_name(dev_info_t *dip)
4867 {
4868 	return (DEVI(dip)->devi_binding_name);
4869 }
4870 
4871 /*
4872  * ddi_node_name: Return the name property of the devinfo node
4873  *		This may differ from ddi_binding_name if the node name
4874  *		does not define a binding to a driver (i.e. generic names).
4875  */
4876 char *
4877 ddi_node_name(dev_info_t *dip)
4878 {
4879 	return (DEVI(dip)->devi_node_name);
4880 }
4881 
4882 
4883 /*
4884  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4885  */
4886 int
4887 ddi_get_nodeid(dev_info_t *dip)
4888 {
4889 	return (DEVI(dip)->devi_nodeid);
4890 }
4891 
4892 int
4893 ddi_get_instance(dev_info_t *dip)
4894 {
4895 	return (DEVI(dip)->devi_instance);
4896 }
4897 
4898 struct dev_ops *
4899 ddi_get_driver(dev_info_t *dip)
4900 {
4901 	return (DEVI(dip)->devi_ops);
4902 }
4903 
4904 void
4905 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
4906 {
4907 	DEVI(dip)->devi_ops = devo;
4908 }
4909 
4910 /*
4911  * ddi_set_driver_private/ddi_get_driver_private:
4912  * Get/set device driver private data in devinfo.
4913  */
4914 void
4915 ddi_set_driver_private(dev_info_t *dip, void *data)
4916 {
4917 	DEVI(dip)->devi_driver_data = data;
4918 }
4919 
4920 void *
4921 ddi_get_driver_private(dev_info_t *dip)
4922 {
4923 	return (DEVI(dip)->devi_driver_data);
4924 }
4925 
4926 /*
4927  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
4928  */
4929 
4930 dev_info_t *
4931 ddi_get_parent(dev_info_t *dip)
4932 {
4933 	return ((dev_info_t *)DEVI(dip)->devi_parent);
4934 }
4935 
4936 dev_info_t *
4937 ddi_get_child(dev_info_t *dip)
4938 {
4939 	return ((dev_info_t *)DEVI(dip)->devi_child);
4940 }
4941 
4942 dev_info_t *
4943 ddi_get_next_sibling(dev_info_t *dip)
4944 {
4945 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
4946 }
4947 
4948 dev_info_t *
4949 ddi_get_next(dev_info_t *dip)
4950 {
4951 	return ((dev_info_t *)DEVI(dip)->devi_next);
4952 }
4953 
4954 void
4955 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
4956 {
4957 	DEVI(dip)->devi_next = DEVI(nextdip);
4958 }
4959 
4960 /*
4961  * ddi_root_node:		Return root node of devinfo tree
4962  */
4963 
4964 dev_info_t *
4965 ddi_root_node(void)
4966 {
4967 	extern dev_info_t *top_devinfo;
4968 
4969 	return (top_devinfo);
4970 }
4971 
4972 /*
4973  * Miscellaneous functions:
4974  */
4975 
4976 /*
4977  * Implementation specific hooks
4978  */
4979 
4980 void
4981 ddi_report_dev(dev_info_t *d)
4982 {
4983 	char *b;
4984 
4985 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
4986 
4987 	/*
4988 	 * If this devinfo node has cb_ops, it's implicitly accessible from
4989 	 * userland, so we print its full name together with the instance
4990 	 * number 'abbreviation' that the driver may use internally.
4991 	 */
4992 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
4993 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
4994 		cmn_err(CE_CONT, "?%s%d is %s\n",
4995 		    ddi_driver_name(d), ddi_get_instance(d),
4996 		    ddi_pathname(d, b));
4997 		kmem_free(b, MAXPATHLEN);
4998 	}
4999 }
5000 
5001 /*
5002  * ddi_ctlops() is described in the assembler not to buy a new register
5003  * window when it's called and can reduce cost in climbing the device tree
5004  * without using the tail call optimization.
5005  */
5006 int
5007 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5008 {
5009 	int ret;
5010 
5011 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5012 	    (void *)&rnumber, (void *)result);
5013 
5014 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5015 }
5016 
5017 int
5018 ddi_dev_nregs(dev_info_t *dev, int *result)
5019 {
5020 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5021 }
5022 
5023 int
5024 ddi_dev_is_sid(dev_info_t *d)
5025 {
5026 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5027 }
5028 
5029 int
5030 ddi_slaveonly(dev_info_t *d)
5031 {
5032 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5033 }
5034 
5035 int
5036 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5037 {
5038 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5039 }
5040 
5041 int
5042 ddi_streams_driver(dev_info_t *dip)
5043 {
5044 	if (i_ddi_devi_attached(dip) &&
5045 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5046 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5047 		return (DDI_SUCCESS);
5048 	return (DDI_FAILURE);
5049 }
5050 
5051 /*
5052  * callback free list
5053  */
5054 
5055 static int ncallbacks;
5056 static int nc_low = 170;
5057 static int nc_med = 512;
5058 static int nc_high = 2048;
5059 static struct ddi_callback *callbackq;
5060 static struct ddi_callback *callbackqfree;
5061 
5062 /*
5063  * set/run callback lists
5064  */
5065 struct	cbstats	{
5066 	kstat_named_t	cb_asked;
5067 	kstat_named_t	cb_new;
5068 	kstat_named_t	cb_run;
5069 	kstat_named_t	cb_delete;
5070 	kstat_named_t	cb_maxreq;
5071 	kstat_named_t	cb_maxlist;
5072 	kstat_named_t	cb_alloc;
5073 	kstat_named_t	cb_runouts;
5074 	kstat_named_t	cb_L2;
5075 	kstat_named_t	cb_grow;
5076 } cbstats = {
5077 	{"asked",	KSTAT_DATA_UINT32},
5078 	{"new",		KSTAT_DATA_UINT32},
5079 	{"run",		KSTAT_DATA_UINT32},
5080 	{"delete",	KSTAT_DATA_UINT32},
5081 	{"maxreq",	KSTAT_DATA_UINT32},
5082 	{"maxlist",	KSTAT_DATA_UINT32},
5083 	{"alloc",	KSTAT_DATA_UINT32},
5084 	{"runouts",	KSTAT_DATA_UINT32},
5085 	{"L2",		KSTAT_DATA_UINT32},
5086 	{"grow",	KSTAT_DATA_UINT32},
5087 };
5088 
5089 #define	nc_asked	cb_asked.value.ui32
5090 #define	nc_new		cb_new.value.ui32
5091 #define	nc_run		cb_run.value.ui32
5092 #define	nc_delete	cb_delete.value.ui32
5093 #define	nc_maxreq	cb_maxreq.value.ui32
5094 #define	nc_maxlist	cb_maxlist.value.ui32
5095 #define	nc_alloc	cb_alloc.value.ui32
5096 #define	nc_runouts	cb_runouts.value.ui32
5097 #define	nc_L2		cb_L2.value.ui32
5098 #define	nc_grow		cb_grow.value.ui32
5099 
5100 static kmutex_t ddi_callback_mutex;
5101 
5102 /*
5103  * callbacks are handled using a L1/L2 cache. The L1 cache
5104  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5105  * we can't get callbacks from the L1 cache [because pageout is doing
5106  * I/O at the time freemem is 0], we allocate callbacks out of the
5107  * L2 cache. The L2 cache is static and depends on the memory size.
5108  * [We might also count the number of devices at probe time and
5109  * allocate one structure per device and adjust for deferred attach]
5110  */
5111 void
5112 impl_ddi_callback_init(void)
5113 {
5114 	int	i;
5115 	uint_t	physmegs;
5116 	kstat_t	*ksp;
5117 
5118 	physmegs = physmem >> (20 - PAGESHIFT);
5119 	if (physmegs < 48) {
5120 		ncallbacks = nc_low;
5121 	} else if (physmegs < 128) {
5122 		ncallbacks = nc_med;
5123 	} else {
5124 		ncallbacks = nc_high;
5125 	}
5126 
5127 	/*
5128 	 * init free list
5129 	 */
5130 	callbackq = kmem_zalloc(
5131 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5132 	for (i = 0; i < ncallbacks-1; i++)
5133 		callbackq[i].c_nfree = &callbackq[i+1];
5134 	callbackqfree = callbackq;
5135 
5136 	/* init kstats */
5137 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5138 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5139 		ksp->ks_data = (void *) &cbstats;
5140 		kstat_install(ksp);
5141 	}
5142 
5143 }
5144 
/*
 * callback_insert: add a (funcp, arg) callback request to the list headed
 * by *listid, coalescing with an existing entry for the same function and
 * argument by bumping its count.  Allocation falls back from kmem_alloc
 * (L1) to the static L2 pool to a KM_PANIC last resort.
 *
 * Caller must hold ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* scan for an existing entry to coalesce with; remember the tail */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 failed; try the static L2 pool */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too: last-ditch KM_PANIC allocation */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* append at the tail (or make it the head of an empty list) */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	/* c_size records the actual allocation size for later kmem_free */
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5188 
5189 void
5190 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5191 {
5192 	mutex_enter(&ddi_callback_mutex);
5193 	cbstats.nc_asked++;
5194 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5195 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5196 	(void) callback_insert(funcp, arg, listid, 1);
5197 	mutex_exit(&ddi_callback_mutex);
5198 }
5199 
/*
 * real_callback_run: softcall handler that drains the callback list
 * headed by *Queue.  Each entry is popped, freed (back to the L2 pool
 * if it came from there), and its function invoked up to c_count
 * times; a callback returning 0 is re-queued with its remaining count.
 * `pending' is a snapshot of total counts taken on the first pass and
 * drives the outer drain loop.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* first pass only: total up all queued counts */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		/* unlink the head entry and return it to its pool */
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* run the callback, dropping the lock across each call */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* not ready: requeue the remaining count */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
					count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5260 
/*
 * ddi_run_callback: schedule the callback list headed by *listid to be
 * drained by real_callback_run() in softcall context.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5266 
5267 dev_info_t *
5268 nodevinfo(dev_t dev, int otyp)
5269 {
5270 	_NOTE(ARGUNUSED(dev, otyp))
5271 	return ((dev_info_t *)0);
5272 }
5273 
5274 /*
5275  * A driver should support its own getinfo(9E) entry point. This function
5276  * is provided as a convenience for ON drivers that don't expect their
5277  * getinfo(9E) entry point to be called. A driver that uses this must not
5278  * call ddi_create_minor_node.
5279  */
5280 int
5281 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5282 {
5283 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5284 	return (DDI_FAILURE);
5285 }
5286 
5287 /*
5288  * A driver should support its own getinfo(9E) entry point. This function
5289  * is provided as a convenience for ON drivers that where the minor number
5290  * is the instance. Drivers that do not have 1:1 mapping must implement
5291  * their own getinfo(9E) function.
5292  */
5293 int
5294 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5295     void *arg, void **result)
5296 {
5297 	_NOTE(ARGUNUSED(dip))
5298 	int	instance;
5299 
5300 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5301 		return (DDI_FAILURE);
5302 
5303 	instance = getminor((dev_t)(uintptr_t)arg);
5304 	*result = (void *)(uintptr_t)instance;
5305 	return (DDI_SUCCESS);
5306 }
5307 
/*
 * ddifail: stub attach(9E)-style entry point that always fails.
 */
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	return (DDI_FAILURE);
}
5314 
/*
 * ddi_no_dma_map: stub bus_dma_map entry for nexi without DMA support;
 * always fails with DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	return (DDI_DMA_NOMAPPING);
}
5322 
/*
 * ddi_no_dma_allochdl: stub bus_dma_allochdl entry; always fails with
 * DDI_DMA_BADATTR.
 */
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	return (DDI_DMA_BADATTR);
}
5330 
/*
 * ddi_no_dma_freehdl: stub bus_dma_freehdl entry; always fails.
 */
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5338 
/*
 * ddi_no_dma_bindhdl: stub bus_dma_bindhdl entry; always fails with
 * DDI_DMA_NOMAPPING.
 */
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	return (DDI_DMA_NOMAPPING);
}
5347 
/*
 * ddi_no_dma_unbindhdl: stub bus_dma_unbindhdl entry; always fails.
 */
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	return (DDI_FAILURE);
}
5355 
/*
 * ddi_no_dma_flush: stub bus_dma_flush entry; always fails.
 */
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	return (DDI_FAILURE);
}
5364 
/*
 * ddi_no_dma_win: stub bus_dma_win entry; always fails.
 */
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	return (DDI_FAILURE);
}
5373 
/*
 * ddi_no_dma_mctl: stub bus_dma_ctl entry; always fails.
 */
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	return (DDI_FAILURE);
}
5382 
/*
 * ddivoid: generic do-nothing entry point.
 */
void
ddivoid(void)
{}
5386 
/*
 * nochpoll: stub chpoll(9E) entry for drivers that do not support
 * polling; always fails with ENXIO.
 */
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	return (ENXIO);
}
5394 
/*
 * ddi_get_cred: return the credentials of the current context.
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5400 
/*
 * ddi_get_lbolt: return the current lbolt value (clock ticks since boot).
 */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5406 
5407 time_t
5408 ddi_get_time(void)
5409 {
5410 	time_t	now;
5411 
5412 	if ((now = gethrestime_sec()) == 0) {
5413 		timestruc_t ts;
5414 		mutex_enter(&tod_lock);
5415 		ts = tod_get();
5416 		mutex_exit(&tod_lock);
5417 		return (ts.tv_sec);
5418 	} else {
5419 		return (now);
5420 	}
5421 }
5422 
5423 pid_t
5424 ddi_get_pid(void)
5425 {
5426 	return (ttoproc(curthread)->p_pid);
5427 }
5428 
/*
 * ddi_get_kt_did: return the unique id of the current kernel thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5434 
5435 /*
5436  * This function returns B_TRUE if the caller can reasonably expect that a call
5437  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5438  * by user-level signal.  If it returns B_FALSE, then the caller should use
5439  * other means to make certain that the wait will not hang "forever."
5440  *
5441  * It does not check the signal mask, nor for reception of any particular
5442  * signal.
5443  *
5444  * Currently, a thread can receive a signal if it's not a kernel thread and it
5445  * is not in the middle of exit(2) tear-down.  Threads that are in that
5446  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5447  * cv_timedwait, and qwait_sig to qwait.
5448  */
5449 boolean_t
5450 ddi_can_receive_sig(void)
5451 {
5452 	proc_t *pp;
5453 
5454 	if (curthread->t_proc_flag & TP_LWPEXIT)
5455 		return (B_FALSE);
5456 	if ((pp = ttoproc(curthread)) == NULL)
5457 		return (B_FALSE);
5458 	return (pp->p_as != &kas);
5459 }
5460 
5461 /*
5462  * Swap bytes in 16-bit [half-]words
5463  */
/*
 * Swap adjacent byte pairs from 'src' into 'dst'.  A trailing odd
 * byte (if nbytes is odd) is neither copied nor swapped, matching
 * the historical swab() contract.  Source and destination must not
 * overlap.
 *
 * The pair counter is kept as size_t (not int) so that byte counts
 * larger than INT_MAX*2 on LP64 kernels are not silently truncated.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *pf = (unsigned char *)src;
	unsigned char *pt = (unsigned char *)dst;
	unsigned char tmp;
	size_t nshorts;

	nshorts = nbytes >> 1;

	while (nshorts-- != 0) {
		tmp = *pf++;
		*pt++ = *pf++;
		*pt++ = tmp;
	}
}
5480 
5481 static void
5482 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5483 {
5484 	struct ddi_minor_data *dp;
5485 
5486 	mutex_enter(&(DEVI(ddip)->devi_lock));
5487 	i_devi_enter(ddip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);
5488 
5489 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5490 		DEVI(ddip)->devi_minor = dmdp;
5491 	} else {
5492 		while (dp->next != (struct ddi_minor_data *)NULL)
5493 			dp = dp->next;
5494 		dp->next = dmdp;
5495 	}
5496 
5497 	i_devi_exit(ddip, DEVI_S_MD_UPDATE, 1);
5498 	mutex_exit(&(DEVI(ddip)->devi_lock));
5499 }
5500 
5501 /*
5502  * Part of the obsolete SunCluster DDI Hooks.
5503  * Keep for binary compatibility
5504  */
minor_t
ddi_getiminor(dev_t dev)
{
	/* Identical to getminor(); retained only for old binaries. */
	return (getminor(dev));
}
5510 
5511 static int
5512 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5513 {
5514 	int se_flag;
5515 	int kmem_flag;
5516 	int se_err;
5517 	char *pathname, *class_name;
5518 	sysevent_t *ev = NULL;
5519 	sysevent_id_t eid;
5520 	sysevent_value_t se_val;
5521 	sysevent_attr_list_t *ev_attr_list = NULL;
5522 
5523 	/* determine interrupt context */
5524 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5525 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5526 
5527 	i_ddi_di_cache_invalidate(kmem_flag);
5528 
5529 #ifdef DEBUG
5530 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5531 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5532 		    "interrupt level by driver %s",
5533 		    ddi_driver_name(dip));
5534 	}
5535 #endif /* DEBUG */
5536 
5537 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5538 	if (ev == NULL) {
5539 		goto fail;
5540 	}
5541 
5542 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5543 	if (pathname == NULL) {
5544 		sysevent_free(ev);
5545 		goto fail;
5546 	}
5547 
5548 	(void) ddi_pathname(dip, pathname);
5549 	ASSERT(strlen(pathname));
5550 	se_val.value_type = SE_DATA_TYPE_STRING;
5551 	se_val.value.sv_string = pathname;
5552 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5553 	    &se_val, se_flag) != 0) {
5554 		kmem_free(pathname, MAXPATHLEN);
5555 		sysevent_free(ev);
5556 		goto fail;
5557 	}
5558 	kmem_free(pathname, MAXPATHLEN);
5559 
5560 	/* add the device class attribute */
5561 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5562 		se_val.value_type = SE_DATA_TYPE_STRING;
5563 		se_val.value.sv_string = class_name;
5564 		if (sysevent_add_attr(&ev_attr_list,
5565 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5566 			sysevent_free_attr(ev_attr_list);
5567 			goto fail;
5568 		}
5569 	}
5570 
5571 	/*
5572 	 * allow for NULL minor names
5573 	 */
5574 	if (minor_name != NULL) {
5575 		se_val.value.sv_string = minor_name;
5576 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5577 		    &se_val, se_flag) != 0) {
5578 			sysevent_free_attr(ev_attr_list);
5579 			sysevent_free(ev);
5580 			goto fail;
5581 		}
5582 	}
5583 
5584 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5585 		sysevent_free_attr(ev_attr_list);
5586 		sysevent_free(ev);
5587 		goto fail;
5588 	}
5589 
5590 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5591 		if (se_err == SE_NO_TRANSPORT) {
5592 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5593 			    "for driver %s (%s). Run devfsadm -i %s",
5594 			    ddi_driver_name(dip), "syseventd not responding",
5595 			    ddi_driver_name(dip));
5596 		} else {
5597 			sysevent_free(ev);
5598 			goto fail;
5599 		}
5600 	}
5601 
5602 	sysevent_free(ev);
5603 	return (DDI_SUCCESS);
5604 fail:
5605 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5606 	    "for driver %s. Run devfsadm -i %s",
5607 	    ddi_driver_name(dip), ddi_driver_name(dip));
5608 	return (DDI_SUCCESS);
5609 }
5610 
5611 /*
5612  * failing to remove a minor node is not of interest
5613  * therefore we do not generate an error message
5614  */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate(KM_SLEEP);

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	/* NOTE(review): with KM_SLEEP this allocation should not fail */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/* on success the event takes ownership of the attribute list */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5710 
5711 /*
5712  * Derive the device class of the node.
5713  * Device class names aren't defined yet. Until this is done we use
5714  * devfs event subclass names as device class names.
5715  */
5716 static int
5717 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5718 {
5719 	int rv = DDI_SUCCESS;
5720 
5721 	if (i_ddi_devi_class(dip) == NULL) {
5722 		if (strncmp(node_type, DDI_NT_BLOCK,
5723 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5724 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5725 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5726 		    strcmp(node_type, DDI_NT_FD) != 0) {
5727 
5728 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5729 
5730 		} else if (strncmp(node_type, DDI_NT_NET,
5731 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5732 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5733 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5734 
5735 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5736 
5737 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5738 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5739 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5740 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5741 
5742 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5743 
5744 		} else if (strncmp(node_type, DDI_PSEUDO,
5745 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5746 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5747 		    sizeof (ESC_LOFI) -1) == 0)) {
5748 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5749 		}
5750 	}
5751 
5752 	return (rv);
5753 }
5754 
5755 /*
5756  * Check compliance with PSARC 2003/375:
5757  *
5758  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5759  * exceed IFNAMSIZ (16) characters in length.
5760  */
5761 static boolean_t
5762 verify_name(char *name)
5763 {
5764 	size_t	len = strlen(name);
5765 	char	*cp;
5766 
5767 	if (len == 0 || len > IFNAMSIZ)
5768 		return (B_FALSE);
5769 
5770 	for (cp = name; *cp != '\0'; cp++) {
5771 		if (!isalnum(*cp) && *cp != '_')
5772 			return (B_FALSE);
5773 	}
5774 
5775 	return (B_TRUE);
5776 }
5777 
5778 /*
5779  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5780  *				attach it to the given devinfo node.
5781  */
5782 
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are supported */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != (major_t)-1);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive and record the device class for ordinary minor nodes */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	/* clone devices alias through the clone driver's major number */
	if (flag & CLONE_DEV) {
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
5897 
/*
 * Public DDI interface: create an ordinary (DDM_MINOR) minor node
 * with no device privileges.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
5905 
/*
 * Create a minor node with explicit read/write privilege requirements
 * and an access mode applied to privileged opens.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
5914 
/*
 * Create a DDM_DEFAULT minor node (the node used when a device is
 * opened without an explicit minor name).
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
5922 
5923 /*
5924  * Internal (non-ddi) routine for drivers to export names known
5925  * to the kernel (especially ddi_pathname_to_dev_t and friends)
5926  * but not exported externally to /dev
5927  */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	/* DDM_INTERNAL_PATH nodes are never exposed through /dev */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
5935 
/*
 * Remove the minor node named 'name' from 'dip', or all minor nodes
 * when 'name' is NULL.  Frees the node's name, device privilege and
 * dacf client data.  Serialized by devi_lock and DEVI_S_MD_UPDATE.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	struct ddi_minor_data *dmdp, *dmdp1;
	struct ddi_minor_data **dmdp_prev;

	mutex_enter(&(DEVI(dip)->devi_lock));
	i_devi_enter(dip, DEVI_S_MD_UPDATE, DEVI_S_MD_UPDATE, 1);

	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before dmdp may be freed below */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal-path nodes are never logged */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			/* unlink the freed node from the list */
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}

	i_devi_exit(dip, DEVI_S_MD_UPDATE, 1);
	mutex_exit(&(DEVI(dip)->devi_lock));
}
5983 
5984 
/*
 * Return non-zero if the system is currently panicking.
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
5990 
5991 
5992 /*
5993  * Find first bit set in a mask (returned counting from 1 up)
5994  */
5995 
int
ddi_ffs(long mask)
{
	/* delegate to the common ffs() implementation */
	return (ffs(mask));
}
6001 
6002 /*
6003  * Find last bit set. Take mask and clear
6004  * all but the most significant bit, and
6005  * then let ffs do the rest of the work.
6006  *
6007  * Algorithm courtesy of Steve Chessin.
6008  */
6009 
int
ddi_fls(long mask)
{
	long bit = mask;

	/*
	 * Strip the lowest set bit repeatedly until only the most
	 * significant set bit remains, then let ffs() report its
	 * (1-based) position.  ffs(0) == 0 handles an empty mask.
	 */
	for (;;) {
		long stripped = bit & (bit - 1);

		if (stripped == 0)
			break;
		bit = stripped;
	}

	return (ffs(bit));
}
6022 
6023 /*
6024  * The next five routines comprise generic storage management utilities
6025  * for driver soft state structures (in "the old days," this was done
6026  * with a statically sized array - big systems and dynamic loading
6027  * and unloading make heap allocation more attractive)
6028  */
6029 
6030 /*
6031  * Allocate a set of pointers to 'n_items' objects of size 'size'
6032  * bytes.  Each pointer is initialized to nil.
6033  *
6034  * The 'size' and 'n_items' values are stashed in the opaque
6035  * handle returned to the caller.
6036  *
6037  * This implementation interprets 'set of pointers' to mean 'array
6038  * of pointers' but note that nothing in the interface definition
6039  * precludes an implementation that uses, for example, a linked list.
6040  * However there should be a small efficiency gain from using an array
6041  * at lookup time.
6042  *
6043  * NOTE	As an optimization, we make our growable array allocations in
6044  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6045  *	gives us anyway.  It should save us some free/realloc's ..
6046  *
6047  *	As a further optimization, we make the growable array start out
6048  *	with MIN_N_ITEMS in it.
6049  */
6050 
6051 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6052 
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	struct i_ddi_soft_state *ss;

	/* reject a missing handle, an already-initialized handle, or size 0 */
	if (state_p == NULL || *state_p != NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/*
		 * Round n_items up to a power of two: when the highest
		 * and lowest set bits coincide, n_items already is one
		 * (fls == ffs), so back off one bit; otherwise 1 <<
		 * fls(n_items) is the next power of two above it.
		 */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;

	return (0);
}
6083 
6084 
6085 /*
6086  * Allocate a state structure of size 'size' to be associated
6087  * with item 'item'.
6088  *
6089  * In this implementation, the array is extended to
6090  * allow the requested offset, if needed.
6091  */
int
ddi_soft_state_zalloc(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *new_element;

	if ((ss = state) == NULL || item < 0)
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	/* size == 0 means the handle was never initialized (or was finied) */
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void	**new_array;
		size_t	new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6184 
6185 
6186 /*
6187  * Fetch a pointer to the allocated soft state structure.
6188  *
6189  * This is designed to be cheap.
6190  *
6191  * There's an argument that there should be more checking for
6192  * nil pointers and out of bounds on the array.. but we do a lot
6193  * of that in the alloc/free routines.
6194  *
6195  * An array has the convenience that we don't need to lock read-access
6196  * to it c.f. a linked list.  However our "expanding array" strategy
6197  * means that we should hold a readers lock on the i_ddi_soft_state
6198  * structure.
6199  *
6200  * However, from a performance viewpoint, we need to do it without
6201  * any locks at all -- this also makes it a leaf routine.  The algorithm
6202  * is 'lock-free' because we only discard the pointer arrays at
6203  * ddi_soft_state_fini() time.
6204  */
6205 void *
6206 ddi_get_soft_state(void *state, int item)
6207 {
6208 	struct i_ddi_soft_state *ss = state;
6209 
6210 	ASSERT(ss != NULL && item >= 0);
6211 
6212 	if (item < ss->n_items && ss->array != NULL)
6213 		return (ss->array[item]);
6214 	return (NULL);
6215 }
6216 
6217 /*
6218  * Free the state structure corresponding to 'item.'   Freeing an
6219  * element that has either gone or was never allocated is not
6220  * considered an error.  Note that we free the state structure, but
6221  * we don't shrink our pointer array, or discard 'dirty' arrays,
6222  * since even a few pointers don't really waste too much memory.
6223  *
6224  * Passing an item number that is out of bounds, or a null pointer will
6225  * provoke an error message.
6226  */
void
ddi_soft_state_free(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	if ((ss = state) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* detach the element under the lock, free it afterwards */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/* ss->size is set once at init time, so reading it unlocked is ok */
	if (element)
		kmem_free(element, ss->size);
}
6261 
6262 
6263 /*
6264  * Free the entire set of pointers, and any
6265  * soft state structures contained therein.
6266  *
6267  * Note that we don't grab the ss->lock mutex, even though
6268  * we're inspecting the various fields of the data structure.
6269  *
6270  * There is an implicit assumption that this routine will
6271  * never run concurrently with any of the above on this
6272  * particular state structure i.e. by the time the driver
6273  * calls this routine, there should be no other threads
6274  * running in the driver.
6275  */
void
ddi_soft_state_fini(void **state_p)
{
	struct i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	if (state_p == NULL || (ss = *state_p) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* size == 0 means the handle was never initialized */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* free every remaining element, then the current pointer array */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	/* mark the handle as uninitialized for the caller */
	*state_p = NULL;
}
6315 
6316 /*
6317  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6318  * Storage is double buffered to prevent updates during devi_addr use -
6319  * double buffering is adaquate for reliable ddi_deviname() consumption.
6320  * The double buffer is not freed until dev_info structure destruction
6321  * (by i_ddi_free_node).
6322  */
6323 void
6324 ddi_set_name_addr(dev_info_t *dip, char *name)
6325 {
6326 	char	*buf = DEVI(dip)->devi_addr_buf;
6327 	char	*newaddr;
6328 
6329 	if (buf == NULL) {
6330 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6331 		DEVI(dip)->devi_addr_buf = buf;
6332 	}
6333 
6334 	if (name) {
6335 		ASSERT(strlen(name) < MAXNAMELEN);
6336 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6337 		    (buf + MAXNAMELEN) : buf;
6338 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6339 	} else
6340 		newaddr = NULL;
6341 
6342 	DEVI(dip)->devi_addr = newaddr;
6343 }
6344 
/*
 * Return the unit-address set by ddi_set_name_addr(), or NULL.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6350 
/*
 * Attach parent-private data to a devinfo node.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6356 
/*
 * Return the parent-private data attached to a devinfo node.
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6362 
6363 /*
6364  * ddi_name_to_major: Returns the major number of a module given its name.
6365  */
major_t
ddi_name_to_major(char *name)
{
	/* thin wrapper over the modctl name/major binding lookup */
	return (mod_name_to_major(name));
}
6371 
6372 /*
6373  * ddi_major_to_name: Returns the module name bound to a major number.
6374  */
char *
ddi_major_to_name(major_t major)
{
	/* thin wrapper over the modctl major/name binding lookup */
	return (mod_major_to_name(major));
}
6380 
6381 /*
6382  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6383  * pointed at by 'name.'  A devinfo node is named as a result of calling
6384  * ddi_initchild().
6385  *
6386  * Note: the driver must be held before calling this function!
6387  */
char *
ddi_deviname(dev_info_t *dip, char *name)
{
	char *addrname;
	char none = '\0';

	/* the root node has the empty name */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	if (i_ddi_node_state(dip) < DS_BOUND) {
		addrname = &none;
	} else {
		/*
		 * Use ddi_get_name_addr() without checking state so we get
		 * a unit-address if we are called after ddi_set_name_addr()
		 * by nexus DDI_CTL_INITCHILD code, but before completing
		 * node promotion to DS_INITIALIZED.  We currently have
		 * two situations where we are called in this state:
		 *   o  For framework processing of a path-oriented alias.
		 *   o  If a SCSA nexus driver calls ddi_devid_register()
		 *	from it's tran_tgt_init(9E) implementation.
		 */
		addrname = ddi_get_name_addr(dip);
		if (addrname == NULL)
			addrname = &none;
	}

	/* NOTE(review): callers must supply a buffer of at least MAXNAMELEN */
	if (*addrname == '\0') {
		(void) sprintf(name, "/%s", ddi_node_name(dip));
	} else {
		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
	}

	return (name);
}
6425 
6426 /*
6427  * Spits out the name of device node, typically name@addr, for a given node,
6428  * using the driver name, not the nodename.
6429  *
6430  * Used by match_parent. Not to be used elsewhere.
6431  */
char *
i_ddi_parname(dev_info_t *dip, char *name)
{
	char *addrname;

	/* the root node has the empty name */
	if (dip == ddi_root_node()) {
		*name = '\0';
		return (name);
	}

	/* the node must be named (initialized) for its address to exist */
	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);

	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
		(void) sprintf(name, "%s", ddi_binding_name(dip));
	else
		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
	return (name);
}
6450 
/*
 * Recursively build the /devices-style pathname of 'dip' into 'path'
 * by first emitting all ancestors, then appending this node's
 * "/name@addr" component.  The root node contributes the empty string.
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}
6465 
/*
 * Return the pathname of 'dip' in 'path' (caller supplies a
 * MAXPATHLEN buffer).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6471 
6472 /*
6473  * Given a dev_t, return the pathname of the corresponding device in the
6474  * buffer pointed at by "path."  The buffer is assumed to be large enough
6475  * to hold the pathname of the device (MAXPATHLEN).
6476  *
6477  * The pathname of a device is the pathname of the devinfo node to which
6478  * the device "belongs," concatenated with the character ':' and the name
6479  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6480  * just the pathname of the devinfo node is returned without driving attach
6481  * of that node.  For a non-zero spec_type, an attach is performed and a
6482  * search of the minor list occurs.
6483  *
6484  * It is possible that the path associated with the dev_t is not
6485  * currently available in the devinfo tree.  In order to have a
6486  * dev_t, a device must have been discovered before, which means
6487  * that the path is always in the instance tree.  The one exception
6488  * to this is if the dev_t is associated with a pseudo driver, in
6489  * which case the device must exist on the pseudo branch of the
6490  * devinfo tree as a result of parsing .conf files.
6491  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		mutex_enter(&(DEVI(dip)->devi_lock));
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);
		/* a missing minor name means the dev_t does not exist */
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6545 
6546 /*
6547  * Given a major number and an instance, return the path.
6548  * This interface does NOT drive attach.
6549  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	/* reject out-of-range majors and the "no instance" sentinel */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6602 
6603 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6604 
6605 /*
6606  * Given the dip for a network interface return the ppa for that interface.
6607  *
6608  * In all cases except GLD v0 drivers, the ppa == instance.
6609  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6610  * So for these drivers when the attach routine calls gld_register(),
 * the GLD framework creates an integer property called
 * "SUNW,gld_v0_ppa" (GLD_DRIVER_PPA) that can be queried here.
6613  *
6614  * The only time this function is used is when a system is booting over nfs.
6615  * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
6617  */
6618 int
6619 i_ddi_devi_get_ppa(dev_info_t *dip)
6620 {
6621 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6622 			DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6623 			GLD_DRIVER_PPA, ddi_get_instance(dip)));
6624 }
6625 
6626 /*
6627  * i_ddi_devi_set_ppa() should only be called from gld_register()
6628  * and only for GLD v0 drivers
6629  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* record the GLD v0 attach-order ppa as a devinfo property */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6635 
6636 
6637 /*
6638  * Private DDI Console bell functions.
6639  */
6640 void
6641 ddi_ring_console_bell(clock_t duration)
6642 {
6643 	if (ddi_console_bell_func != NULL)
6644 		(*ddi_console_bell_func)(duration);
6645 }
6646 
/*
 * Register (or clear, with NULL) the function used by
 * ddi_ring_console_bell() to ring the console bell.
 */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
6652 
6653 int
6654 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6655 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6656 {
6657 	int (*funcp)() = ddi_dma_allochdl;
6658 	ddi_dma_attr_t dma_attr;
6659 	struct bus_ops *bop;
6660 
6661 	if (attr == (ddi_dma_attr_t *)0)
6662 		return (DDI_DMA_BADATTR);
6663 
6664 	dma_attr = *attr;
6665 
6666 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6667 	if (bop && bop->bus_dma_allochdl)
6668 		funcp = bop->bus_dma_allochdl;
6669 
6670 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6671 }
6672 
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	/*
	 * NOTE(review): the HD macro appears to expand in terms of the
	 * local identifier 'h' (see also ddi_dma_getwin/set_sbus64);
	 * keep this local's name — confirm against HD's definition.
	 */
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
6679 
/* callback list id used to retry failed ddi_dma_mem_alloc() requests */
static uintptr_t dma_mem_list_id = 0;
6681 
6682 
6683 int
6684 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6685 	ddi_device_acc_attr_t *accattrp, uint_t flags,
6686 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6687 	size_t *real_length, ddi_acc_handle_t *handlep)
6688 {
6689 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6690 	dev_info_t *dip = hp->dmai_rdip;
6691 	ddi_acc_hdl_t *ap;
6692 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6693 	uint_t sleepflag, xfermodes;
6694 	int (*fp)(caddr_t);
6695 	int rval;
6696 
6697 	if (waitfp == DDI_DMA_SLEEP)
6698 		fp = (int (*)())KM_SLEEP;
6699 	else if (waitfp == DDI_DMA_DONTWAIT)
6700 		fp = (int (*)())KM_NOSLEEP;
6701 	else
6702 		fp = waitfp;
6703 	*handlep = impl_acc_hdl_alloc(fp, arg);
6704 	if (*handlep == NULL)
6705 		return (DDI_FAILURE);
6706 
6707 	/* check if the cache attributes are supported */
6708 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6709 		return (DDI_FAILURE);
6710 
6711 	/*
6712 	 * Transfer the meaningful bits to xfermodes.
6713 	 * Double-check if the 3rd party driver correctly sets the bits.
6714 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
6715 	 */
6716 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
6717 	if (xfermodes == 0) {
6718 		xfermodes = DDI_DMA_STREAMING;
6719 	}
6720 
6721 	/*
6722 	 * initialize the common elements of data access handle
6723 	 */
6724 	ap = impl_acc_hdl_get(*handlep);
6725 	ap->ah_vers = VERS_ACCHDL;
6726 	ap->ah_dip = dip;
6727 	ap->ah_offset = 0;
6728 	ap->ah_len = 0;
6729 	ap->ah_xfermodes = flags;
6730 	ap->ah_acc = *accattrp;
6731 
6732 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
6733 	if (xfermodes == DDI_DMA_CONSISTENT) {
6734 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6735 		    flags, accattrp, kaddrp, NULL, ap);
6736 		*real_length = length;
6737 	} else {
6738 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
6739 		    flags, accattrp, kaddrp, real_length, ap);
6740 	}
6741 	if (rval == DDI_SUCCESS) {
6742 		ap->ah_len = (off_t)(*real_length);
6743 		ap->ah_addr = *kaddrp;
6744 	} else {
6745 		impl_acc_hdl_free(*handlep);
6746 		*handlep = (ddi_acc_handle_t)NULL;
6747 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
6748 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
6749 		}
6750 		rval = DDI_FAILURE;
6751 	}
6752 	return (rval);
6753 }
6754 
6755 void
6756 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
6757 {
6758 	ddi_acc_hdl_t *ap;
6759 
6760 	ap = impl_acc_hdl_get(*handlep);
6761 	ASSERT(ap);
6762 
6763 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
6764 
6765 	/*
6766 	 * free the handle
6767 	 */
6768 	impl_acc_hdl_free(*handlep);
6769 	*handlep = (ddi_acc_handle_t)NULL;
6770 
6771 	if (dma_mem_list_id != 0) {
6772 		ddi_run_callback(&dma_mem_list_id);
6773 	}
6774 }
6775 
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	/* describe the buf(9S) as a DMA request object */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: the buf carries a page list, not a mapping */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* a shadow page list accompanies the vaddr */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
							bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
				(bp->b_flags & (B_PHYS | B_REMAPPED)) ?
				DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the bus nexus bind routine cached in the devinfo */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
6830 
6831 int
6832 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
6833 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
6834 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
6835 {
6836 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6837 	dev_info_t *hdip, *dip;
6838 	struct ddi_dma_req dmareq;
6839 	int (*funcp)();
6840 
6841 	if (len == (uint_t)0) {
6842 		return (DDI_DMA_NOMAPPING);
6843 	}
6844 	dmareq.dmar_flags = flags;
6845 	dmareq.dmar_fp = waitfp;
6846 	dmareq.dmar_arg = arg;
6847 	dmareq.dmar_object.dmao_size = len;
6848 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
6849 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
6850 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
6851 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
6852 
6853 	dip = hp->dmai_rdip;
6854 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
6855 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
6856 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
6857 }
6858 
6859 void
6860 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
6861 {
6862 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6863 	ddi_dma_cookie_t *cp;
6864 
6865 	cp = hp->dmai_cookie;
6866 	ASSERT(cp);
6867 
6868 	cookiep->dmac_notused = cp->dmac_notused;
6869 	cookiep->dmac_type = cp->dmac_type;
6870 	cookiep->dmac_address = cp->dmac_address;
6871 	cookiep->dmac_size = cp->dmac_size;
6872 	hp->dmai_cookie++;
6873 }
6874 
6875 int
6876 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
6877 {
6878 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6879 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
6880 		return (DDI_FAILURE);
6881 	} else {
6882 		*nwinp = hp->dmai_nwin;
6883 		return (DDI_SUCCESS);
6884 	}
6885 }
6886 
/*
 * Activate DMA window 'win' on handle 'h', returning its offset,
 * length, first cookie and cookie count.  A bus nexus may override
 * the default ddi_dma_win entry point via its bus_ops.
 *
 * NOTE(review): the HD macro appears to expand in terms of the
 * parameter identifier 'h'; do not rename it — confirm against HD's
 * definition.
 */
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;

	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
6900 
/*
 * Request 64-bit SBus transfers with the given burst sizes via the
 * DDI_DMA_SET_SBUS64 mctl operation.  (HD relies on parameter 'h'.)
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
		&burstsizes, 0, 0));
}
6907 
/*
 * Default fault-check routine: report the fault flag recorded on the
 * handle (set/cleared by i_ddi_dma_set_fault()/i_ddi_dma_clr_fault()).
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
6913 
6914 int
6915 ddi_check_dma_handle(ddi_dma_handle_t handle)
6916 {
6917 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6918 	int (*check)(ddi_dma_impl_t *);
6919 
6920 	if ((check = hp->dmai_fault_check) == NULL)
6921 		check = i_ddi_dma_fault_check;
6922 
6923 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
6924 }
6925 
6926 void
6927 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
6928 {
6929 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6930 	void (*notify)(ddi_dma_impl_t *);
6931 
6932 	if (!hp->dmai_fault) {
6933 		hp->dmai_fault = 1;
6934 		if ((notify = hp->dmai_fault_notify) != NULL)
6935 			(*notify)(hp);
6936 	}
6937 }
6938 
6939 void
6940 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
6941 {
6942 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6943 	void (*notify)(ddi_dma_impl_t *);
6944 
6945 	if (hp->dmai_fault) {
6946 		hp->dmai_fault = 0;
6947 		if ((notify = hp->dmai_fault_notify) != NULL)
6948 			(*notify)(hp);
6949 	}
6950 }
6951 
6952 /*
6953  * register mapping routines.
6954  */
6955 int
6956 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
6957 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
6958 	ddi_acc_handle_t *handle)
6959 {
6960 	ddi_map_req_t mr;
6961 	ddi_acc_hdl_t *hp;
6962 	int result;
6963 
6964 	/*
6965 	 * Allocate and initialize the common elements of data access handle.
6966 	 */
6967 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
6968 	hp = impl_acc_hdl_get(*handle);
6969 	hp->ah_vers = VERS_ACCHDL;
6970 	hp->ah_dip = dip;
6971 	hp->ah_rnumber = rnumber;
6972 	hp->ah_offset = offset;
6973 	hp->ah_len = len;
6974 	hp->ah_acc = *accattrp;
6975 
6976 	/*
6977 	 * Set up the mapping request and call to parent.
6978 	 */
6979 	mr.map_op = DDI_MO_MAP_LOCKED;
6980 	mr.map_type = DDI_MT_RNUMBER;
6981 	mr.map_obj.rnumber = rnumber;
6982 	mr.map_prot = PROT_READ | PROT_WRITE;
6983 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
6984 	mr.map_handlep = hp;
6985 	mr.map_vers = DDI_MAP_VERSION;
6986 	result = ddi_map(dip, &mr, offset, len, addrp);
6987 
6988 	/*
6989 	 * check for end result
6990 	 */
6991 	if (result != DDI_SUCCESS) {
6992 		impl_acc_hdl_free(*handle);
6993 		*handle = (ddi_acc_handle_t)NULL;
6994 	} else {
6995 		hp->ah_addr = *addrp;
6996 	}
6997 
6998 	return (result);
6999 }
7000 
7001 void
7002 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7003 {
7004 	ddi_map_req_t mr;
7005 	ddi_acc_hdl_t *hp;
7006 
7007 	hp = impl_acc_hdl_get(*handlep);
7008 	ASSERT(hp);
7009 
7010 	mr.map_op = DDI_MO_UNMAP;
7011 	mr.map_type = DDI_MT_RNUMBER;
7012 	mr.map_obj.rnumber = hp->ah_rnumber;
7013 	mr.map_prot = PROT_READ | PROT_WRITE;
7014 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7015 	mr.map_handlep = hp;
7016 	mr.map_vers = DDI_MAP_VERSION;
7017 
7018 	/*
7019 	 * Call my parent to unmap my regs.
7020 	 */
7021 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7022 		hp->ah_len, &hp->ah_addr);
7023 	/*
7024 	 * free the handle
7025 	 */
7026 	impl_acc_hdl_free(*handlep);
7027 	*handlep = (ddi_acc_handle_t)NULL;
7028 }
7029 
7030 int
7031 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7032 	ssize_t dev_advcnt, uint_t dev_datasz)
7033 {
7034 	uint8_t *b;
7035 	uint16_t *w;
7036 	uint32_t *l;
7037 	uint64_t *ll;
7038 
7039 	/* check for total byte count is multiple of data transfer size */
7040 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7041 		return (DDI_FAILURE);
7042 
7043 	switch (dev_datasz) {
7044 	case DDI_DATA_SZ01_ACC:
7045 		for (b = (uint8_t *)dev_addr;
7046 			bytecount != 0; bytecount -= 1, b += dev_advcnt)
7047 			ddi_put8(handle, b, 0);
7048 		break;
7049 	case DDI_DATA_SZ02_ACC:
7050 		for (w = (uint16_t *)dev_addr;
7051 			bytecount != 0; bytecount -= 2, w += dev_advcnt)
7052 			ddi_put16(handle, w, 0);
7053 		break;
7054 	case DDI_DATA_SZ04_ACC:
7055 		for (l = (uint32_t *)dev_addr;
7056 			bytecount != 0; bytecount -= 4, l += dev_advcnt)
7057 			ddi_put32(handle, l, 0);
7058 		break;
7059 	case DDI_DATA_SZ08_ACC:
7060 		for (ll = (uint64_t *)dev_addr;
7061 			bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7062 			ddi_put64(handle, ll, 0x0ll);
7063 		break;
7064 	default:
7065 		return (DDI_FAILURE);
7066 	}
7067 	return (DDI_SUCCESS);
7068 }
7069 
7070 int
7071 ddi_device_copy(
7072 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7073 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7074 	size_t bytecount, uint_t dev_datasz)
7075 {
7076 	uint8_t *b_src, *b_dst;
7077 	uint16_t *w_src, *w_dst;
7078 	uint32_t *l_src, *l_dst;
7079 	uint64_t *ll_src, *ll_dst;
7080 
7081 	/* check for total byte count is multiple of data transfer size */
7082 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7083 		return (DDI_FAILURE);
7084 
7085 	switch (dev_datasz) {
7086 	case DDI_DATA_SZ01_ACC:
7087 		b_src = (uint8_t *)src_addr;
7088 		b_dst = (uint8_t *)dest_addr;
7089 
7090 		for (; bytecount != 0; bytecount -= 1) {
7091 			ddi_put8(dest_handle, b_dst,
7092 				ddi_get8(src_handle, b_src));
7093 			b_dst += dest_advcnt;
7094 			b_src += src_advcnt;
7095 		}
7096 		break;
7097 	case DDI_DATA_SZ02_ACC:
7098 		w_src = (uint16_t *)src_addr;
7099 		w_dst = (uint16_t *)dest_addr;
7100 
7101 		for (; bytecount != 0; bytecount -= 2) {
7102 			ddi_put16(dest_handle, w_dst,
7103 				ddi_get16(src_handle, w_src));
7104 			w_dst += dest_advcnt;
7105 			w_src += src_advcnt;
7106 		}
7107 		break;
7108 	case DDI_DATA_SZ04_ACC:
7109 		l_src = (uint32_t *)src_addr;
7110 		l_dst = (uint32_t *)dest_addr;
7111 
7112 		for (; bytecount != 0; bytecount -= 4) {
7113 			ddi_put32(dest_handle, l_dst,
7114 				ddi_get32(src_handle, l_src));
7115 			l_dst += dest_advcnt;
7116 			l_src += src_advcnt;
7117 		}
7118 		break;
7119 	case DDI_DATA_SZ08_ACC:
7120 		ll_src = (uint64_t *)src_addr;
7121 		ll_dst = (uint64_t *)dest_addr;
7122 
7123 		for (; bytecount != 0; bytecount -= 8) {
7124 			ddi_put64(dest_handle, ll_dst,
7125 				ddi_get64(src_handle, ll_src));
7126 			ll_dst += dest_advcnt;
7127 			ll_src += src_advcnt;
7128 		}
7129 		break;
7130 	default:
7131 		return (DDI_FAILURE);
7132 	}
7133 	return (DDI_SUCCESS);
7134 }
7135 
/*
 * Byte-swap helper macros.  NOTE: each evaluates its argument more
 * than once, so arguments must be free of side effects.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

/* a 32-bit swap is the two 16-bit halves swapped and exchanged */
#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

/* a 64-bit swap is the two 32-bit halves swapped and exchanged */
#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7147 
/*
 * Return 'value' with its two bytes exchanged.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}
7153 
/*
 * Return 'value' with its four bytes in reverse order.
 */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}
7159 
/*
 * Return 'value' with its eight bytes in reverse order.
 */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t result = 0;
	int i;

	/* peel bytes off the low end and push them onto the result */
	for (i = 0; i < 8; i++) {
		result = (result << 8) | (value & 0xff);
		value >>= 8;
	}
	return (result);
}
7165 
7166 /*
7167  * Convert a binding name to a driver name.
7168  * A binding name is the name used to determine the driver for a
7169  * device - it may be either an alias for the driver or the name
7170  * of the driver itself.
7171  */
7172 char *
7173 i_binding_to_drv_name(char *bname)
7174 {
7175 	major_t major_no;
7176 
7177 	ASSERT(bname != NULL);
7178 
7179 	if ((major_no = ddi_name_to_major(bname)) == -1)
7180 		return (NULL);
7181 	return (ddi_major_to_name(major_no));
7182 }
7183 
7184 /*
7185  * Search for minor name that has specified dev_t and spec_type.
7186  * If spec_type is zero then any dev_t match works.  Since we
7187  * are returning a pointer to the minor name string, we require the
7188  * caller to do the locking.
7189  */
7190 char *
7191 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7192 {
7193 	struct ddi_minor_data	*dmdp;
7194 
7195 	/*
7196 	 * The did layered driver currently intentionally returns a
7197 	 * devinfo ptr for an underlying sd instance based on a did
7198 	 * dev_t. In this case it is not an error.
7199 	 *
7200 	 * The did layered driver is associated with Sun Cluster.
7201 	 */
7202 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7203 		(strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7204 	ASSERT(MUTEX_HELD(&(DEVI(dip)->devi_lock)));
7205 
7206 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7207 		if (((dmdp->type == DDM_MINOR) ||
7208 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7209 		    (dmdp->type == DDM_DEFAULT)) &&
7210 		    (dmdp->ddm_dev == dev) &&
7211 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7212 		    (dmdp->ddm_spec_type == spec_type)))
7213 			return (dmdp->ddm_name);
7214 	}
7215 
7216 	return (NULL);
7217 }
7218 
7219 /*
7220  * Find the devt and spectype of the specified minor_name.
7221  * Return DDI_FAILURE if minor_name not found. Since we are
7222  * returning everything via arguments we can do the locking.
7223  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == (major_t)-1)
			return (DDI_FAILURE);

		/* hold the driver just long enough to check STREAMSTAB */
		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* a clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	ASSERT(!MUTEX_HELD(&(DEVI(dip)->devi_lock)));
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* walk the minor list looking for an exact name match */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_FAILURE);
}
7283 
extern char	hw_serial[];	/* hostid string; parsed in ddi_devid_init() */
static kmutex_t devid_gen_mutex;	/* protects devid_gen_number */
static short	devid_gen_number;	/* generation # for DEVID_FAB devids */
7287 
#ifdef DEBUG

/*
 * Debug knobs: when nonzero, deliberately corrupt newly-registered
 * devids (or their driver-name hints) in i_ddi_devid_register() so
 * devid validation paths can be exercised.  The *_major variants
 * restrict the corruption to a single driver's major number.
 */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* enables the devid layering debug trace output below */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7306 
7307 
7308 #ifdef	DEBUG
7309 
7310 static void
7311 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7312 {
7313 	int i;
7314 
7315 	cmn_err(CE_CONT, "%s:\n", msg);
7316 	for (i = 0; i < ndevs; i++) {
7317 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7318 	}
7319 }
7320 
7321 static void
7322 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7323 {
7324 	int i;
7325 
7326 	cmn_err(CE_CONT, "%s:\n", msg);
7327 	for (i = 0; i < npaths; i++) {
7328 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7329 	}
7330 }
7331 
7332 static void
7333 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7334 {
7335 	int i;
7336 
7337 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7338 	for (i = 0; i < ndevs; i++) {
7339 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7340 	}
7341 }
7342 
7343 #endif	/* DEBUG */
7344 
7345 /*
7346  * Register device id into DDI framework.
7347  * Must be called when device is attached.
7348  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	/* a bound dip is required to derive the driver-name hint */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == (major_t)-1))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* zero-fill then copy, so short names leave a padded hint */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
			ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	ddi_devid_str_free(devid_str);

#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for devinfo snapshot compatibility.
	 * This code gets deleted when di_devid is gone from libdevid
	 */
	DEVI(dip)->devi_devid = DEVID_COMPATIBILITY;
#endif	/* DEVID_COMPATIBILITY */
	return (DDI_SUCCESS);
}
7415 
7416 int
7417 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7418 {
7419 	int rval;
7420 
7421 	rval = i_ddi_devid_register(dip, devid);
7422 	if (rval == DDI_SUCCESS) {
7423 		/*
7424 		 * Register devid in devid-to-path cache
7425 		 */
7426 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7427 			mutex_enter(&DEVI(dip)->devi_lock);
7428 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7429 			mutex_exit(&DEVI(dip)->devi_lock);
7430 		} else {
7431 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7432 				ddi_driver_name(dip), ddi_get_instance(dip));
7433 		}
7434 	} else {
7435 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7436 			ddi_driver_name(dip), ddi_get_instance(dip));
7437 	}
7438 	return (rval);
7439 }
7440 
7441 /*
7442  * Remove (unregister) device id from DDI framework.
7443  * Must be called when device is detached.
7444  */
static void
i_ddi_devid_unregister(dev_info_t *dip)
{
	/* undo what i_ddi_devid_register() set up on this node */
#ifdef	DEVID_COMPATIBILITY
	/*
	 * marker for micro release devinfo snapshot compatibility.
	 * This code gets deleted for the minor release.
	 */
	DEVI(dip)->devi_devid = NULL;		/* unset DEVID_PROP */
#endif	/* DEVID_COMPATIBILITY */

	/* remove the devid property */
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
}
7459 
/*
 * Public entry: clear the REGISTERED flag, drop the devid from the
 * devid-to-path cache, then remove the devid property itself.
 */
void
ddi_devid_unregister(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
7469 
7470 /*
7471  * Allocate and initialize a device id.
7472  */
int
ddi_devid_init(
	dev_info_t	*dip,
	ushort_t	devid_type,
	ushort_t	nbytes,
	void		*id,
	ddi_devid_t	*ret_devid)
{
	impl_devid_t	*i_devid;
	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
	int		driver_len;
	const char	*driver_name;

	switch (devid_type) {
	case DEVID_SCSI3_WWN:
		/*FALLTHRU*/
	case DEVID_SCSI_SERIAL:
		/*FALLTHRU*/
	case DEVID_ATA_SERIAL:
		/*FALLTHRU*/
	case DEVID_ENCAP:
		/* these types require caller-supplied id bytes */
		if (nbytes == 0)
			return (DDI_FAILURE);
		if (id == NULL)
			return (DDI_FAILURE);
		break;
	case DEVID_FAB:
		/* fabricated ids synthesize their own bytes below */
		if (nbytes != 0)
			return (DDI_FAILURE);
		if (id != NULL)
			return (DDI_FAILURE);
		/* hostid + timestamp + generation number */
		nbytes = sizeof (int) +
		    sizeof (struct timeval32) + sizeof (short);
		sz += nbytes;
		break;
	default:
		return (DDI_FAILURE);
	}

	/* NOTE(review): kmem_zalloc with KM_SLEEP should never fail */
	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
		return (DDI_FAILURE);

	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
	i_devid->did_rev_hi = DEVID_REV_MSB;
	i_devid->did_rev_lo = DEVID_REV_LSB;
	DEVID_FORMTYPE(i_devid, devid_type);
	DEVID_FORMLEN(i_devid, nbytes);

	/* Fill in driver name hint */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}

	bcopy(driver_name, i_devid->did_driver, driver_len);

	/* Fill in id field */
	if (devid_type == DEVID_FAB) {
		char		*cp;
		int		hostid;
		char		*hostid_cp = &hw_serial[0];
		struct timeval32 timestamp32;
		int		i;
		int		*ip;
		short		gen;

		/* increase the generation number */
		mutex_enter(&devid_gen_mutex);
		gen = devid_gen_number++;
		mutex_exit(&devid_gen_mutex);

		cp = i_devid->did_id;

		/* Fill in host id (big-endian byte ordering) */
		hostid = stoi(&hostid_cp);
		*cp++ = hibyte(hiword(hostid));
		*cp++ = lobyte(hiword(hostid));
		*cp++ = hibyte(loword(hostid));
		*cp++ = lobyte(loword(hostid));

		/*
		 * Fill in timestamp (big-endian byte ordering)
		 *
		 * (Note that the format may have to be changed
		 * before 2038 comes around, though it's arguably
		 * unique enough as it is..)
		 */
		uniqtime32(&timestamp32);
		ip = (int *)&timestamp32;
		for (i = 0;
		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
			int	val;
			val = *ip;
			*cp++ = hibyte(hiword(val));
			*cp++ = lobyte(hiword(val));
			*cp++ = hibyte(loword(val));
			*cp++ = lobyte(loword(val));
		}

		/* fill in the generation number */
		*cp++ = hibyte(gen);
		*cp++ = lobyte(gen);
	} else
		bcopy(id, i_devid->did_id, nbytes);

	/* return device id */
	*ret_devid = (ddi_devid_t)i_devid;
	return (DDI_SUCCESS);
}
7586 
7587 int
7588 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7589 {
7590 	char		*devidstr;
7591 
7592 	ASSERT(dev != DDI_DEV_T_NONE);
7593 
7594 	/* look up the property, devt specific first */
7595 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7596 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7597 		if ((dev == DDI_DEV_T_ANY) ||
7598 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7599 			DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7600 			DDI_PROP_SUCCESS)) {
7601 				return (DDI_FAILURE);
7602 		}
7603 	}
7604 
7605 	/* convert to binary form */
7606 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7607 		ddi_prop_free(devidstr);
7608 		return (DDI_FAILURE);
7609 	}
7610 	ddi_prop_free(devidstr);
7611 	return (DDI_SUCCESS);
7612 }
7613 
7614 /*
7615  * Return a copy of the device id for dev_t
7616  */
7617 int
7618 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7619 {
7620 	dev_info_t	*dip;
7621 	int		rval;
7622 
7623 	/* get the dip */
7624 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7625 		return (DDI_FAILURE);
7626 
7627 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7628 
7629 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7630 	return (rval);
7631 }
7632 
7633 /*
7634  * Return a copy of the minor name for dev_t and spec_type
7635  */
int
ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
{
	dev_info_t	*dip;
	char		*nm;
	size_t		alloc_sz, sz;

	/* hold the devinfo node so its minor list cannot disappear */
	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
		return (DDI_FAILURE);

	mutex_enter(&(DEVI(dip)->devi_lock));

	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* make a copy */
	alloc_sz = strlen(nm) + 1;
retry:
	/* drop lock to allocate memory */
	mutex_exit(&(DEVI(dip)->devi_lock));
	*minor_name = kmem_alloc(alloc_sz, KM_SLEEP);
	mutex_enter(&(DEVI(dip)->devi_lock));

	/* re-check things, since we dropped the lock */
	if ((nm = i_ddi_devtspectype_to_minorname(dip,
	    dev, spec_type)) == NULL) {
		mutex_exit(&(DEVI(dip)->devi_lock));
		kmem_free(*minor_name, alloc_sz);
		*minor_name = NULL;
		ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
		return (DDI_FAILURE);
	}

	/* verify size is the same */
	sz = strlen(nm) + 1;
	if (alloc_sz != sz) {
		/* the name changed while the lock was dropped; resize */
		kmem_free(*minor_name, alloc_sz);
		alloc_sz = sz;
		goto retry;
	}

	/* sz == alloc_sz - make a copy */
	(void) strcpy(*minor_name, nm);

	mutex_exit(&(DEVI(dip)->devi_lock));
	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
	return (DDI_SUCCESS);
}
7688 
7689 int
7690 ddi_lyr_devid_to_devlist(
7691 	ddi_devid_t	devid,
7692 	char		*minor_name,
7693 	int		*retndevs,
7694 	dev_t		**retdevs)
7695 {
7696 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7697 
7698 	if (e_devid_cache_to_devt_list(devid, minor_name,
7699 	    retndevs, retdevs) == DDI_SUCCESS) {
7700 		ASSERT(*retndevs > 0);
7701 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7702 			*retndevs, *retdevs);
7703 		return (DDI_SUCCESS);
7704 	}
7705 
7706 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7707 		return (DDI_FAILURE);
7708 	}
7709 
7710 	if (e_devid_cache_to_devt_list(devid, minor_name,
7711 	    retndevs, retdevs) == DDI_SUCCESS) {
7712 		ASSERT(*retndevs > 0);
7713 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7714 			*retndevs, *retdevs);
7715 		return (DDI_SUCCESS);
7716 	}
7717 
7718 	return (DDI_FAILURE);
7719 }
7720 
7721 void
7722 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7723 {
7724 	kmem_free(devlist, sizeof (dev_t) * ndevs);
7725 }
7726 
7727 /*
7728  * Note: This will need to be fixed if we ever allow processes to
7729  * have more than one data model per exec.
7730  */
model_t
ddi_mmap_get_model(void)
{
	/* Return the data model of the current (user) process. */
	return (get_udatamodel());
}
7736 
model_t
ddi_model_convert_from(model_t model)
{
	/* Mask to the model bits and strip the DDI_MODEL_NATIVE flag. */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
7742 
7743 /*
7744  * ddi interfaces managing storage and retrieval of eventcookies.
7745  */
7746 
7747 /*
7748  * Invoke bus nexus driver's implementation of the
7749  * (*bus_remove_eventcall)() interface to remove a registered
7750  * callback handler for "event".
7751  */
7752 int
7753 ddi_remove_event_handler(ddi_callback_id_t id)
7754 {
7755 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
7756 	dev_info_t *ddip;
7757 
7758 	ASSERT(cb);
7759 	if (!cb) {
7760 		return (DDI_FAILURE);
7761 	}
7762 
7763 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
7764 	return (ndi_busop_remove_eventcall(ddip, id));
7765 }
7766 
7767 /*
7768  * Invoke bus nexus driver's implementation of the
7769  * (*bus_add_eventcall)() interface to register a callback handler
7770  * for "event".
7771  */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	/* dip is both the requesting and the target device node here */
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
7779 
7780 
7781 /*
7782  * Return a handle for event "name" by calling up the device tree
7783  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
7784  * by a bus nexus or top of dev_info tree is reached.
7785  */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* dip is both the requesting and the target device node here */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
7793 
7794 /*
7795  * single thread access to dev_info node and set state
7796  */
void
i_devi_enter(dev_info_t *dip, uint_t s_mask, uint_t w_mask, int has_lock)
{
	/*
	 * Wait until all states in w_mask are clear, then set the states
	 * in s_mask.  If has_lock is nonzero the caller already holds
	 * devi_lock and it is held throughout.
	 */
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * wait until state(s) have been changed
	 */
	while ((DEVI(dip)->devi_state & w_mask) != 0) {
		cv_wait(&(DEVI(dip)->devi_cv), &(DEVI(dip)->devi_lock));
	}
	DEVI(dip)->devi_state |= s_mask;

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}
7816 
void
i_devi_exit(dev_info_t *dip, uint_t c_mask, int has_lock)
{
	/*
	 * Release the single-thread states in c_mask that were set by
	 * i_devi_enter().  If has_lock is nonzero the caller already
	 * holds devi_lock.
	 */
	if (!has_lock)
		mutex_enter(&(DEVI(dip)->devi_lock));

	ASSERT(mutex_owned(&(DEVI(dip)->devi_lock)));

	/*
	 * clear the state(s) and wakeup any threads waiting
	 * for state change
	 */
	DEVI(dip)->devi_state &= ~c_mask;
	cv_broadcast(&(DEVI(dip)->devi_cv));

	if (!has_lock)
		mutex_exit(&(DEVI(dip)->devi_lock));
}
7835 
7836 /*
7837  * This procedure is provided as the general callback function when
7838  * umem_lockmemory calls as_add_callback for long term memory locking.
7839  * When as_unmap, as_setprot, or as_free encounter segments which have
7840  * locked memory, this callback will be invoked.
7841  */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/*
	 * remove the cookie if reference goes to zero; umem_lockmemory
	 * set cook_refcnt to 2 when a driver callback was registered,
	 * the other reference belongs to i_ddi_umem_unlock.
	 */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
7865 
7866 /*
7867  * The following two Consolidation Private routines provide generic
7868  * interfaces to increase/decrease the amount of device-locked memory.
7869  *
7870  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
7871  * must be called every time i_ddi_incr_locked_memory() is called.
7872  */
7873 int
7874 /* ARGSUSED */
7875 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
7876 {
7877 	ASSERT(procp != NULL);
7878 	mutex_enter(&procp->p_lock);
7879 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
7880 		mutex_exit(&procp->p_lock);
7881 		return (ENOMEM);
7882 	}
7883 	mutex_exit(&procp->p_lock);
7884 	return (0);
7885 }
7886 
7887 /*
7888  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
7889  * must be called every time i_ddi_decr_locked_memory() is called.
7890  */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* p_lock protects the locked-memory rctl accounting */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
7900 
7901 /*
7902  * This routine checks if the max-locked-memory resource ctl is
7903  * exceeded, if not increments it, grabs a hold on the project.
7904  * Returns 0 if successful otherwise returns error code
7905  */
7906 static int
7907 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
7908 {
7909 	proc_t		*procp;
7910 	int		ret;
7911 
7912 	ASSERT(cookie);
7913 	procp = cookie->procp;
7914 	ASSERT(procp);
7915 
7916 	if ((ret = i_ddi_incr_locked_memory(procp,
7917 		cookie->size)) != 0) {
7918 		return (ret);
7919 	}
7920 	return (0);
7921 }
7922 
7923 /*
7924  * Decrements the max-locked-memory resource ctl and releases
7925  * the hold on the project that was acquired during umem_incr_devlockmem
7926  */
7927 static void
7928 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
7929 {
7930 	proc_t		*proc;
7931 
7932 	proc = (proc_t *)cookie->procp;
7933 	if (!proc)
7934 		return;
7935 
7936 	i_ddi_decr_locked_memory(proc, cookie->size);
7937 }
7938 
7939 /*
7940  * A consolidation private function which is essentially equivalent to
7941  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
7942  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
7943  * the ops_vector is valid.
7944  *
7945  * Lock the virtual address range in the current process and create a
7946  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
7947  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
7948  * to user space.
7949  *
7950  * Note: The resource control accounting currently uses a full charge model
7951  * in other words attempts to lock the same/overlapping areas of memory
7952  * will deduct the full size of the buffer from the projects running
7953  * counter for the device locked memory.
7954  *
7955  * addr, size should be PAGESIZE aligned
7956  *
7957  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
7958  *	identifies whether the locked memory will be read or written or both
7959  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
7960  * be maintained for an indefinitely long period (essentially permanent),
7961  * rather than for what would be required for a typical I/O completion.
7962  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
7963  * if the memory pertains to a regular file which is mapped MAP_SHARED.
7964  * This is to prevent a deadlock if a file truncation is attempted after
7965  * after the locking is done.
7966  *
7967  * Returns 0 on success
7968  *	EINVAL - for invalid parameters
7969  *	EPERM, ENOMEM and other error codes returned by as_pagelock
7970  *	ENOMEM - is returned if the current request to lock memory exceeds
7971  *		*.max-locked-memory resource control value.
7972  *      EFAULT - memory pertains to a regular file mapped shared and
7973  *		and DDI_UMEMLOCK_LONGTERM flag is set
7974  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
7975  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge taken above before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
						addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
						addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
					addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8137 
8138 /*
8139  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8140  * the cookie.  Called from i_ddi_umem_unlock_thread.
8141  */
8142 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_undo_lock may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8204 
8205 /*
8206  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8207  *
8208  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8209  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8210  * via calls to ddi_umem_unlock.
8211  */
8212 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			/* CPR-safe wait: mark this thread safe to suspend */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8244 
8245 /*
8246  * Start the thread that will process the ddi_umem_unlock list if it is
8247  * not already started (i_ddi_umem_unlock_thread).
8248  */
static void
i_ddi_umem_unlock_thread_start(void)
{
	/* serialize so at most one unlock worker thread is ever created */
	mutex_enter(&ddi_umem_unlock_mutex);
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8260 
8261 /*
8262  * Lock the virtual address range in the current process and create a
8263  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8264  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8265  * to user space.
8266  *
8267  * Note: The resource control accounting currently uses a full charge model
8268  * in other words attempts to lock the same/overlapping areas of memory
8269  * will deduct the full size of the buffer from the projects running
8270  * counter for the device locked memory. This applies to umem_lockmemory too.
8271  *
8272  * addr, size should be PAGESIZE aligned
8273  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8274  *	identifies whether the locked memory will be read or written or both
8275  *
8276  * Returns 0 on success
8277  *	EINVAL - for invalid parameters
8278  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8279  *	ENOMEM - is returned if the current request to lock memory exceeds
8280  *		*.max-locked-memory resource control value.
8281  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8282  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/*
	 * Lock the pages corresponding to addr, len in memory.
	 * p->procp is curproc here, so p_as is the same as p->asp above.
	 */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge taken above before failing */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8368 
8369 /*
8370  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8371  * unlocked by i_ddi_umem_unlock_thread.
8372  */
8373 
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty; wake the worker thread */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			/* append to the tail of the singly-linked queue */
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8405 
8406 /*
8407  * Create a buf structure from a ddi_umem_cookie
8408  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8409  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8410  * off, len - identifies the portion of the memory represented by the cookie
8411  *		that the buf points to.
8412  *	NOTE: off, len need to follow the alignment/size restrictions of the
8413  *		device (dev) that this buf will be passed to. Some devices
8414  *		will accept unrestricted alignment/size, whereas others (such as
8415  *		st) require some block-size alignment/size. It is the caller's
8416  *		responsibility to ensure that the alignment/size restrictions
8417  *		are met (we cannot assert as we do not know the restrictions)
8418  *
8419  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8420  *		the flags used in ddi_umem_lock
8421  *
8422  * The following three arguments are used to initialize fields in the
8423  * buf structure and are uninterpreted by this routine.
8424  *
8425  * dev
8426  * blkno
8427  * iodone
8428  *
8429  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8430  *
8431  * Returns a buf structure pointer on success (to be freed by freerbuf)
8432  *	NULL on any parameter error or memory alloc failure
8433  *
8434  */
struct buf *
ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
	int direction, dev_t dev, daddr_t blkno,
	int (*iodone)(struct buf *), int sleepflag)
{
	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
	struct buf *bp;

	/*
	 * check for valid cookie offset, len
	 */
	if ((off + len) > p->size) {
		return (NULL);
	}

	/*
	 * NOTE(review): this test looks redundant given the one above;
	 * presumably it guards the case where (off + len) wraps around --
	 * confirm before removing.
	 */
	if (len > p->size) {
		return (NULL);
	}

	/* direction has to be one of B_READ or B_WRITE */
	if ((direction != B_READ) && (direction != B_WRITE)) {
		return (NULL);
	}

	/* These are the only two valid sleepflags */
	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
		return (NULL);
	}

	/*
	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
	 */
	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
		return (NULL);
	}

	/* If type is KMEM_NON_PAGEABLE procp is NULL */
	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
		(p->procp == NULL) : (p->procp != NULL));

	bp = kmem_alloc(sizeof (struct buf), sleepflag);
	if (bp == NULL) {
		return (NULL);
	}
	bioinit(bp);

	bp->b_flags = B_BUSY | B_PHYS | direction;
	bp->b_edev = dev;
	bp->b_lblkno = blkno;
	bp->b_iodone = iodone;
	bp->b_bcount = len;
	bp->b_proc = (proc_t *)p->procp;
	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
	if (p->pparray != NULL) {
		/* locked pages: hand the page array to the buf as a shadow */
		bp->b_flags |= B_SHADOW;
		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
		bp->b_shadow = p->pparray + btop(off);
	}
	return (bp);
}
8496 
8497 /*
8498  * Fault-handling and related routines
8499  */
8500 
8501 ddi_devstate_t
8502 ddi_get_devstate(dev_info_t *dip)
8503 {
8504 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8505 		return (DDI_DEVSTATE_OFFLINE);
8506 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8507 		return (DDI_DEVSTATE_DOWN);
8508 	else if (DEVI_IS_BUS_QUIESCED(dip))
8509 		return (DDI_DEVSTATE_QUIESCED);
8510 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8511 		return (DDI_DEVSTATE_DEGRADED);
8512 	else
8513 		return (DDI_DEVSTATE_UP);
8514 }
8515 
8516 void
8517 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8518 	ddi_fault_location_t location, const char *message)
8519 {
8520 	struct ddi_fault_event_data fd;
8521 	ddi_eventcookie_t ec;
8522 
8523 	/*
8524 	 * Assemble all the information into a fault-event-data structure
8525 	 */
8526 	fd.f_dip = dip;
8527 	fd.f_impact = impact;
8528 	fd.f_location = location;
8529 	fd.f_message = message;
8530 	fd.f_oldstate = ddi_get_devstate(dip);
8531 
8532 	/*
8533 	 * Get eventcookie from defining parent.
8534 	 */
8535 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8536 	    DDI_SUCCESS)
8537 		return;
8538 
8539 	(void) ndi_post_event(dip, dip, ec, &fd);
8540 }
8541 
char *
i_ddi_devi_class(dev_info_t *dip)
{
	/* Return the node's device-class string (NULL if never set). */
	return (DEVI(dip)->devi_device_class);
}
8547 
8548 int
8549 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8550 {
8551 	struct dev_info *devi = DEVI(dip);
8552 
8553 	mutex_enter(&devi->devi_lock);
8554 
8555 	if (devi->devi_device_class)
8556 		kmem_free(devi->devi_device_class,
8557 		    strlen(devi->devi_device_class) + 1);
8558 
8559 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8560 	    != NULL) {
8561 		mutex_exit(&devi->devi_lock);
8562 		return (DDI_SUCCESS);
8563 	}
8564 
8565 	mutex_exit(&devi->devi_lock);
8566 
8567 	return (DDI_FAILURE);
8568 }
8569 
8570 
8571 /*
8572  * Task Queues DDI interfaces.
8573  */
8574 
8575 /* ARGSUSED */
8576 ddi_taskq_t *
8577 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8578     pri_t pri, uint_t cflags)
8579 {
8580 	char full_name[TASKQ_NAMELEN];
8581 	const char *tq_name;
8582 	int nodeid = 0;
8583 
8584 	if (dip == NULL)
8585 		tq_name = name;
8586 	else {
8587 		nodeid = ddi_get_instance(dip);
8588 
8589 		if (name == NULL)
8590 			name = "tq";
8591 
8592 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8593 		    ddi_driver_name(dip), name);
8594 
8595 		tq_name = full_name;
8596 	}
8597 
8598 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8599 		    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8600 		    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8601 }
8602 
8603 void
8604 ddi_taskq_destroy(ddi_taskq_t *tq)
8605 {
8606 	taskq_destroy((taskq_t *)tq);
8607 }
8608 
8609 int
8610 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8611     void *arg, uint_t dflags)
8612 {
8613 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8614 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8615 
8616 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8617 }
8618 
8619 void
8620 ddi_taskq_wait(ddi_taskq_t *tq)
8621 {
8622 	taskq_wait((taskq_t *)tq);
8623 }
8624 
8625 void
8626 ddi_taskq_suspend(ddi_taskq_t *tq)
8627 {
8628 	taskq_suspend((taskq_t *)tq);
8629 }
8630 
8631 boolean_t
8632 ddi_taskq_suspended(ddi_taskq_t *tq)
8633 {
8634 	return (taskq_suspended((taskq_t *)tq));
8635 }
8636 
8637 void
8638 ddi_taskq_resume(ddi_taskq_t *tq)
8639 {
8640 	taskq_resume((taskq_t *)tq);
8641 }
8642 
/*
 * Split a device name of the form "<prefix><digits>" (e.g. "e1000g0")
 * into its alphanumeric prefix and trailing unit number.  The prefix
 * (NUL-terminated) is copied into alnum, which the caller must size to
 * hold at least strlen(ifname) + 1 bytes; the unit number is stored in
 * *nump.  Returns DDI_SUCCESS, or DDI_FAILURE when ifname is empty,
 * contains no trailing digits, consists only of digits, or the digit
 * string does not parse.
 */
int
ddi_parse(
	const char	*ifname,
	char		*alnum,
	uint_t		*nump)
{
	const char	*p;
	int		l;
	ulong_t		num;
	boolean_t	nonum = B_TRUE;	/* no trailing digit seen yet */
	char		c;

	l = strlen(ifname);
	/*
	 * Scan backwards from the end over the trailing digits.  Note
	 * that l is decremented after each digit, but break below skips
	 * the decrement, so on exit via break l is the length of the
	 * prefix up to and including the last non-digit character.
	 */
	for (p = ifname + l; p != ifname; l--) {
		c = *--p;
		if (!isdigit(c)) {
			/* Copy the prefix; size l + 1 includes the NUL */
			(void) strlcpy(alnum, ifname, l + 1);
			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
				return (DDI_FAILURE);
			break;
		}
		nonum = B_FALSE;
	}
	/*
	 * l == 0 means every character was a digit (no prefix);
	 * nonum means the name did not end in a digit at all.
	 */
	if (l == 0 || nonum)
		return (DDI_FAILURE);

	/*
	 * NOTE(review): num is ulong_t but *nump is uint_t, so a value
	 * above UINT_MAX is silently truncated on LP64 -- confirm
	 * callers never pass such names before tightening this.
	 */
	*nump = num;
	return (DDI_SUCCESS);
}
8672