xref: /titanic_52/usr/src/uts/common/os/sunddi.c (revision 36e852a172cba914383d7341c988128b2c667fbd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_timer.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 #include <sys/zone.h>
86 #include <sys/clock_impl.h>
87 #include <sys/ddi.h>
88 
89 extern	pri_t	minclsyspri;
90 
91 extern	rctl_hndl_t rc_project_locked_mem;
92 extern	rctl_hndl_t rc_zone_locked_mem;
93 
94 #ifdef DEBUG
95 static int sunddi_debug = 0;
96 #endif /* DEBUG */
97 
98 /* ddi_umem_unlock miscellaneous */
99 
100 static	void	i_ddi_umem_unlock_thread_start(void);
101 
102 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
103 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
104 static	kthread_t	*ddi_umem_unlock_thread;
105 /*
106  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
107  */
108 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
109 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
110 
111 /*
112  * DDI(Sun) Function and flag definitions:
113  */
114 
115 #if defined(__x86)
116 /*
117  * Used to indicate which entries were chosen from a range.
118  */
119 char	*chosen_reg = "chosen-reg";
120 #endif
121 
122 /*
123  * Function used to ring system console bell
124  */
125 void (*ddi_console_bell_func)(clock_t duration);
126 
127 /*
128  * Creating register mappings and handling interrupts:
129  */
130 
131 /*
132  * Generic ddi_map: Call parent to fulfill request...
133  */
134 
135 int
136 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
137     off_t len, caddr_t *addrp)
138 {
139 	dev_info_t *pdip;
140 
141 	ASSERT(dp);
142 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
143 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
144 	    dp, mp, offset, len, addrp));
145 }
146 
147 /*
148  * ddi_apply_range: (Called by nexi only.)
149  * Apply ranges in parent node dp, to child regspec rp...
150  */
151 
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	/* Defer to the implementation-private range-application routine. */
	return (i_ddi_apply_range(dp, rdip, rp));
}
157 
/*
 * ddi_map_regs: map register set 'rnumber' of 'dip' into kernel virtual
 * space, returning the mapped address through 'kaddrp'.  The actual
 * mapping is performed by the parent nexus via ddi_map().
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 * NOTE(review): rnumber is not validated against 'length';
		 * an out-of-range rnumber reads past the reglist array.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a locked-mapping request for register set 'rnumber'. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
221 
222 void
223 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
224     off_t len)
225 {
226 	ddi_map_req_t mr;
227 
228 	mr.map_op = DDI_MO_UNMAP;
229 	mr.map_type = DDI_MT_RNUMBER;
230 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
231 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
232 	mr.map_obj.rnumber = rnumber;
233 	mr.map_handlep = NULL;
234 	mr.map_vers = DDI_MAP_VERSION;
235 
236 	/*
237 	 * Call my parent to unmap my regs.
238 	 */
239 
240 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
241 	*kaddrp = (caddr_t)0;
242 #if defined(__x86)
243 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
244 #endif
245 }
246 
/*
 * ddi_bus_map: default bus_map entry point; hands the request to the
 * implementation-specific i_ddi_bus_map().
 */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
253 
254 /*
255  * nullbusmap:	The/DDI default bus_map entry point for nexi
256  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
257  *		with no HAT/MMU layer to be programmed at this level.
258  *
259  *		If the call is to map by rnumber, return an error,
260  *		otherwise pass anything else up the tree to my parent.
261  */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	/* Mapping by register number is unsupported at this level. */
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	/* Anything else is passed up the tree to our parent. */
	return (ddi_map(dip, mp, offset, len, vaddrp));
}
272 
273 /*
274  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
275  *			   Only for use by nexi using the reg/range paradigm.
276  */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	/* Defer to the implementation-private lookup routine. */
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
282 
283 
284 /*
285  * Note that we allow the dip to be nil because we may be called
286  * prior even to the instantiation of the devinfo tree itself - all
287  * regular leaf and nexus drivers should always use a non-nil dip!
288  *
289  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
290  * simply get a synchronous fault as soon as we touch a missing address.
291  *
292  * Poke is rather more carefully handled because we might poke to a write
293  * buffer, "succeed", then only find some time later that we got an
294  * asynchronous fault that indicated that the address we were writing to
295  * was not really backed by hardware.
296  */
297 
298 static int
299 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
300     void *addr, void *value_p)
301 {
302 	union {
303 		uint64_t	u64;
304 		uint32_t	u32;
305 		uint16_t	u16;
306 		uint8_t		u8;
307 	} peekpoke_value;
308 
309 	peekpoke_ctlops_t peekpoke_args;
310 	uint64_t dummy_result;
311 	int rval;
312 
313 	/* Note: size is assumed to be correct;  it is not checked. */
314 	peekpoke_args.size = size;
315 	peekpoke_args.dev_addr = (uintptr_t)addr;
316 	peekpoke_args.handle = NULL;
317 	peekpoke_args.repcount = 1;
318 	peekpoke_args.flags = 0;
319 
320 	if (cmd == DDI_CTLOPS_POKE) {
321 		switch (size) {
322 		case sizeof (uint8_t):
323 			peekpoke_value.u8 = *(uint8_t *)value_p;
324 			break;
325 		case sizeof (uint16_t):
326 			peekpoke_value.u16 = *(uint16_t *)value_p;
327 			break;
328 		case sizeof (uint32_t):
329 			peekpoke_value.u32 = *(uint32_t *)value_p;
330 			break;
331 		case sizeof (uint64_t):
332 			peekpoke_value.u64 = *(uint64_t *)value_p;
333 			break;
334 		}
335 	}
336 
337 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
338 
339 	if (devi != NULL)
340 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
341 		    &dummy_result);
342 	else
343 		rval = peekpoke_mem(cmd, &peekpoke_args);
344 
345 	/*
346 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
347 	 */
348 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
349 		switch (size) {
350 		case sizeof (uint8_t):
351 			*(uint8_t *)value_p = peekpoke_value.u8;
352 			break;
353 		case sizeof (uint16_t):
354 			*(uint16_t *)value_p = peekpoke_value.u16;
355 			break;
356 		case sizeof (uint32_t):
357 			*(uint32_t *)value_p = peekpoke_value.u32;
358 			break;
359 		case sizeof (uint64_t):
360 			*(uint64_t *)value_p = peekpoke_value.u64;
361 			break;
362 		}
363 	}
364 
365 	return (rval);
366 }
367 
368 /*
369  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
370  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
371  */
372 int
373 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
374 {
375 	switch (size) {
376 	case sizeof (uint8_t):
377 	case sizeof (uint16_t):
378 	case sizeof (uint32_t):
379 	case sizeof (uint64_t):
380 		break;
381 	default:
382 		return (DDI_FAILURE);
383 	}
384 
385 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
386 }
387 
388 int
389 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
390 {
391 	switch (size) {
392 	case sizeof (uint8_t):
393 	case sizeof (uint16_t):
394 	case sizeof (uint32_t):
395 	case sizeof (uint64_t):
396 		break;
397 	default:
398 		return (DDI_FAILURE);
399 	}
400 
401 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
402 }
403 
/*
 * Fixed-width peek entry points.  Each calls the common i_ddi_peekpoke()
 * with the size implied by its name; a NULL dip is tolerated (see the
 * block comment above i_ddi_peekpoke()).
 */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
431 
432 
433 /*
434  * We need to separate the old interfaces from the new ones and leave them
435  * in here for a while. Previous versions of the OS defined the new interfaces
436  * to the old interfaces. This way we can fix things up so that we can
437  * eventually remove these interfaces.
438  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
439  * or earlier will actually have a reference to ddi_peekc in the binary.
440  */
#ifdef _ILP32
/*
 * Obsolete 32-bit compatibility names: ddi_peekc/s/l/d are the pre-S10
 * spellings of ddi_peek8/16/32/64 (see the block comment above); kept so
 * old binaries that reference them keep resolving.
 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
#endif /* _ILP32 */
470 
/*
 * Fixed-width poke entry points.  Each passes the by-value argument's
 * address to the common i_ddi_peekpoke() with the size implied by its name.
 */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
494 
495 /*
496  * We need to separate the old interfaces from the new ones and leave them
497  * in here for a while. Previous versions of the OS defined the new interfaces
498  * to the old interfaces. This way we can fix things up so that we can
499  * eventually remove these interfaces.
500  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
501  * or earlier will actually have a reference to ddi_pokec in the binary.
502  */
#ifdef _ILP32
/*
 * Obsolete 32-bit compatibility names: ddi_pokec/s/l/d are the pre-S10
 * spellings of ddi_poke8/16/32/64 (see the block comment above); kept so
 * old binaries that reference them keep resolving.
 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
#endif /* _ILP32 */
528 
529 /*
530  * ddi_peekpokeio() is used primarily by the mem drivers for moving
531  * data to and from uio structures via peek and poke.  Note that we
532  * use "internal" routines ddi_peek and ddi_poke to make this go
533  * slightly faster, avoiding the call overhead ..
534  */
/*
 * ddi_peekpokeio: move up to 'len' bytes between device address 'addr'
 * and the caller's uio via peek (UIO_READ) or poke (UIO_WRITE), choosing
 * the widest naturally-aligned transfer size per iteration (capped at
 * 'xfersize' and at sizeof (long)).  Returns DDI_SUCCESS or DDI_FAILURE
 * on the first peek/poke or uiomove error.
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	/* Never transfer wider than the native word. */
	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/*
		 * Odd address or odd residual length: fall back to a
		 * single byte transfer for this iteration.
		 */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/*
			 * Pick the widest size (up to xfersize) for which
			 * both the address and remaining length are
			 * naturally aligned; cases deliberately fall
			 * through to the next narrower candidate.
			 */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			/* Read: peek from the device, then copy out. */
			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			/* Write: copy in first, then poke to the device. */
			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
610 
611 /*
612  * These routines are used by drivers that do layered ioctls
613  * On sparc, they're implemented in assembler to avoid spilling
614  * register windows in the common (copyin) case ..
615  */
616 #if !defined(__sparc)
617 int
618 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
619 {
620 	if (flags & FKIOCTL)
621 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
622 	return (copyin(buf, kernbuf, size));
623 }
624 
625 int
626 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
627 {
628 	if (flags & FKIOCTL)
629 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
630 	return (copyout(buf, kernbuf, size));
631 }
632 #endif	/* !__sparc */
633 
634 /*
635  * Conversions in nexus pagesize units.  We don't duplicate the
636  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
637  * routines anyway.
638  */
/*
 * Byte/page conversions in the parent nexus's pagesize units, implemented
 * via the BTOP/BTOPR/PTOB ctlops.  The ddi_ctlops() return value is
 * deliberately ignored; the converted count is returned.
 */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* As ddi_btop(), but rounds any partial page up (BTOPR). */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Inverse conversion: pages to bytes (PTOB). */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
665 
/*
 * ddi_enter_critical: raise the priority level to spl7, returning the
 * previous level for the matching ddi_exit_critical() call.
 */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Restore the priority level saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
677 
678 /*
679  * Nexus ctlops punter
680  */
681 
682 #if !defined(__sparc)
683 /*
684  * Request bus_ctl parent to handle a bus_ctl request
685  *
686  * (The sparc version is in sparc_ddi.s)
687  */
688 int
689 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
690 {
691 	int (*fp)();
692 
693 	if (!d || !r)
694 		return (DDI_FAILURE);
695 
696 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
697 		return (DDI_FAILURE);
698 
699 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
700 	return ((*fp)(d, r, op, a, v));
701 }
702 
703 #endif
704 
705 /*
706  * DMA/DVMA setup
707  */
708 
#if defined(__sparc)
/*
 * Default DMA limits used when a caller passes a NULL ddi_dma_lim_t.
 */
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
/*
 * Default DMA limits used when a caller passes a NULL ddi_dma_lim_t.
 * The 24-bit address ceiling reflects ISA-era DMA addressing.
 */
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	/* NOTE: precedence makes this 0x86 << (24 + 0), i.e. 0x86 << 24 */
	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
735 
/*
 * ddi_dma_setup: obsolete DMA mapping entry point.  On sparc a NULL
 * dmar_limits is replaced by a stack copy of standard_limits; on x86 a
 * NULL dmar_limits fails outright.  The request is then dispatched to the
 * requester's own bus_dma_map if it is a nexus, else to ddi_dma_map().
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	/* Substitute (a private copy of) the default limits if none given. */
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
768 
/*
 * ddi_dma_addr_setup: obsolete DMA setup for a virtual address range.
 * Builds a DMA_OTYP_VADDR request for [addr, addr + len) in address space
 * 'as' and dispatches it like ddi_dma_setup().  A zero length fails with
 * DDI_DMA_NOMAPPING; NULL limits fall back to standard_limits.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* Use (a private copy of) the default limits if none were given. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
809 
/*
 * ddi_dma_buf_setup: obsolete DMA setup for a struct buf.  Builds the DMA
 * request from the buf's pages (B_PAGEIO) or virtual address (optionally
 * with a B_SHADOW page list), resolves the owning address space, and
 * dispatches it like ddi_dma_setup().
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	/* Use (a private copy of) the default limits if none were given. */
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: describe the object by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		/* Otherwise describe it by kernel/user virtual address. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
874 
875 #if !defined(__sparc)
876 /*
877  * Request bus_dma_ctl parent to fiddle with a dma request.
878  *
879  * (The sparc version is in sparc_subr.s)
880  */
/*
 * ddi_dma_mctl: forward a DMA control request to the bus_dma_ctl parent
 * of 'dip' (or to 'dip' itself if it is the root node).
 * (The sparc version is in sparc_subr.s.)
 */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
893 #endif
894 
895 /*
896  * For all DMA control functions, call the DMA control
897  * routine and return status.
898  *
899  * Just plain assume that the parent is to be called.
900  * If a nexus driver or a thread outside the framework
901  * of a nexus driver or a leaf driver calls these functions,
902  * it is up to them to deal with the fact that the parent's
903  * bus_dma_ctl function will be the first one called.
904  */
905 
/* Requesting dip recorded in the handle; used as both dip and rdip below. */
#define	HD	((ddi_dma_impl_t *)h)->dmai_rdip

/* Obtain a kernel virtual address for part of a DMA mapping. */
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

/* Convert a handle plus offset into a DMA cookie. */
int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

/* Convert a DMA cookie back into an offset within the object. */
int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

/* Move to a new DMA window and return its cookie. */
int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

/*
 * Report the current DMA window (REPWIN ctlop); only meaningful for
 * handles set up with DDI_DMA_PARTIAL.
 */
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

/* Step to the next DMA window. */
int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

/* Step to the next DMA segment within a window. */
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}
958 
959 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
960 /*
961  * This routine is Obsolete and should be removed from ALL architectures
962  * in a future release of Solaris.
963  *
964  * It is deliberately NOT ported to amd64; please fix the code that
965  * depends on this routine to use ddi_dma_nextcookie(9F).
966  *
967  * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
968  * is a side effect to some other cleanup), we're still not going to support
969  * this interface on x64.
970  */
/* Convert a DMA segment to a cookie (SEGTOC); see obsolescence note above. */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
980 #endif	/* (__i386 && !__amd64) || __sparc */
981 
982 #if !defined(__sparc)
983 
984 /*
985  * The SPARC versions of these routines are done in assembler to
986  * save register windows, so they're in sparc_subr.s.
987  */
988 
/*
 * ddi_dma_map: forward a DMA mapping request to the bus_dma_map parent of
 * 'dip' (or to 'dip' itself if it is the root node).
 */
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(dip, rdip, dmareqp, handlep));
}
1002 
/*
 * DMA handle alloc/free/bind/unbind: each routine forwards to the cached
 * parent for the corresponding bus_dma_* operation (or to 'dip' itself if
 * it is the root node).
 */
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(dip, rdip, attr, waitfp, arg, handlep));
}

int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * NOTE(review): the parent is located via devi_bus_dma_allochdl,
	 * not a freehdl-specific field — presumably alloc and free resolve
	 * to the same nexus; confirm before "fixing" this.
	 */
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(dip, rdip, handlep));
}

int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(dip, rdip, handle, dmareq, cp, ccountp));
}

int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(dip, rdip, handle));
}
1056 
1057 
/*
 * ddi_dma_flush: forward a DMA flush/sync request for [off, off + len) to
 * the cached bus_dma_flush parent of 'dip'.
 */
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, handle, off, len, cache_flags));
}

/*
 * ddi_dma_win: activate DMA window 'win', returning its offset, length,
 * first cookie and cookie count via the cached bus_dma_win parent.
 */
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
1088 
/*
 * ddi_dma_sync:  Synchronize the view of a DMA object between the CPU
 * and the device.  Implemented on top of the nexus bus_dma_flush
 * entry point; returns the nexus result, or DDI_SUCCESS immediately
 * when the handle is marked as requiring no synchronization.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * Route the call through the cached DMA-flush parent
	 * (devi_bus_dma_flush) of the requesting device, unless the
	 * requester is the root node itself.
	 */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(dip, rdip, h, o, l, whom));
}
1112 
/*
 * ddi_dma_unbind_handle:  Unbind the address range previously bound to
 * DMA handle `h'.  Note the asymmetry: `dip' is rewritten to the cached
 * unbindhdl parent, while the function pointer itself comes from the
 * requesting device's cached devi_bus_dma_unbindfunc.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *dip, *rdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/* funcp is cached on the requesting device, not looked up on dip */
	funcp = DEVI(rdip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(dip, rdip, h));
}
1126 
1127 #endif	/* !__sparc */
1128 
/*
 * ddi_dma_free:  Legacy interface -- release DMA resources by issuing
 * the DDI_DMA_FREE sub-command through ddi_dma_mctl().  HD is
 * presumably a placeholder dip defined earlier in this file -- see its
 * definition for the exact semantics.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}
1134 
1135 int
1136 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1137 {
1138 	ddi_dma_lim_t defalt;
1139 	size_t size = len;
1140 
1141 	if (!limp) {
1142 		defalt = standard_limits;
1143 		limp = &defalt;
1144 	}
1145 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1146 	    iopbp, NULL, NULL));
1147 }
1148 
/* Free an I/O parameter block obtained from ddi_iopb_alloc(). */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}
1154 
1155 int
1156 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1157 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1158 {
1159 	ddi_dma_lim_t defalt;
1160 	size_t size = length;
1161 
1162 	if (!limits) {
1163 		defalt = standard_limits;
1164 		limits = &defalt;
1165 	}
1166 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1167 	    1, 0, kaddrp, real_length, NULL));
1168 }
1169 
/* Free memory obtained from ddi_mem_alloc(). */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}
1175 
1176 /*
1177  * DMA attributes, alignment, burst sizes, and transfer minimums
1178  */
1179 int
1180 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1181 {
1182 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1183 
1184 	if (attrp == NULL)
1185 		return (DDI_FAILURE);
1186 	*attrp = dimp->dmai_attr;
1187 	return (DDI_SUCCESS);
1188 }
1189 
1190 int
1191 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1192 {
1193 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1194 
1195 	if (!dimp)
1196 		return (0);
1197 	else
1198 		return (dimp->dmai_burstsizes);
1199 }
1200 
1201 int
1202 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1203 {
1204 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1205 
1206 	if (!dimp || !alignment || !mineffect)
1207 		return (DDI_FAILURE);
1208 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1209 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1210 	} else {
1211 		if (dimp->dmai_burstsizes & 0xff0000) {
1212 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1213 		} else {
1214 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1215 		}
1216 	}
1217 	*mineffect = dimp->dmai_minxfer;
1218 	return (DDI_SUCCESS);
1219 }
1220 
1221 int
1222 ddi_iomin(dev_info_t *a, int i, int stream)
1223 {
1224 	int r;
1225 
1226 	/*
1227 	 * Make sure that the initial value is sane
1228 	 */
1229 	if (i & (i - 1))
1230 		return (0);
1231 	if (i == 0)
1232 		i = (stream) ? 4 : 1;
1233 
1234 	r = ddi_ctlops(a, a,
1235 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1236 	if (r != DDI_SUCCESS || (i & (i - 1)))
1237 		return (0);
1238 	return (i);
1239 }
1240 
1241 /*
1242  * Given two DMA attribute structures, apply the attributes
1243  * of one to the other, following the rules of attributes
1244  * and the wishes of the caller.
1245  *
1246  * The rules of DMA attribute structures are that you cannot
1247  * make things *less* restrictive as you apply one set
1248  * of attributes to another.
1249  *
1250  */
1251 void
1252 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
1253 {
1254 	attr->dma_attr_addr_lo =
1255 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
1256 	attr->dma_attr_addr_hi =
1257 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
1258 	attr->dma_attr_count_max =
1259 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
1260 	attr->dma_attr_align =
1261 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
1262 	attr->dma_attr_burstsizes =
1263 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
1264 	attr->dma_attr_minxfer =
1265 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
1266 	attr->dma_attr_maxxfer =
1267 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
1268 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
1269 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
1270 	    (uint_t)mod->dma_attr_sgllen);
1271 	attr->dma_attr_granular =
1272 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
1273 }
1274 
1275 /*
1276  * mmap/segmap interface:
1277  */
1278 
1279 /*
1280  * ddi_segmap:		setup the default segment driver. Calls the drivers
1281  *			XXmmap routine to validate the range to be mapped.
 *			Return ENXIO if the range is not valid.  Create
1283  *			a seg_dev segment that contains all of the
1284  *			necessary information and will reference the
1285  *			default segment driver routines. It returns zero
1286  *			on success or non-zero on failure.
1287  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* spec_segmap() is declared locally to avoid a specfs header. */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* All arguments pass straight through to specfs. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1298 
1299 /*
1300  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1301  *			drivers. Allows each successive parent to resolve
1302  *			address translations and add its mappings to the
1303  *			mapping list supplied in the page structure. It
1304  *			returns zero on success	or non-zero on failure.
1305  */
1306 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Delegate to the implementation, with dip as its own requester. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1313 
1314 /*
1315  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1316  *	Invokes platform specific DDI to determine whether attributes specified
1317  *	in attr(9s) are	valid for the region of memory that will be made
1318  *	available for direct access to user process via the mmap(2) system call.
1319  */
/*
 * Returns 0 if the region is mappable with the given attributes,
 * -1 otherwise.  On return, *hat_flags holds the hat flags picked up
 * from the framework mapping handle.
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 * NOTE(review): *hat_flags is stored even when ddi_map() failed;
	 * callers presumably only consume it on a 0 return -- confirm.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1382 
1383 
1384 /*
1385  * Property functions:	 See also, ddipropdefs.h.
1386  *
1387  * These functions are the framework for the property functions,
1388  * i.e. they support software defined properties.  All implementation
1389  * specific property handling (i.e.: self-identifying devices and
 * PROM defined properties) are handled in the implementation specific
1391  * functions (defined in ddi_implfuncs.h).
1392  */
1393 
1394 /*
1395  * nopropop:	Shouldn't be called, right?
1396  */
1397 int
1398 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1399     char *name, caddr_t valuep, int *lengthp)
1400 {
1401 	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
1402 	return (DDI_PROP_NOT_FOUND);
1403 }
1404 
1405 #ifdef	DDI_PROP_DEBUG
1406 int ddi_prop_debug_flag = 0;
1407 
1408 int
1409 ddi_prop_debug(int enable)
1410 {
1411 	int prev = ddi_prop_debug_flag;
1412 
1413 	if ((enable != 0) || (prev != 0))
1414 		printf("ddi_prop_debug: debugging %s\n",
1415 		    enable ? "enabled" : "disabled");
1416 	ddi_prop_debug_flag = enable;
1417 	return (prev);
1418 }
1419 
1420 #endif	/* DDI_PROP_DEBUG */
1421 
1422 /*
1423  * Search a property list for a match, if found return pointer
1424  * to matching prop struct, else return NULL.
1425  */
1426 
1427 ddi_prop_t *
1428 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1429 {
1430 	ddi_prop_t	*propp;
1431 
1432 	/*
1433 	 * find the property in child's devinfo:
1434 	 * Search order defined by this search function is first matching
1435 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1436 	 * dev == propp->prop_dev, name == propp->name, and the correct
1437 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1438 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1439 	 */
1440 	if (dev == DDI_DEV_T_NONE)
1441 		dev = DDI_DEV_T_ANY;
1442 
1443 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1444 
1445 		if (!DDI_STRSAME(propp->prop_name, name))
1446 			continue;
1447 
1448 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1449 			continue;
1450 
1451 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1452 			continue;
1453 
1454 		return (propp);
1455 	}
1456 
1457 	return ((ddi_prop_t *)0);
1458 }
1459 
1460 /*
1461  * Search for property within devnames structures
1462  */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* NOTE(review): read without dn_lock held -- presumably a benign
	 * fast-path check, since the list walk below retakes the lock. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * Rootnex-global and LDI wildcard lookups skip the
		 * per-dev match; everything else must match exactly.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1510 
1511 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1512 
1513 /*
1514  * ddi_prop_search_global:
1515  *	Search the global property list within devnames
1516  *	for the named property.  Return the encoded value.
1517  */
1518 static int
1519 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1520     void *valuep, uint_t *lengthp)
1521 {
1522 	ddi_prop_t	*propp;
1523 	caddr_t		buffer;
1524 
1525 	propp =  i_ddi_search_global_prop(dev, name, flags);
1526 
1527 	/* Property NOT found, bail */
1528 	if (propp == (ddi_prop_t *)0)
1529 		return (DDI_PROP_NOT_FOUND);
1530 
1531 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1532 		return (DDI_PROP_UNDEFINED);
1533 
1534 	if ((buffer = kmem_alloc(propp->prop_len,
1535 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1536 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1537 		return (DDI_PROP_NO_MEMORY);
1538 	}
1539 
1540 	/*
1541 	 * Return the encoded data
1542 	 */
1543 	*(caddr_t *)valuep = buffer;
1544 	*lengthp = propp->prop_len;
1545 	bcopy(propp->prop_val, buffer, propp->prop_len);
1546 
1547 	return (DDI_PROP_SUCCESS);
1548 }
1549 
1550 /*
1551  * ddi_prop_search_common:	Lookup and return the encoded value
1552  */
/*
 * ddi_prop_search_common:  Core software-property lookup engine.
 *
 * Searches dip's driver, system, driver-global and h/w property lists
 * under devi_lock, then iterates up the device tree through each
 * parent's bus_prop_op function unless DDI_PROP_DONTPASS stops the
 * ascent.  prop_op selects the service: existence check (PROP_EXISTS),
 * length only (PROP_LEN), copy into the caller's buffer
 * (PROP_LEN_AND_VAL_BUF), or allocate-and-copy
 * (PROP_LEN_AND_VAL_ALLOC).  For sleeping allocations the buffer is
 * pre-allocated with the mutex dropped and the search retried, since
 * the property may change size in between.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* KM_SLEEP pre-allocation */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				/*
				 * No sleeping prealloc available: must
				 * allocate KM_NOSLEEP under the mutex.
				 */
				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		dip = pdip;
	}
	/*NOTREACHED*/
}
1780 
1781 
1782 /*
1783  * ddi_prop_op: The basic property operator for drivers.
1784  *
1785  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1786  *
1787  *	prop_op			valuep
1788  *	------			------
1789  *
1790  *	PROP_LEN		<unused>
1791  *
1792  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1793  *
1794  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1795  *				address of allocated buffer, if successful)
1796  */
1797 int
1798 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1799     char *name, caddr_t valuep, int *lengthp)
1800 {
1801 	int	i;
1802 
1803 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1804 
1805 	/*
1806 	 * If this was originally an LDI prop lookup then we bail here.
1807 	 * The reason is that the LDI property lookup interfaces first call
1808 	 * a drivers prop_op() entry point to allow it to override
1809 	 * properties.  But if we've made it here, then the driver hasn't
1810 	 * overriden any properties.  We don't want to continue with the
1811 	 * property search here because we don't have any type inforamtion.
1812 	 * When we return failure, the LDI interfaces will then proceed to
1813 	 * call the typed property interfaces to look up the property.
1814 	 */
1815 	if (mod_flags & DDI_PROP_DYNAMIC)
1816 		return (DDI_PROP_NOT_FOUND);
1817 
1818 	/*
1819 	 * check for pre-typed property consumer asking for typed property:
1820 	 * see e_ddi_getprop_int64.
1821 	 */
1822 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1823 		mod_flags |= DDI_PROP_TYPE_INT64;
1824 	mod_flags |= DDI_PROP_TYPE_ANY;
1825 
1826 	i = ddi_prop_search_common(dev, dip, prop_op,
1827 	    mod_flags, name, valuep, (uint_t *)lengthp);
1828 	if (i == DDI_PROP_FOUND_1275)
1829 		return (DDI_PROP_SUCCESS);
1830 	return (i);
1831 }
1832 
1833 /*
1834  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1835  * maintain size in number of blksize blocks.  Provides a dynamic property
1836  * implementation for size oriented properties based on nblocks64 and blksize
1837  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1838  * is too large.  This interface should not be used with a nblocks64 that
1839  * represents the driver's idea of how to represent unknown, if nblocks is
1840  * unknown use ddi_prop_op.
1841  */
1842 int
1843 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1844     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1845     uint64_t nblocks64, uint_t blksize)
1846 {
1847 	uint64_t size64;
1848 	int	blkshift;
1849 
1850 	/* convert block size to shift value */
1851 	ASSERT(BIT_ONLYONESET(blksize));
1852 	blkshift = highbit(blksize) - 1;
1853 
1854 	/*
1855 	 * There is no point in supporting nblocks64 values that don't have
1856 	 * an accurate uint64_t byte count representation.
1857 	 */
1858 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1859 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1860 		    name, valuep, lengthp));
1861 
1862 	size64 = nblocks64 << blkshift;
1863 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1864 	    name, valuep, lengthp, size64, blksize));
1865 }
1866 
1867 /*
1868  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1869  */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	/* Delegate with the default DEV_BSIZE block size. */
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1877 
1878 /*
1879  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes. Provides a dynamic property implementation for
1881  * size oriented properties based on size64 value and blksize passed in by the
1882  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1883  * should not be used with a size64 that represents the driver's idea of how
1884  * to represent unknown, if size is unknown use ddi_prop_op.
1885  *
1886  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1887  * integers. While the most likely interface to request them ([bc]devi_size)
1888  * is declared int (signed) there is no enforcement of this, which means we
1889  * can't enforce limitations here without risking regression.
1890  */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * "Size"/"Nblocks" are always 64-bit; the legacy lowercase
	 * names are served only while the value still fits in 32 bits.
	 * Anything else falls through to the static property lists.
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1981 
1982 /*
1983  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1984  */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	/* Delegate with the default DEV_BSIZE block size. */
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1992 
1993 /*
1994  * Variable length props...
1995  */
1996 
1997 /*
1998  * ddi_getlongprop:	Get variable length property len+val into a buffer
1999  *		allocated by property provider via kmem_alloc. Requester
2000  *		is responsible for freeing returned property via kmem_free.
2001  *
2002  *	Arguments:
2003  *
2004  *	dev_t:	Input:	dev_t of property.
2005  *	dip:	Input:	dev_info_t pointer of child.
2006  *	flags:	Input:	Possible flag modifiers are:
2007  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
2008  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
2009  *	name:	Input:	name of property.
2010  *	valuep:	Output:	Addr of callers buffer pointer.
2011  *	lengthp:Output:	*lengthp will contain prop length on exit.
2012  *
2013  *	Possible Returns:
2014  *
2015  *		DDI_PROP_SUCCESS:	Prop found and returned.
2016  *		DDI_PROP_NOT_FOUND:	Prop not found
2017  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
2018  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
2019  */
2020 
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Provider allocates via kmem_alloc; the caller must kmem_free. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
2028 
2029 /*
2030  *
2031  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2032  *				buffer. (no memory allocation by provider).
2033  *
2034  *	dev_t:	Input:	dev_t of property.
2035  *	dip:	Input:	dev_info_t pointer of child.
2036  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2037  *	name:	Input:	name of property
2038  *	valuep:	Input:	ptr to callers buffer.
2039  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2040  *			actual length of property on exit.
2041  *
2042  *	Possible returns:
2043  *
2044  *		DDI_PROP_SUCCESS	Prop found and returned
2045  *		DDI_PROP_NOT_FOUND	Prop not found
2046  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2047  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2048  *					no value returned, but actual prop
2049  *					length returned in *lengthp
2050  *
2051  */
2052 
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	/* Value is copied into the caller's buffer; no allocation here. */
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2060 
2061 /*
2062  * Integer/boolean sized props.
2063  *
2064  * Call is value only... returns found boolean or int sized prop value or
2065  * defvalue if prop not found or is wrong length or is explicitly undefined.
2066  * Only flag is DDI_PROP_DONTPASS...
2067  *
2068  * By convention, this interface returns boolean (0) sized properties
2069  * as value (int)1.
2070  *
2071  * This never returns an error, if property not found or specifically
2072  * undefined, the input `defvalue' is returned.
2073  */
2074 
2075 int
2076 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2077 {
2078 	int	propvalue = defvalue;
2079 	int	proplength = sizeof (int);
2080 	int	error;
2081 
2082 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2083 	    flags, name, (caddr_t)&propvalue, &proplength);
2084 
2085 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2086 		propvalue = 1;
2087 
2088 	return (propvalue);
2089 }
2090 
2091 /*
2092  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2093  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2094  */
2095 
2096 int
2097 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2098 {
2099 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2100 }
2101 
2102 /*
2103  * Allocate a struct prop_driver_data, along with 'size' bytes
2104  * for decoded property data.  This structure is freed by
2105  * calling ddi_prop_free(9F).
2106  */
2107 static void *
2108 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2109 {
2110 	struct prop_driver_data *pdd;
2111 
2112 	/*
2113 	 * Allocate a structure with enough memory to store the decoded data.
2114 	 */
2115 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2116 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2117 	pdd->pdd_prop_free = prop_free;
2118 
2119 	/*
2120 	 * Return a pointer to the location to put the decoded data.
2121 	 */
2122 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2123 }
2124 
2125 /*
2126  * Allocated the memory needed to store the encoded data in the property
2127  * handle.
2128  */
2129 static int
2130 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2131 {
2132 	/*
2133 	 * If size is zero, then set data to NULL and size to 0.  This
2134 	 * is a boolean property.
2135 	 */
2136 	if (size == 0) {
2137 		ph->ph_size = 0;
2138 		ph->ph_data = NULL;
2139 		ph->ph_cur_pos = NULL;
2140 		ph->ph_save_pos = NULL;
2141 	} else {
2142 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2143 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2144 			if (ph->ph_data == NULL)
2145 				return (DDI_PROP_NO_MEMORY);
2146 		} else
2147 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2148 		ph->ph_size = size;
2149 		ph->ph_cur_pos = ph->ph_data;
2150 		ph->ph_save_pos = ph->ph_data;
2151 	}
2152 	return (DDI_PROP_SUCCESS);
2153 }
2154 
2155 /*
2156  * Free the space allocated by the lookup routines.  Each lookup routine
2157  * returns a pointer to the decoded data to the driver.  The driver then
2158  * passes this pointer back to us.  This data actually lives in a struct
2159  * prop_driver_data.  We use negative indexing to find the beginning of
2160  * the structure and then free the entire structure using the size and
2161  * the free routine stored in the structure.
2162  */
2163 void
2164 ddi_prop_free(void *datap)
2165 {
2166 	struct prop_driver_data *pdd;
2167 
2168 	/*
2169 	 * Get the structure
2170 	 */
2171 	pdd = (struct prop_driver_data *)
2172 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
2173 	/*
2174 	 * Call the free routine to free it
2175 	 */
2176 	(*pdd->pdd_prop_free)(pdd);
2177 }
2178 
2179 /*
2180  * Free the data associated with an array of ints,
2181  * allocated with ddi_prop_decode_alloc().
2182  */
static void
ddi_prop_free_ints(struct prop_driver_data *pdd)
{
	/* pdd_size covers the prop_driver_data header plus the int array. */
	kmem_free(pdd, pdd->pdd_size);
}
2188 
2189 /*
2190  * Free a single string property or a single string contained within
2191  * the argv style return value of an array of strings.
2192  */
static void
ddi_prop_free_string(struct prop_driver_data *pdd)
{
	/* pdd_size covers the prop_driver_data header plus the string. */
	kmem_free(pdd, pdd->pdd_size);

}
2199 
2200 /*
2201  * Free an array of strings.
2202  */
static void
ddi_prop_free_strings(struct prop_driver_data *pdd)
{
	/* pdd_size covers the header, pointer vector and string bodies. */
	kmem_free(pdd, pdd->pdd_size);
}
2208 
2209 /*
2210  * Free the data associated with an array of bytes.
2211  */
static void
ddi_prop_free_bytes(struct prop_driver_data *pdd)
{
	/* pdd_size covers the prop_driver_data header plus the byte array. */
	kmem_free(pdd, pdd->pdd_size);
}
2217 
2218 /*
2219  * Reset the current location pointer in the property handle to the
2220  * beginning of the data.
2221  */
void
ddi_prop_reset_pos(prop_handle_t *ph)
{
	/* Rewind both the cursor and the saved position to the start. */
	ph->ph_cur_pos = ph->ph_data;
	ph->ph_save_pos = ph->ph_data;
}
2228 
2229 /*
2230  * Restore the current location pointer in the property handle to the
2231  * saved position.
2232  */
void
ddi_prop_save_pos(prop_handle_t *ph)
{
	/* Remember the current cursor position for a later restore. */
	ph->ph_save_pos = ph->ph_cur_pos;
}
2238 
2239 /*
2240  * Save the location that the current location pointer is pointing to..
2241  */
void
ddi_prop_restore_pos(prop_handle_t *ph)
{
	/* Rewind the cursor to the position saved by ddi_prop_save_pos(). */
	ph->ph_cur_pos = ph->ph_save_pos;
}
2247 
2248 /*
2249  * Property encode/decode functions
2250  */
2251 
2252 /*
2253  * Decode a single integer property
2254  */
2255 static int
2256 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2257 {
2258 	int	i;
2259 	int	tmp;
2260 
2261 	/*
2262 	 * If there is nothing to decode return an error
2263 	 */
2264 	if (ph->ph_size == 0)
2265 		return (DDI_PROP_END_OF_DATA);
2266 
2267 	/*
2268 	 * Decode the property as a single integer and return it
2269 	 * in data if we were able to decode it.
2270 	 */
2271 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2272 	if (i < DDI_PROP_RESULT_OK) {
2273 		switch (i) {
2274 		case DDI_PROP_RESULT_EOF:
2275 			return (DDI_PROP_END_OF_DATA);
2276 
2277 		case DDI_PROP_RESULT_ERROR:
2278 			return (DDI_PROP_CANNOT_DECODE);
2279 		}
2280 	}
2281 
2282 	*(int *)data = tmp;
2283 	*nelements = 1;
2284 	return (DDI_PROP_SUCCESS);
2285 }
2286 
2287 /*
2288  * Decode a single 64 bit integer property
2289  */
2290 static int
2291 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2292 {
2293 	int	i;
2294 	int64_t	tmp;
2295 
2296 	/*
2297 	 * If there is nothing to decode return an error
2298 	 */
2299 	if (ph->ph_size == 0)
2300 		return (DDI_PROP_END_OF_DATA);
2301 
2302 	/*
2303 	 * Decode the property as a single integer and return it
2304 	 * in data if we were able to decode it.
2305 	 */
2306 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2307 	if (i < DDI_PROP_RESULT_OK) {
2308 		switch (i) {
2309 		case DDI_PROP_RESULT_EOF:
2310 			return (DDI_PROP_END_OF_DATA);
2311 
2312 		case DDI_PROP_RESULT_ERROR:
2313 			return (DDI_PROP_CANNOT_DECODE);
2314 		}
2315 	}
2316 
2317 	*(int64_t *)data = tmp;
2318 	*nelements = 1;
2319 	return (DDI_PROP_SUCCESS);
2320 }
2321 
2322 /*
2323  * Decode an array of integers property
2324  */
2325 static int
2326 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2327 {
2328 	int	i;
2329 	int	cnt = 0;
2330 	int	*tmp;
2331 	int	*intp;
2332 	int	n;
2333 
2334 	/*
2335 	 * Figure out how many array elements there are by going through the
2336 	 * data without decoding it first and counting.
2337 	 */
2338 	for (;;) {
2339 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2340 		if (i < 0)
2341 			break;
2342 		cnt++;
2343 	}
2344 
2345 	/*
2346 	 * If there are no elements return an error
2347 	 */
2348 	if (cnt == 0)
2349 		return (DDI_PROP_END_OF_DATA);
2350 
2351 	/*
2352 	 * If we cannot skip through the data, we cannot decode it
2353 	 */
2354 	if (i == DDI_PROP_RESULT_ERROR)
2355 		return (DDI_PROP_CANNOT_DECODE);
2356 
2357 	/*
2358 	 * Reset the data pointer to the beginning of the encoded data
2359 	 */
2360 	ddi_prop_reset_pos(ph);
2361 
2362 	/*
2363 	 * Allocated memory to store the decoded value in.
2364 	 */
2365 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2366 	    ddi_prop_free_ints);
2367 
2368 	/*
2369 	 * Decode each element and place it in the space we just allocated
2370 	 */
2371 	tmp = intp;
2372 	for (n = 0; n < cnt; n++, tmp++) {
2373 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2374 		if (i < DDI_PROP_RESULT_OK) {
2375 			/*
2376 			 * Free the space we just allocated
2377 			 * and return an error.
2378 			 */
2379 			ddi_prop_free(intp);
2380 			switch (i) {
2381 			case DDI_PROP_RESULT_EOF:
2382 				return (DDI_PROP_END_OF_DATA);
2383 
2384 			case DDI_PROP_RESULT_ERROR:
2385 				return (DDI_PROP_CANNOT_DECODE);
2386 			}
2387 		}
2388 	}
2389 
2390 	*nelements = cnt;
2391 	*(int **)data = intp;
2392 
2393 	return (DDI_PROP_SUCCESS);
2394 }
2395 
2396 /*
2397  * Decode a 64 bit integer array property
2398  */
2399 static int
2400 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2401 {
2402 	int	i;
2403 	int	n;
2404 	int	cnt = 0;
2405 	int64_t	*tmp;
2406 	int64_t	*intp;
2407 
2408 	/*
2409 	 * Count the number of array elements by going
2410 	 * through the data without decoding it.
2411 	 */
2412 	for (;;) {
2413 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2414 		if (i < 0)
2415 			break;
2416 		cnt++;
2417 	}
2418 
2419 	/*
2420 	 * If there are no elements return an error
2421 	 */
2422 	if (cnt == 0)
2423 		return (DDI_PROP_END_OF_DATA);
2424 
2425 	/*
2426 	 * If we cannot skip through the data, we cannot decode it
2427 	 */
2428 	if (i == DDI_PROP_RESULT_ERROR)
2429 		return (DDI_PROP_CANNOT_DECODE);
2430 
2431 	/*
2432 	 * Reset the data pointer to the beginning of the encoded data
2433 	 */
2434 	ddi_prop_reset_pos(ph);
2435 
2436 	/*
2437 	 * Allocate memory to store the decoded value.
2438 	 */
2439 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2440 	    ddi_prop_free_ints);
2441 
2442 	/*
2443 	 * Decode each element and place it in the space allocated
2444 	 */
2445 	tmp = intp;
2446 	for (n = 0; n < cnt; n++, tmp++) {
2447 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2448 		if (i < DDI_PROP_RESULT_OK) {
2449 			/*
2450 			 * Free the space we just allocated
2451 			 * and return an error.
2452 			 */
2453 			ddi_prop_free(intp);
2454 			switch (i) {
2455 			case DDI_PROP_RESULT_EOF:
2456 				return (DDI_PROP_END_OF_DATA);
2457 
2458 			case DDI_PROP_RESULT_ERROR:
2459 				return (DDI_PROP_CANNOT_DECODE);
2460 			}
2461 		}
2462 	}
2463 
2464 	*nelements = cnt;
2465 	*(int64_t **)data = intp;
2466 
2467 	return (DDI_PROP_SUCCESS);
2468 }
2469 
2470 /*
2471  * Encode an array of integers property (Can be one element)
2472  */
2473 int
2474 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2475 {
2476 	int	i;
2477 	int	*tmp;
2478 	int	cnt;
2479 	int	size;
2480 
2481 	/*
2482 	 * If there is no data, we cannot do anything
2483 	 */
2484 	if (nelements == 0)
2485 		return (DDI_PROP_CANNOT_ENCODE);
2486 
2487 	/*
2488 	 * Get the size of an encoded int.
2489 	 */
2490 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2491 
2492 	if (size < DDI_PROP_RESULT_OK) {
2493 		switch (size) {
2494 		case DDI_PROP_RESULT_EOF:
2495 			return (DDI_PROP_END_OF_DATA);
2496 
2497 		case DDI_PROP_RESULT_ERROR:
2498 			return (DDI_PROP_CANNOT_ENCODE);
2499 		}
2500 	}
2501 
2502 	/*
2503 	 * Allocate space in the handle to store the encoded int.
2504 	 */
2505 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2506 	    DDI_PROP_SUCCESS)
2507 		return (DDI_PROP_NO_MEMORY);
2508 
2509 	/*
2510 	 * Encode the array of ints.
2511 	 */
2512 	tmp = (int *)data;
2513 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2514 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2515 		if (i < DDI_PROP_RESULT_OK) {
2516 			switch (i) {
2517 			case DDI_PROP_RESULT_EOF:
2518 				return (DDI_PROP_END_OF_DATA);
2519 
2520 			case DDI_PROP_RESULT_ERROR:
2521 				return (DDI_PROP_CANNOT_ENCODE);
2522 			}
2523 		}
2524 	}
2525 
2526 	return (DDI_PROP_SUCCESS);
2527 }
2528 
2529 
2530 /*
2531  * Encode a 64 bit integer array property
2532  */
2533 int
2534 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2535 {
2536 	int i;
2537 	int cnt;
2538 	int size;
2539 	int64_t *tmp;
2540 
2541 	/*
2542 	 * If there is no data, we cannot do anything
2543 	 */
2544 	if (nelements == 0)
2545 		return (DDI_PROP_CANNOT_ENCODE);
2546 
2547 	/*
2548 	 * Get the size of an encoded 64 bit int.
2549 	 */
2550 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2551 
2552 	if (size < DDI_PROP_RESULT_OK) {
2553 		switch (size) {
2554 		case DDI_PROP_RESULT_EOF:
2555 			return (DDI_PROP_END_OF_DATA);
2556 
2557 		case DDI_PROP_RESULT_ERROR:
2558 			return (DDI_PROP_CANNOT_ENCODE);
2559 		}
2560 	}
2561 
2562 	/*
2563 	 * Allocate space in the handle to store the encoded int.
2564 	 */
2565 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2566 	    DDI_PROP_SUCCESS)
2567 		return (DDI_PROP_NO_MEMORY);
2568 
2569 	/*
2570 	 * Encode the array of ints.
2571 	 */
2572 	tmp = (int64_t *)data;
2573 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2574 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2575 		if (i < DDI_PROP_RESULT_OK) {
2576 			switch (i) {
2577 			case DDI_PROP_RESULT_EOF:
2578 				return (DDI_PROP_END_OF_DATA);
2579 
2580 			case DDI_PROP_RESULT_ERROR:
2581 				return (DDI_PROP_CANNOT_ENCODE);
2582 			}
2583 		}
2584 	}
2585 
2586 	return (DDI_PROP_SUCCESS);
2587 }
2588 
2589 /*
2590  * Decode a single string property
2591  */
2592 static int
2593 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2594 {
2595 	char		*tmp;
2596 	char		*str;
2597 	int		i;
2598 	int		size;
2599 
2600 	/*
2601 	 * If there is nothing to decode return an error
2602 	 */
2603 	if (ph->ph_size == 0)
2604 		return (DDI_PROP_END_OF_DATA);
2605 
2606 	/*
2607 	 * Get the decoded size of the encoded string.
2608 	 */
2609 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2610 	if (size < DDI_PROP_RESULT_OK) {
2611 		switch (size) {
2612 		case DDI_PROP_RESULT_EOF:
2613 			return (DDI_PROP_END_OF_DATA);
2614 
2615 		case DDI_PROP_RESULT_ERROR:
2616 			return (DDI_PROP_CANNOT_DECODE);
2617 		}
2618 	}
2619 
2620 	/*
2621 	 * Allocated memory to store the decoded value in.
2622 	 */
2623 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2624 
2625 	ddi_prop_reset_pos(ph);
2626 
2627 	/*
2628 	 * Decode the str and place it in the space we just allocated
2629 	 */
2630 	tmp = str;
2631 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2632 	if (i < DDI_PROP_RESULT_OK) {
2633 		/*
2634 		 * Free the space we just allocated
2635 		 * and return an error.
2636 		 */
2637 		ddi_prop_free(str);
2638 		switch (i) {
2639 		case DDI_PROP_RESULT_EOF:
2640 			return (DDI_PROP_END_OF_DATA);
2641 
2642 		case DDI_PROP_RESULT_ERROR:
2643 			return (DDI_PROP_CANNOT_DECODE);
2644 		}
2645 	}
2646 
2647 	*(char **)data = str;
2648 	*nelements = 1;
2649 
2650 	return (DDI_PROP_SUCCESS);
2651 }
2652 
2653 /*
2654  * Decode an array of strings.
2655  */
2656 int
2657 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2658 {
2659 	int		cnt = 0;
2660 	char		**strs;
2661 	char		**tmp;
2662 	char		*ptr;
2663 	int		i;
2664 	int		n;
2665 	int		size;
2666 	size_t		nbytes;
2667 
2668 	/*
2669 	 * Figure out how many array elements there are by going through the
2670 	 * data without decoding it first and counting.
2671 	 */
2672 	for (;;) {
2673 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2674 		if (i < 0)
2675 			break;
2676 		cnt++;
2677 	}
2678 
2679 	/*
2680 	 * If there are no elements return an error
2681 	 */
2682 	if (cnt == 0)
2683 		return (DDI_PROP_END_OF_DATA);
2684 
2685 	/*
2686 	 * If we cannot skip through the data, we cannot decode it
2687 	 */
2688 	if (i == DDI_PROP_RESULT_ERROR)
2689 		return (DDI_PROP_CANNOT_DECODE);
2690 
2691 	/*
2692 	 * Reset the data pointer to the beginning of the encoded data
2693 	 */
2694 	ddi_prop_reset_pos(ph);
2695 
2696 	/*
2697 	 * Figure out how much memory we need for the sum total
2698 	 */
2699 	nbytes = (cnt + 1) * sizeof (char *);
2700 
2701 	for (n = 0; n < cnt; n++) {
2702 		/*
2703 		 * Get the decoded size of the current encoded string.
2704 		 */
2705 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2706 		if (size < DDI_PROP_RESULT_OK) {
2707 			switch (size) {
2708 			case DDI_PROP_RESULT_EOF:
2709 				return (DDI_PROP_END_OF_DATA);
2710 
2711 			case DDI_PROP_RESULT_ERROR:
2712 				return (DDI_PROP_CANNOT_DECODE);
2713 			}
2714 		}
2715 
2716 		nbytes += size;
2717 	}
2718 
2719 	/*
2720 	 * Allocate memory in which to store the decoded strings.
2721 	 */
2722 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2723 
2724 	/*
2725 	 * Set up pointers for each string by figuring out yet
2726 	 * again how long each string is.
2727 	 */
2728 	ddi_prop_reset_pos(ph);
2729 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2730 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2731 		/*
2732 		 * Get the decoded size of the current encoded string.
2733 		 */
2734 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2735 		if (size < DDI_PROP_RESULT_OK) {
2736 			ddi_prop_free(strs);
2737 			switch (size) {
2738 			case DDI_PROP_RESULT_EOF:
2739 				return (DDI_PROP_END_OF_DATA);
2740 
2741 			case DDI_PROP_RESULT_ERROR:
2742 				return (DDI_PROP_CANNOT_DECODE);
2743 			}
2744 		}
2745 
2746 		*tmp = ptr;
2747 		ptr += size;
2748 	}
2749 
2750 	/*
2751 	 * String array is terminated by a NULL
2752 	 */
2753 	*tmp = NULL;
2754 
2755 	/*
2756 	 * Finally, we can decode each string
2757 	 */
2758 	ddi_prop_reset_pos(ph);
2759 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2760 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2761 		if (i < DDI_PROP_RESULT_OK) {
2762 			/*
2763 			 * Free the space we just allocated
2764 			 * and return an error
2765 			 */
2766 			ddi_prop_free(strs);
2767 			switch (i) {
2768 			case DDI_PROP_RESULT_EOF:
2769 				return (DDI_PROP_END_OF_DATA);
2770 
2771 			case DDI_PROP_RESULT_ERROR:
2772 				return (DDI_PROP_CANNOT_DECODE);
2773 			}
2774 		}
2775 	}
2776 
2777 	*(char ***)data = strs;
2778 	*nelements = cnt;
2779 
2780 	return (DDI_PROP_SUCCESS);
2781 }
2782 
2783 /*
2784  * Encode a string.
2785  */
2786 int
2787 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2788 {
2789 	char		**tmp;
2790 	int		size;
2791 	int		i;
2792 
2793 	/*
2794 	 * If there is no data, we cannot do anything
2795 	 */
2796 	if (nelements == 0)
2797 		return (DDI_PROP_CANNOT_ENCODE);
2798 
2799 	/*
2800 	 * Get the size of the encoded string.
2801 	 */
2802 	tmp = (char **)data;
2803 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2804 	if (size < DDI_PROP_RESULT_OK) {
2805 		switch (size) {
2806 		case DDI_PROP_RESULT_EOF:
2807 			return (DDI_PROP_END_OF_DATA);
2808 
2809 		case DDI_PROP_RESULT_ERROR:
2810 			return (DDI_PROP_CANNOT_ENCODE);
2811 		}
2812 	}
2813 
2814 	/*
2815 	 * Allocate space in the handle to store the encoded string.
2816 	 */
2817 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2818 		return (DDI_PROP_NO_MEMORY);
2819 
2820 	ddi_prop_reset_pos(ph);
2821 
2822 	/*
2823 	 * Encode the string.
2824 	 */
2825 	tmp = (char **)data;
2826 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2827 	if (i < DDI_PROP_RESULT_OK) {
2828 		switch (i) {
2829 		case DDI_PROP_RESULT_EOF:
2830 			return (DDI_PROP_END_OF_DATA);
2831 
2832 		case DDI_PROP_RESULT_ERROR:
2833 			return (DDI_PROP_CANNOT_ENCODE);
2834 		}
2835 	}
2836 
2837 	return (DDI_PROP_SUCCESS);
2838 }
2839 
2840 
2841 /*
2842  * Encode an array of strings.
2843  */
2844 int
2845 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2846 {
2847 	int		cnt = 0;
2848 	char		**tmp;
2849 	int		size;
2850 	uint_t		total_size;
2851 	int		i;
2852 
2853 	/*
2854 	 * If there is no data, we cannot do anything
2855 	 */
2856 	if (nelements == 0)
2857 		return (DDI_PROP_CANNOT_ENCODE);
2858 
2859 	/*
2860 	 * Get the total size required to encode all the strings.
2861 	 */
2862 	total_size = 0;
2863 	tmp = (char **)data;
2864 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2865 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2866 		if (size < DDI_PROP_RESULT_OK) {
2867 			switch (size) {
2868 			case DDI_PROP_RESULT_EOF:
2869 				return (DDI_PROP_END_OF_DATA);
2870 
2871 			case DDI_PROP_RESULT_ERROR:
2872 				return (DDI_PROP_CANNOT_ENCODE);
2873 			}
2874 		}
2875 		total_size += (uint_t)size;
2876 	}
2877 
2878 	/*
2879 	 * Allocate space in the handle to store the encoded strings.
2880 	 */
2881 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2882 		return (DDI_PROP_NO_MEMORY);
2883 
2884 	ddi_prop_reset_pos(ph);
2885 
2886 	/*
2887 	 * Encode the array of strings.
2888 	 */
2889 	tmp = (char **)data;
2890 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2891 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2892 		if (i < DDI_PROP_RESULT_OK) {
2893 			switch (i) {
2894 			case DDI_PROP_RESULT_EOF:
2895 				return (DDI_PROP_END_OF_DATA);
2896 
2897 			case DDI_PROP_RESULT_ERROR:
2898 				return (DDI_PROP_CANNOT_ENCODE);
2899 			}
2900 		}
2901 	}
2902 
2903 	return (DDI_PROP_SUCCESS);
2904 }
2905 
2906 
2907 /*
2908  * Decode an array of bytes.
2909  */
2910 static int
2911 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2912 {
2913 	uchar_t		*tmp;
2914 	int		nbytes;
2915 	int		i;
2916 
2917 	/*
2918 	 * If there are no elements return an error
2919 	 */
2920 	if (ph->ph_size == 0)
2921 		return (DDI_PROP_END_OF_DATA);
2922 
2923 	/*
2924 	 * Get the size of the encoded array of bytes.
2925 	 */
2926 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2927 	    data, ph->ph_size);
2928 	if (nbytes < DDI_PROP_RESULT_OK) {
2929 		switch (nbytes) {
2930 		case DDI_PROP_RESULT_EOF:
2931 			return (DDI_PROP_END_OF_DATA);
2932 
2933 		case DDI_PROP_RESULT_ERROR:
2934 			return (DDI_PROP_CANNOT_DECODE);
2935 		}
2936 	}
2937 
2938 	/*
2939 	 * Allocated memory to store the decoded value in.
2940 	 */
2941 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2942 
2943 	/*
2944 	 * Decode each element and place it in the space we just allocated
2945 	 */
2946 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2947 	if (i < DDI_PROP_RESULT_OK) {
2948 		/*
2949 		 * Free the space we just allocated
2950 		 * and return an error
2951 		 */
2952 		ddi_prop_free(tmp);
2953 		switch (i) {
2954 		case DDI_PROP_RESULT_EOF:
2955 			return (DDI_PROP_END_OF_DATA);
2956 
2957 		case DDI_PROP_RESULT_ERROR:
2958 			return (DDI_PROP_CANNOT_DECODE);
2959 		}
2960 	}
2961 
2962 	*(uchar_t **)data = tmp;
2963 	*nelements = nbytes;
2964 
2965 	return (DDI_PROP_SUCCESS);
2966 }
2967 
2968 /*
2969  * Encode an array of bytes.
2970  */
2971 int
2972 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2973 {
2974 	int		size;
2975 	int		i;
2976 
2977 	/*
2978 	 * If there are no elements, then this is a boolean property,
2979 	 * so just create a property handle with no data and return.
2980 	 */
2981 	if (nelements == 0) {
2982 		(void) ddi_prop_encode_alloc(ph, 0);
2983 		return (DDI_PROP_SUCCESS);
2984 	}
2985 
2986 	/*
2987 	 * Get the size of the encoded array of bytes.
2988 	 */
2989 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2990 	    nelements);
2991 	if (size < DDI_PROP_RESULT_OK) {
2992 		switch (size) {
2993 		case DDI_PROP_RESULT_EOF:
2994 			return (DDI_PROP_END_OF_DATA);
2995 
2996 		case DDI_PROP_RESULT_ERROR:
2997 			return (DDI_PROP_CANNOT_DECODE);
2998 		}
2999 	}
3000 
3001 	/*
3002 	 * Allocate space in the handle to store the encoded bytes.
3003 	 */
3004 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
3005 		return (DDI_PROP_NO_MEMORY);
3006 
3007 	/*
3008 	 * Encode the array of bytes.
3009 	 */
3010 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3011 	    nelements);
3012 	if (i < DDI_PROP_RESULT_OK) {
3013 		switch (i) {
3014 		case DDI_PROP_RESULT_EOF:
3015 			return (DDI_PROP_END_OF_DATA);
3016 
3017 		case DDI_PROP_RESULT_ERROR:
3018 			return (DDI_PROP_CANNOT_ENCODE);
3019 		}
3020 	}
3021 
3022 	return (DDI_PROP_SUCCESS);
3023 }
3024 
3025 /*
3026  * OBP 1275 integer, string and byte operators.
3027  *
3028  * DDI_PROP_CMD_DECODE:
3029  *
3030  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3031  *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
3045  *
3046  * DDI_PROP_CMD_GET_ESIZE:
3047  *
3048  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3049  *	DDI_PROP_RESULT_EOF:		end of data
3050  *	> 0:				the encoded size
3051  *
3052  * DDI_PROP_CMD_GET_DSIZE:
3053  *
3054  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3055  *	DDI_PROP_RESULT_EOF:		end of data
3056  *	> 0:				the decoded size
3057  */
3058 
3059 /*
3060  * OBP 1275 integer operator
3061  *
3062  * OBP properties are a byte stream of data, so integers may not be
3063  * properly aligned.  Therefore we need to copy them one byte at a time.
3064  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* PROM ints may be truncated (boolean props). */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			/*
			 * NOTE(review): the arithmetic below is performed
			 * on (int *) operands, so "+ ph->ph_size - i" is
			 * scaled by sizeof (int) rather than being a byte
			 * offset — confirm this is the intended bound.
			 */
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			/* NOTE(review): same scaled-pointer concern here. */
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/* At or past the end of the buffer: nothing left to skip. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3178 
3179 /*
3180  * 64 bit integer operator.
3181  *
3182  * This is an extension, defined by Sun, to the 1275 integer
3183  * operator.  This routine handles the encoding/decoding of
3184  * 64 bit integer properties.
3185  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit integers are not decoded from the PROM. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): the arithmetic below is performed
			 * on (int64_t *) operands, so the "+ ph->ph_size -
			 * sizeof (int64_t)" bound is scaled by
			 * sizeof (int64_t) rather than being a byte
			 * offset — confirm this is the intended bound.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			/*
			 * NOTE(review): unreachable — the PH_FROM_PROM
			 * case already returned above.
			 */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encoded the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/* At or past the end of the buffer: nothing left to skip. */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3293 
/*
 * OBP 1275 string operator.
 *
 * OBP strings are NULL terminated.  The encoded form is the raw string
 * bytes.  Note that OBP does not always supply the terminating NULL
 * (e.g. 'true'/'false' boolean values), so every case below tolerates
 * a missing terminator at the end of the property buffer.
 */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;		/* byte count for GET_DSIZE */
	char	*p;		/* cursor into the encoded data */
	char	*end;		/* one past the last encoded byte */

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.  The caller is expected
		 * to have sized 'data' via DDI_PROP_CMD_GET_DSIZE.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Scan forward for the NULL terminator, bounded by the
		 * property size.  We must not trust the string to be
		 * properly formatted, since we may be looking up random
		 * OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 * Note: this also advances ph_cur_pos past the string.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3442 
3443 /*
3444  * OBP 1275 byte operator
3445  *
3446  * Caller must specify the number of bytes to get.  OBP encodes bytes
3447  * as a byte so there is a 1-to-1 translation.
3448  */
3449 int
3450 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3451 	uint_t nelements)
3452 {
3453 	switch (cmd) {
3454 	case DDI_PROP_CMD_DECODE:
3455 		/*
3456 		 * Check that there is encoded data
3457 		 */
3458 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3459 		    ph->ph_size < nelements ||
3460 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3461 		    ph->ph_size - nelements)))
3462 			return (DDI_PROP_RESULT_ERROR);
3463 
3464 		/*
3465 		 * Copy out the bytes
3466 		 */
3467 		bcopy(ph->ph_cur_pos, data, nelements);
3468 
3469 		/*
3470 		 * Move the current location
3471 		 */
3472 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3473 		return (DDI_PROP_RESULT_OK);
3474 
3475 	case DDI_PROP_CMD_ENCODE:
3476 		/*
3477 		 * Check that there is room to encode the data
3478 		 */
3479 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3480 		    ph->ph_size < nelements ||
3481 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3482 		    ph->ph_size - nelements)))
3483 			return (DDI_PROP_RESULT_ERROR);
3484 
3485 		/*
3486 		 * Copy in the bytes
3487 		 */
3488 		bcopy(data, ph->ph_cur_pos, nelements);
3489 
3490 		/*
3491 		 * Move the current location to the start of the next bit of
3492 		 * space where we can store encoded data.
3493 		 */
3494 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3495 		return (DDI_PROP_RESULT_OK);
3496 
3497 	case DDI_PROP_CMD_SKIP:
3498 		/*
3499 		 * Check that there is encoded data
3500 		 */
3501 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3502 		    ph->ph_size < nelements)
3503 			return (DDI_PROP_RESULT_ERROR);
3504 
3505 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3506 		    ph->ph_size - nelements))
3507 			return (DDI_PROP_RESULT_EOF);
3508 
3509 		/*
3510 		 * Move the current location
3511 		 */
3512 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3513 		return (DDI_PROP_RESULT_OK);
3514 
3515 	case DDI_PROP_CMD_GET_ESIZE:
3516 		/*
3517 		 * The size in bytes of the encoded size is the
3518 		 * same as the decoded size provided by the caller.
3519 		 */
3520 		return (nelements);
3521 
3522 	case DDI_PROP_CMD_GET_DSIZE:
3523 		/*
3524 		 * Just return the number of bytes specified by the caller.
3525 		 */
3526 		return (nelements);
3527 
3528 	default:
3529 #ifdef DEBUG
3530 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3531 		/*NOTREACHED*/
3532 #else
3533 		return (DDI_PROP_RESULT_ERROR);
3534 #endif	/* DEBUG */
3535 	}
3536 }
3537 
/*
 * Used for properties that come from the OBP, hardware configuration files,
 * or that are created by calls to ddi_prop_update(9F).
 *
 * Operator vector consumed through prop_handle_t; the members are the
 * integer, string, byte and 64-bit-integer encode/decode operators,
 * in that order.
 */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* 32-bit integer encode/decode */
	ddi_prop_1275_string,	/* NULL-terminated string encode/decode */
	ddi_prop_1275_bytes,	/* raw byte-array encode/decode */
	ddi_prop_int64_op	/* 64-bit integer encode/decode */
};
3548 
3549 
3550 /*
3551  * Interface to create/modify a managed property on child's behalf...
3552  * Flags interpreted are:
3553  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3554  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3555  *
3556  * Use same dev_t when modifying or undefining a property.
3557  * Search for properties with DDI_DEV_T_ANY to match first named
3558  * property on the list.
3559  *
3560  * Properties are stored LIFO and subsequently will match the first
3561  * `matching' instance.
3562  */
3563 
3564 /*
3565  * ddi_prop_add:	Add a software defined property
3566  */
3567 
/*
 * define to get a new ddi_prop_t.
 * km_flags are KM_SLEEP or KM_NOSLEEP.
 * Yields zero-filled storage (kmem_zalloc); evaluates to NULL if a
 * KM_NOSLEEP allocation fails.
 */

#define	DDI_NEW_PROP_T(km_flags)	\
	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3575 
/*
 * ddi_prop_add: worker for software-defined property creation.
 *
 * Allocates a new ddi_prop_t, copies in the name and (unless this is an
 * explicit undefine) the value, then links the new entry at the head of
 * the appropriate per-devinfo property list (driver, system, or
 * hardware, selected by 'flags').
 *
 * Returns DDI_PROP_SUCCESS; DDI_PROP_INVAL_ARG for a DDI_DEV_T_ANY dev
 * or a NULL/empty name; DDI_PROP_NO_MEMORY on allocation failure.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;	/* sleep only if caller allows */
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select which property list this flavor of property lives on. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know
	 * their major number.  They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order;
	 * the most recently added definition is found first on lookup.)
	 * All allocation is done before taking devi_lock so we never
	 * sleep while holding it.
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3674 
3675 
/*
 * ddi_prop_change:	Modify a software managed property value
 *
 *			Set new length and value if found.
 *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
 *			input name is the NULL string.
 *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
 *
 *			Note: an undef can be modified to be a define,
 *			(you can't go the other way.)
 */

static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;	/* preallocated replacement buffer */

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * (Allocation happens before taking devi_lock below, since
	 * kmem_alloc may sleep when DDI_PROP_CANSLEEP is set.)
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found it: fill the preallocated buffer, free the old
		 * value, and swap in the new value and length.  Any
		 * previous explicit undefine is converted back into a
		 * define here.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	mutex_exit(&(DEVI(dip)->devi_lock));
	/* Not found: discard the preallocated buffer and create afresh. */
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3757 
/*
 * Common update routine used to update and encode a property.	Creates
 * a property handle, calls the property encode routine, figures out if
 * the property already exists and updates if it does.	Otherwise it
 * creates if it does not exist.
 *
 * Returns the result of ddi_prop_add()/ddi_prop_change(), or an error
 * from the caller-supplied encode routine.
 */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/ddi_prop_change copy the value, so the encode
	 * buffer is no longer needed.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3843 
3844 
3845 /*
3846  * ddi_prop_create:	Define a managed property:
3847  *			See above for details.
3848  */
3849 
3850 int
3851 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3852     char *name, caddr_t value, int length)
3853 {
3854 	if (!(flag & DDI_PROP_CANSLEEP)) {
3855 		flag |= DDI_PROP_DONTSLEEP;
3856 #ifdef DDI_PROP_DEBUG
3857 		if (length != 0)
3858 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3859 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3860 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3861 #endif /* DDI_PROP_DEBUG */
3862 	}
3863 	flag &= ~DDI_PROP_SYSTEM_DEF;
3864 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3865 	return (ddi_prop_update_common(dev, dip, flag, name,
3866 	    value, length, ddi_prop_fm_encode_bytes));
3867 }
3868 
3869 int
3870 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3871     char *name, caddr_t value, int length)
3872 {
3873 	if (!(flag & DDI_PROP_CANSLEEP))
3874 		flag |= DDI_PROP_DONTSLEEP;
3875 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3876 	return (ddi_prop_update_common(dev, dip, flag,
3877 	    name, value, length, ddi_prop_fm_encode_bytes));
3878 }
3879 
3880 int
3881 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3882     char *name, caddr_t value, int length)
3883 {
3884 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3885 
3886 	/*
3887 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3888 	 * return error.
3889 	 */
3890 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3891 		return (DDI_PROP_INVAL_ARG);
3892 
3893 	if (!(flag & DDI_PROP_CANSLEEP))
3894 		flag |= DDI_PROP_DONTSLEEP;
3895 	flag &= ~DDI_PROP_SYSTEM_DEF;
3896 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3897 		return (DDI_PROP_NOT_FOUND);
3898 
3899 	return (ddi_prop_update_common(dev, dip,
3900 	    (flag | DDI_PROP_TYPE_BYTE), name,
3901 	    value, length, ddi_prop_fm_encode_bytes));
3902 }
3903 
3904 int
3905 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3906     char *name, caddr_t value, int length)
3907 {
3908 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3909 
3910 	/*
3911 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3912 	 * return error.
3913 	 */
3914 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3915 		return (DDI_PROP_INVAL_ARG);
3916 
3917 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3918 		return (DDI_PROP_NOT_FOUND);
3919 
3920 	if (!(flag & DDI_PROP_CANSLEEP))
3921 		flag |= DDI_PROP_DONTSLEEP;
3922 	return (ddi_prop_update_common(dev, dip,
3923 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3924 	    name, value, length, ddi_prop_fm_encode_bytes));
3925 }
3926 
3927 
/*
 * Common lookup routine used to lookup and decode a property.
 * Creates a property handle, searches for the raw encoded data,
 * fills in the handle, and calls the property decode functions
 * passed in.
 *
 * This routine is not static because ddi_bus_prop_op() which lives in
 * ddi_impl.c calls it.  No driver should be calling this routine.
 */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Default to sleeping allocations unless the caller forbids it. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.  Only the DLPI2 marker is stripped here;
		 * DDI_PROP_ROOTNEX_GLOBAL is passed through to the global
		 * search routine.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM marks data that came from the PROM.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data
	 * (the decoder allocated separate storage for the decoded result).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3999 
4000 /*
4001  * Lookup and return an array of composite properties.  The driver must
4002  * provide the decode routine.
4003  */
4004 int
4005 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4006     uint_t flags, char *name, void *data, uint_t *nelements,
4007     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4008 {
4009 	return (ddi_prop_lookup_common(match_dev, dip,
4010 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
4011 	    data, nelements, prop_decoder));
4012 }
4013 
4014 /*
4015  * Return 1 if a property exists (no type checking done).
4016  * Return 0 if it does not exist.
4017  */
4018 int
4019 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4020 {
4021 	int	i;
4022 	uint_t	x = 0;
4023 
4024 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4025 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4026 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4027 }
4028 
4029 
4030 /*
4031  * Update an array of composite properties.  The driver must
4032  * provide the encode routine.
4033  */
4034 int
4035 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4036     char *name, void *data, uint_t nelements,
4037     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4038 {
4039 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4040 	    name, data, nelements, prop_create));
4041 }
4042 
4043 /*
4044  * Get a single integer or boolean property and return it.
4045  * If the property does not exists, or cannot be decoded,
4046  * then return the defvalue passed in.
4047  *
4048  * This routine always succeeds.
4049  */
4050 int
4051 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4052     char *name, int defvalue)
4053 {
4054 	int	data;
4055 	uint_t	nelements;
4056 	int	rval;
4057 
4058 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4059 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4060 #ifdef DEBUG
4061 		if (dip != NULL) {
4062 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4063 			    " 0x%x (prop = %s, node = %s%d)", flags,
4064 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4065 		}
4066 #endif /* DEBUG */
4067 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4068 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4069 	}
4070 
4071 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4072 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4073 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4074 		if (rval == DDI_PROP_END_OF_DATA)
4075 			data = 1;
4076 		else
4077 			data = defvalue;
4078 	}
4079 	return (data);
4080 }
4081 
4082 /*
4083  * Get a single 64 bit integer or boolean property and return it.
4084  * If the property does not exists, or cannot be decoded,
4085  * then return the defvalue passed in.
4086  *
4087  * This routine always succeeds.
4088  */
4089 int64_t
4090 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4091     char *name, int64_t defvalue)
4092 {
4093 	int64_t	data;
4094 	uint_t	nelements;
4095 	int	rval;
4096 
4097 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4098 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4099 #ifdef DEBUG
4100 		if (dip != NULL) {
4101 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4102 			    " 0x%x (prop = %s, node = %s%d)", flags,
4103 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4104 		}
4105 #endif /* DEBUG */
4106 		return (DDI_PROP_INVAL_ARG);
4107 	}
4108 
4109 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4110 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4111 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4112 	    != DDI_PROP_SUCCESS) {
4113 		if (rval == DDI_PROP_END_OF_DATA)
4114 			data = 1;
4115 		else
4116 			data = defvalue;
4117 	}
4118 	return (data);
4119 }
4120 
4121 /*
4122  * Get an array of integer property
4123  */
4124 int
4125 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4126     char *name, int **data, uint_t *nelements)
4127 {
4128 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4129 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4130 #ifdef DEBUG
4131 		if (dip != NULL) {
4132 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4133 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4134 			    flags, name, ddi_driver_name(dip),
4135 			    ddi_get_instance(dip));
4136 		}
4137 #endif /* DEBUG */
4138 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4139 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4140 	}
4141 
4142 	return (ddi_prop_lookup_common(match_dev, dip,
4143 	    (flags | DDI_PROP_TYPE_INT), name, data,
4144 	    nelements, ddi_prop_fm_decode_ints));
4145 }
4146 
4147 /*
4148  * Get an array of 64 bit integer properties
4149  */
4150 int
4151 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4152     char *name, int64_t **data, uint_t *nelements)
4153 {
4154 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4155 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4156 #ifdef DEBUG
4157 		if (dip != NULL) {
4158 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4159 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4160 			    flags, name, ddi_driver_name(dip),
4161 			    ddi_get_instance(dip));
4162 		}
4163 #endif /* DEBUG */
4164 		return (DDI_PROP_INVAL_ARG);
4165 	}
4166 
4167 	return (ddi_prop_lookup_common(match_dev, dip,
4168 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4169 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4170 }
4171 
4172 /*
4173  * Update a single integer property.  If the property exists on the drivers
4174  * property list it updates, else it creates it.
4175  */
4176 int
4177 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4178     char *name, int data)
4179 {
4180 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4181 	    name, &data, 1, ddi_prop_fm_encode_ints));
4182 }
4183 
4184 /*
4185  * Update a single 64 bit integer property.
4186  * Update the driver property list if it exists, else create it.
4187  */
4188 int
4189 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4190     char *name, int64_t data)
4191 {
4192 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4193 	    name, &data, 1, ddi_prop_fm_encode_int64));
4194 }
4195 
4196 int
4197 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4198     char *name, int data)
4199 {
4200 	return (ddi_prop_update_common(match_dev, dip,
4201 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4202 	    name, &data, 1, ddi_prop_fm_encode_ints));
4203 }
4204 
4205 int
4206 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4207     char *name, int64_t data)
4208 {
4209 	return (ddi_prop_update_common(match_dev, dip,
4210 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4211 	    name, &data, 1, ddi_prop_fm_encode_int64));
4212 }
4213 
4214 /*
4215  * Update an array of integer property.  If the property exists on the drivers
4216  * property list it updates, else it creates it.
4217  */
4218 int
4219 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4220     char *name, int *data, uint_t nelements)
4221 {
4222 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4223 	    name, data, nelements, ddi_prop_fm_encode_ints));
4224 }
4225 
4226 /*
4227  * Update an array of 64 bit integer properties.
4228  * Update the driver property list if it exists, else create it.
4229  */
4230 int
4231 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4232     char *name, int64_t *data, uint_t nelements)
4233 {
4234 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4235 	    name, data, nelements, ddi_prop_fm_encode_int64));
4236 }
4237 
4238 int
4239 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4240     char *name, int64_t *data, uint_t nelements)
4241 {
4242 	return (ddi_prop_update_common(match_dev, dip,
4243 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4244 	    name, data, nelements, ddi_prop_fm_encode_int64));
4245 }
4246 
4247 int
4248 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4249     char *name, int *data, uint_t nelements)
4250 {
4251 	return (ddi_prop_update_common(match_dev, dip,
4252 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4253 	    name, data, nelements, ddi_prop_fm_encode_ints));
4254 }
4255 
4256 /*
4257  * Get a single string property.
4258  */
4259 int
4260 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4261     char *name, char **data)
4262 {
4263 	uint_t x;
4264 
4265 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4266 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4267 #ifdef DEBUG
4268 		if (dip != NULL) {
4269 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4270 			    "(prop = %s, node = %s%d); invalid bits ignored",
4271 			    "ddi_prop_lookup_string", flags, name,
4272 			    ddi_driver_name(dip), ddi_get_instance(dip));
4273 		}
4274 #endif /* DEBUG */
4275 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4276 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4277 	}
4278 
4279 	return (ddi_prop_lookup_common(match_dev, dip,
4280 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4281 	    &x, ddi_prop_fm_decode_string));
4282 }
4283 
4284 /*
4285  * Get an array of strings property.
4286  */
4287 int
4288 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4289     char *name, char ***data, uint_t *nelements)
4290 {
4291 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4292 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4293 #ifdef DEBUG
4294 		if (dip != NULL) {
4295 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4296 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4297 			    flags, name, ddi_driver_name(dip),
4298 			    ddi_get_instance(dip));
4299 		}
4300 #endif /* DEBUG */
4301 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4302 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4303 	}
4304 
4305 	return (ddi_prop_lookup_common(match_dev, dip,
4306 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4307 	    nelements, ddi_prop_fm_decode_strings));
4308 }
4309 
4310 /*
4311  * Update a single string property.
4312  */
4313 int
4314 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4315     char *name, char *data)
4316 {
4317 	return (ddi_prop_update_common(match_dev, dip,
4318 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4319 	    ddi_prop_fm_encode_string));
4320 }
4321 
4322 int
4323 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4324     char *name, char *data)
4325 {
4326 	return (ddi_prop_update_common(match_dev, dip,
4327 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4328 	    name, &data, 1, ddi_prop_fm_encode_string));
4329 }
4330 
4331 
4332 /*
4333  * Update an array of strings property.
4334  */
4335 int
4336 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4337     char *name, char **data, uint_t nelements)
4338 {
4339 	return (ddi_prop_update_common(match_dev, dip,
4340 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4341 	    ddi_prop_fm_encode_strings));
4342 }
4343 
4344 int
4345 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4346     char *name, char **data, uint_t nelements)
4347 {
4348 	return (ddi_prop_update_common(match_dev, dip,
4349 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4350 	    name, data, nelements,
4351 	    ddi_prop_fm_encode_strings));
4352 }
4353 
4354 
4355 /*
4356  * Get an array of bytes property.
4357  */
4358 int
4359 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4360     char *name, uchar_t **data, uint_t *nelements)
4361 {
4362 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4363 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4364 #ifdef DEBUG
4365 		if (dip != NULL) {
4366 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4367 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4368 			    flags, name, ddi_driver_name(dip),
4369 			    ddi_get_instance(dip));
4370 		}
4371 #endif /* DEBUG */
4372 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4373 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4374 	}
4375 
4376 	return (ddi_prop_lookup_common(match_dev, dip,
4377 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4378 	    nelements, ddi_prop_fm_decode_bytes));
4379 }
4380 
4381 /*
4382  * Update an array of bytes property.
4383  */
4384 int
4385 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4386     char *name, uchar_t *data, uint_t nelements)
4387 {
4388 	if (nelements == 0)
4389 		return (DDI_PROP_INVAL_ARG);
4390 
4391 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4392 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4393 }
4394 
4395 
4396 int
4397 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4398     char *name, uchar_t *data, uint_t nelements)
4399 {
4400 	if (nelements == 0)
4401 		return (DDI_PROP_INVAL_ARG);
4402 
4403 	return (ddi_prop_update_common(match_dev, dip,
4404 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4405 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4406 }
4407 
4408 
4409 /*
4410  * ddi_prop_remove_common:	Undefine a managed property:
4411  *			Input dev_t must match dev_t when defined.
4412  *			Returns DDI_PROP_NOT_FOUND, possibly.
4413  *			DDI_PROP_INVAL_ARG is also possible if dev is
4414  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4415  */
4416 int
4417 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4418 {
4419 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4420 	ddi_prop_t	*propp;
4421 	ddi_prop_t	*lastpropp = NULL;
4422 
4423 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4424 	    (strlen(name) == 0)) {
4425 		return (DDI_PROP_INVAL_ARG);
4426 	}
4427 
4428 	if (flag & DDI_PROP_SYSTEM_DEF)
4429 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4430 	else if (flag & DDI_PROP_HW_DEF)
4431 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4432 
4433 	mutex_enter(&(DEVI(dip)->devi_lock));
4434 
4435 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4436 		if (DDI_STRSAME(propp->prop_name, name) &&
4437 		    (dev == propp->prop_dev)) {
4438 			/*
4439 			 * Unlink this propp allowing for it to
4440 			 * be first in the list:
4441 			 */
4442 
4443 			if (lastpropp == NULL)
4444 				*list_head = propp->prop_next;
4445 			else
4446 				lastpropp->prop_next = propp->prop_next;
4447 
4448 			mutex_exit(&(DEVI(dip)->devi_lock));
4449 
4450 			/*
4451 			 * Free memory and return...
4452 			 */
4453 			kmem_free(propp->prop_name,
4454 			    strlen(propp->prop_name) + 1);
4455 			if (propp->prop_len != 0)
4456 				kmem_free(propp->prop_val, propp->prop_len);
4457 			kmem_free(propp, sizeof (ddi_prop_t));
4458 			return (DDI_PROP_SUCCESS);
4459 		}
4460 		lastpropp = propp;
4461 	}
4462 	mutex_exit(&(DEVI(dip)->devi_lock));
4463 	return (DDI_PROP_NOT_FOUND);
4464 }
4465 
/*
 * ddi_prop_remove: remove a driver-defined property (flag 0 selects
 * the driver property list in ddi_prop_remove_common()).
 */
int
ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, 0));
}
4471 
/*
 * e_ddi_prop_remove: remove a system-defined property (selects the
 * system property list in ddi_prop_remove_common()).
 */
int
e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
{
	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
}
4477 
4478 /*
4479  * e_ddi_prop_list_delete: remove a list of properties
4480  *	Note that the caller needs to provide the required protection
4481  *	(eg. devi_lock if these properties are still attached to a devi)
4482  */
void
e_ddi_prop_list_delete(ddi_prop_t *props)
{
	/* thin wrapper; all the work is done by the implementation */
	i_ddi_prop_list_delete(props);
}
4488 
4489 /*
4490  * ddi_prop_remove_all_common:
4491  *	Used before unloading a driver to remove
4492  *	all properties. (undefines all dev_t's props.)
4493  *	Also removes `explicitly undefined' props.
4494  *	No errors possible.
4495  */
4496 void
4497 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4498 {
4499 	ddi_prop_t	**list_head;
4500 
4501 	mutex_enter(&(DEVI(dip)->devi_lock));
4502 	if (flag & DDI_PROP_SYSTEM_DEF) {
4503 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4504 	} else if (flag & DDI_PROP_HW_DEF) {
4505 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4506 	} else {
4507 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4508 	}
4509 	i_ddi_prop_list_delete(*list_head);
4510 	*list_head = NULL;
4511 	mutex_exit(&(DEVI(dip)->devi_lock));
4512 }
4513 
4514 
4515 /*
4516  * ddi_prop_remove_all:		Remove all driver prop definitions.
4517  */
4518 
void
ddi_prop_remove_all(dev_info_t *dip)
{
	/* clear the dynamic-property list before removing static props */
	i_ddi_prop_dyn_driver_set(dip, NULL);
	ddi_prop_remove_all_common(dip, 0);
}
4525 
4526 /*
4527  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4528  */
4529 
void
e_ddi_prop_remove_all(dev_info_t *dip)
{
	/* remove only the system-defined property list */
	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
}
4535 
4536 
4537 /*
4538  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4539  *			searches which match this property return
4540  *			the error code DDI_PROP_UNDEFINED.
4541  *
4542  *			Use ddi_prop_remove to negate effect of
4543  *			ddi_prop_undefine
4544  *
4545  *			See above for error returns.
4546  */
4547 
4548 int
4549 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4550 {
4551 	if (!(flag & DDI_PROP_CANSLEEP))
4552 		flag |= DDI_PROP_DONTSLEEP;
4553 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4554 	return (ddi_prop_update_common(dev, dip, flag,
4555 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4556 }
4557 
4558 int
4559 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4560 {
4561 	if (!(flag & DDI_PROP_CANSLEEP))
4562 		flag |= DDI_PROP_DONTSLEEP;
4563 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4564 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4565 	return (ddi_prop_update_common(dev, dip, flag,
4566 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4567 }
4568 
4569 /*
4570  * Support for gathering dynamic properties in devinfo snapshot.
4571  */
/* Attach the driver's dynamic-property descriptor list to the node. */
void
i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_driver = dp;
}
4577 
/* Return the driver's dynamic-property descriptor list, if any. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_driver_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_driver);
}
4583 
/* Attach the parent-maintained dynamic-property descriptor list. */
void
i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	DEVI(dip)->devi_prop_dyn_parent = dp;
}
4589 
/* Return the parent-maintained dynamic-property descriptor list. */
i_ddi_prop_dyn_t *
i_ddi_prop_dyn_parent_get(dev_info_t *dip)
{
	return (DEVI(dip)->devi_prop_dyn_parent);
}
4595 
/*
 * i_ddi_prop_dyn_cache_invalidate: a dynamic property changed; make
 * sure the cached devinfo snapshot doesn't keep serving stale data.
 */
void
i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
{
	/* for now we invalidate the entire cached snapshot */
	if (dip && dp)
		i_ddi_di_cache_invalidate();
}
4603 
/*
 * ddi_prop_cache_invalidate: public hook for drivers to signal that a
 * property value changed.  The individual arguments are currently
 * unused; the whole cached snapshot is discarded.
 */
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate();
}
4611 
4612 
4613 /*
4614  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4615  *
4616  * if input dip != child_dip, then call is on behalf of child
4617  * to search PROM, do it via ddi_prop_search_common() and ascend only
4618  * if allowed.
4619  *
4620  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4621  * to search for PROM defined props only.
4622  *
4623  * Note that the PROM search is done only if the requested dev
4624  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4625  * have no associated dev, thus are automatically associated with
4626  * DDI_DEV_T_NONE.
4627  *
4628  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4629  *
4630  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4631  * that the property resides in the prom.
4632  */
4633 int
4634 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4635     ddi_prop_op_t prop_op, int mod_flags,
4636     char *name, caddr_t valuep, int *lengthp)
4637 {
4638 	int	len;
4639 	caddr_t buffer;
4640 
4641 	/*
4642 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4643 	 * look in caller's PROM if it's a self identifying device...
4644 	 *
4645 	 * Note that this is very similar to ddi_prop_op, but we
4646 	 * search the PROM instead of the s/w defined properties,
4647 	 * and we are called on by the parent driver to do this for
4648 	 * the child.
4649 	 */
4650 
4651 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4652 	    ndi_dev_is_prom_node(ch_dip) &&
4653 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4654 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4655 		if (len == -1) {
4656 			return (DDI_PROP_NOT_FOUND);
4657 		}
4658 
4659 		/*
4660 		 * If exists only request, we're done
4661 		 */
4662 		if (prop_op == PROP_EXISTS) {
4663 			return (DDI_PROP_FOUND_1275);
4664 		}
4665 
4666 		/*
4667 		 * If length only request or prop length == 0, get out
4668 		 */
4669 		if ((prop_op == PROP_LEN) || (len == 0)) {
4670 			*lengthp = len;
4671 			return (DDI_PROP_FOUND_1275);
4672 		}
4673 
4674 		/*
4675 		 * Allocate buffer if required... (either way `buffer'
4676 		 * is receiving address).
4677 		 */
4678 
4679 		switch (prop_op) {
4680 
4681 		case PROP_LEN_AND_VAL_ALLOC:
4682 
4683 			buffer = kmem_alloc((size_t)len,
4684 			    mod_flags & DDI_PROP_CANSLEEP ?
4685 			    KM_SLEEP : KM_NOSLEEP);
4686 			if (buffer == NULL) {
4687 				return (DDI_PROP_NO_MEMORY);
4688 			}
4689 			*(caddr_t *)valuep = buffer;
4690 			break;
4691 
4692 		case PROP_LEN_AND_VAL_BUF:
4693 
4694 			if (len > (*lengthp)) {
4695 				*lengthp = len;
4696 				return (DDI_PROP_BUF_TOO_SMALL);
4697 			}
4698 
4699 			buffer = valuep;
4700 			break;
4701 
4702 		default:
4703 			break;
4704 		}
4705 
4706 		/*
4707 		 * Call the PROM function to do the copy.
4708 		 */
4709 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4710 		    name, buffer);
4711 
4712 		*lengthp = len; /* return the actual length to the caller */
4713 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4714 		return (DDI_PROP_FOUND_1275);
4715 	}
4716 
4717 	return (DDI_PROP_NOT_FOUND);
4718 }
4719 
4720 /*
4721  * The ddi_bus_prop_op default bus nexus prop op function.
4722  *
4723  * Code to search hardware layer (PROM), if it exists,
4724  * on behalf of child, then, if appropriate, ascend and check
4725  * my own software defined properties...
4726  */
4727 int
4728 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4729     ddi_prop_op_t prop_op, int mod_flags,
4730     char *name, caddr_t valuep, int *lengthp)
4731 {
4732 	int	error;
4733 
4734 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4735 	    name, valuep, lengthp);
4736 
4737 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4738 	    error == DDI_PROP_BUF_TOO_SMALL)
4739 		return (error);
4740 
4741 	if (error == DDI_PROP_NO_MEMORY) {
4742 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4743 		return (DDI_PROP_NO_MEMORY);
4744 	}
4745 
4746 	/*
4747 	 * Check the 'options' node as a last resort
4748 	 */
4749 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4750 		return (DDI_PROP_NOT_FOUND);
4751 
4752 	if (ch_dip == ddi_root_node())	{
4753 		/*
4754 		 * As a last resort, when we've reached
4755 		 * the top and still haven't found the
4756 		 * property, see if the desired property
4757 		 * is attached to the options node.
4758 		 *
4759 		 * The options dip is attached right after boot.
4760 		 */
4761 		ASSERT(options_dip != NULL);
4762 		/*
4763 		 * Force the "don't pass" flag to *just* see
4764 		 * what the options node has to offer.
4765 		 */
4766 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4767 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4768 		    (uint_t *)lengthp));
4769 	}
4770 
4771 	/*
4772 	 * Otherwise, continue search with parent's s/w defined properties...
4773 	 * NOTE: Using `dip' in following call increments the level.
4774 	 */
4775 
4776 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4777 	    name, valuep, (uint_t *)lengthp));
4778 }
4779 
4780 /*
4781  * External property functions used by other parts of the kernel...
4782  */
4783 
4784 /*
4785  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4786  */
4787 
4788 int
4789 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4790     caddr_t valuep, int *lengthp)
4791 {
4792 	_NOTE(ARGUNUSED(type))
4793 	dev_info_t *devi;
4794 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4795 	int error;
4796 
4797 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4798 		return (DDI_PROP_NOT_FOUND);
4799 
4800 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4801 	ddi_release_devi(devi);
4802 	return (error);
4803 }
4804 
4805 /*
4806  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4807  */
4808 
4809 int
4810 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4811     caddr_t valuep, int *lengthp)
4812 {
4813 	_NOTE(ARGUNUSED(type))
4814 	dev_info_t *devi;
4815 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4816 	int error;
4817 
4818 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4819 		return (DDI_PROP_NOT_FOUND);
4820 
4821 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4822 	ddi_release_devi(devi);
4823 	return (error);
4824 }
4825 
4826 /*
4827  * e_ddi_getprop:	See comments for ddi_getprop.
4828  */
4829 int
4830 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4831 {
4832 	_NOTE(ARGUNUSED(type))
4833 	dev_info_t *devi;
4834 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4835 	int	propvalue = defvalue;
4836 	int	proplength = sizeof (int);
4837 	int	error;
4838 
4839 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4840 		return (defvalue);
4841 
4842 	error = cdev_prop_op(dev, devi, prop_op,
4843 	    flags, name, (caddr_t)&propvalue, &proplength);
4844 	ddi_release_devi(devi);
4845 
4846 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4847 		propvalue = 1;
4848 
4849 	return (propvalue);
4850 }
4851 
4852 /*
4853  * e_ddi_getprop_int64:
4854  *
4855  * This is a typed interfaces, but predates typed properties. With the
4856  * introduction of typed properties the framework tries to ensure
4857  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4858  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4859  * typed interface invokes legacy (non-typed) interfaces:
4860  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4861  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4862  * this type of lookup as a single operation we invoke the legacy
4863  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4864  * framework ddi_prop_op(9F) implementation is expected to check for
4865  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4866  * (currently TYPE_INT64).
4867  */
4868 int64_t
4869 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4870     int flags, int64_t defvalue)
4871 {
4872 	_NOTE(ARGUNUSED(type))
4873 	dev_info_t	*devi;
4874 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4875 	int64_t		propvalue = defvalue;
4876 	int		proplength = sizeof (propvalue);
4877 	int		error;
4878 
4879 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4880 		return (defvalue);
4881 
4882 	error = cdev_prop_op(dev, devi, prop_op, flags |
4883 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4884 	ddi_release_devi(devi);
4885 
4886 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4887 		propvalue = 1;
4888 
4889 	return (propvalue);
4890 }
4891 
4892 /*
4893  * e_ddi_getproplen:	See comments for ddi_getproplen.
4894  */
4895 int
4896 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4897 {
4898 	_NOTE(ARGUNUSED(type))
4899 	dev_info_t *devi;
4900 	ddi_prop_op_t prop_op = PROP_LEN;
4901 	int error;
4902 
4903 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4904 		return (DDI_PROP_NOT_FOUND);
4905 
4906 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4907 	ddi_release_devi(devi);
4908 	return (error);
4909 }
4910 
4911 /*
4912  * Routines to get at elements of the dev_info structure
4913  */
4914 
4915 /*
4916  * ddi_binding_name: Return the driver binding name of the devinfo node
4917  *		This is the name the OS used to bind the node to a driver.
4918  */
char *
ddi_binding_name(dev_info_t *dip)
{
	/* the name the framework used to bind this node to a driver */
	return (DEVI(dip)->devi_binding_name);
}
4924 
4925 /*
4926  * ddi_driver_major: Return the major number of the driver that
4927  *	the supplied devinfo is bound to.  If not yet bound,
4928  *	DDI_MAJOR_T_NONE.
4929  *
4930  * When used by the driver bound to 'devi', this
4931  * function will reliably return the driver major number.
4932  * Other ways of determining the driver major number, such as
4933  *	major = ddi_name_to_major(ddi_get_name(devi));
4934  *	major = ddi_name_to_major(ddi_binding_name(devi));
4935  * can return a different result as the driver/alias binding
4936  * can change dynamically, and thus should be avoided.
4937  */
major_t
ddi_driver_major(dev_info_t *devi)
{
	/* DDI_MAJOR_T_NONE until the node is bound to a driver */
	return (DEVI(devi)->devi_major);
}
4943 
4944 /*
4945  * ddi_driver_name: Return the normalized driver name. this is the
4946  *		actual driver name
4947  */
4948 const char *
4949 ddi_driver_name(dev_info_t *devi)
4950 {
4951 	major_t major;
4952 
4953 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4954 		return (ddi_major_to_name(major));
4955 
4956 	return (ddi_node_name(devi));
4957 }
4958 
4959 /*
4960  * i_ddi_set_binding_name:	Set binding name.
4961  *
4962  *	Set the binding name to the given name.
4963  *	This routine is for use by the ddi implementation, not by drivers.
4964  */
void
i_ddi_set_binding_name(dev_info_t *dip, char *name)
{
	/* caller retains ownership of 'name'; only the pointer is stored */
	DEVI(dip)->devi_binding_name = name;

}
4971 
4972 /*
4973  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4974  * the implementation has used to bind the node to a driver.
4975  */
char *
ddi_get_name(dev_info_t *dip)
{
	/* identical to ddi_binding_name() */
	return (DEVI(dip)->devi_binding_name);
}
4981 
4982 /*
4983  * ddi_node_name: Return the name property of the devinfo node
4984  *		This may differ from ddi_binding_name if the node name
4985  *		does not define a binding to a driver (i.e. generic names).
4986  */
char *
ddi_node_name(dev_info_t *dip)
{
	/* the node's "name" property; may differ from the binding name */
	return (DEVI(dip)->devi_node_name);
}
4992 
4993 
4994 /*
4995  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4996  */
int
ddi_get_nodeid(dev_info_t *dip)
{
	/* nodeid as recorded in the dev_info structure */
	return (DEVI(dip)->devi_nodeid);
}
5002 
/* Return the instance number assigned to this devinfo node. */
int
ddi_get_instance(dev_info_t *dip)
{
	return (DEVI(dip)->devi_instance);
}
5008 
/* Return the dev_ops vector currently attached to the node. */
struct dev_ops *
ddi_get_driver(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ops);
}
5014 
/* Install a dev_ops vector on the node. */
void
ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
{
	DEVI(dip)->devi_ops = devo;
}
5020 
5021 /*
5022  * ddi_set_driver_private/ddi_get_driver_private:
5023  * Get/set device driver private data in devinfo.
5024  */
void
ddi_set_driver_private(dev_info_t *dip, void *data)
{
	/* opaque per-instance state owned by the driver */
	DEVI(dip)->devi_driver_data = data;
}
5030 
void *
ddi_get_driver_private(dev_info_t *dip)
{
	/* opaque per-instance state owned by the driver */
	return (DEVI(dip)->devi_driver_data);
}
5036 
5037 /*
5038  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5039  */
5040 
/* Return this node's parent in the devinfo tree. */
dev_info_t *
ddi_get_parent(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_parent);
}
5046 
/* Return this node's first child in the devinfo tree. */
dev_info_t *
ddi_get_child(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_child);
}
5052 
/* Return this node's next sibling in the devinfo tree. */
dev_info_t *
ddi_get_next_sibling(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_sibling);
}
5058 
/* Return the devi_next link (generic chaining pointer). */
dev_info_t *
ddi_get_next(dev_info_t *dip)
{
	return ((dev_info_t *)DEVI(dip)->devi_next);
}
5064 
/* Set the devi_next link (generic chaining pointer). */
void
ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
{
	DEVI(dip)->devi_next = DEVI(nextdip);
}
5070 
5071 /*
5072  * ddi_root_node:		Return root node of devinfo tree
5073  */
5074 
dev_info_t *
ddi_root_node(void)
{
	/* top_devinfo is established early in boot */
	extern dev_info_t *top_devinfo;

	return (top_devinfo);
}
5082 
5083 /*
5084  * Miscellaneous functions:
5085  */
5086 
5087 /*
5088  * Implementation specific hooks
5089  */
5090 
5091 void
5092 ddi_report_dev(dev_info_t *d)
5093 {
5094 	char *b;
5095 
5096 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
5097 
5098 	/*
5099 	 * If this devinfo node has cb_ops, it's implicitly accessible from
5100 	 * userland, so we print its full name together with the instance
5101 	 * number 'abbreviation' that the driver may use internally.
5102 	 */
5103 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
5104 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
5105 		cmn_err(CE_CONT, "?%s%d is %s\n",
5106 		    ddi_driver_name(d), ddi_get_instance(d),
5107 		    ddi_pathname(d, b));
5108 		kmem_free(b, MAXPATHLEN);
5109 	}
5110 }
5111 
5112 /*
5113  * ddi_ctlops() is described in the assembler not to buy a new register
5114  * window when it's called and can reduce cost in climbing the device tree
5115  * without using the tail call optimization.
5116  */
5117 int
5118 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5119 {
5120 	int ret;
5121 
5122 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5123 	    (void *)&rnumber, (void *)result);
5124 
5125 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5126 }
5127 
/* Return (via *result) the number of register sets of the device. */
int
ddi_dev_nregs(dev_info_t *dev, int *result)
{
	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
}
5133 
/* Ask the nexus whether the device is self-identifying. */
int
ddi_dev_is_sid(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
}
5139 
/* Ask the nexus whether the device sits in a slave-only slot. */
int
ddi_slaveonly(dev_info_t *d)
{
	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
}
5145 
/* Ask the nexus whether devices 'a' and 'b' share an affinity. */
int
ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
{
	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
}
5151 
5152 int
5153 ddi_streams_driver(dev_info_t *dip)
5154 {
5155 	if (i_ddi_devi_attached(dip) &&
5156 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5157 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5158 		return (DDI_SUCCESS);
5159 	return (DDI_FAILURE);
5160 }
5161 
5162 /*
5163  * callback free list
5164  */
5165 
static int ncallbacks;		/* size of the static (L2) pool */
static int nc_low = 170;	/* pool size on < 48MB systems */
static int nc_med = 512;	/* pool size on < 128MB systems */
static int nc_high = 2048;	/* pool size on larger systems */
static struct ddi_callback *callbackq;		/* the static pool itself */
static struct ddi_callback *callbackqfree;	/* head of its free list */
5172 
5173 /*
5174  * set/run callback lists
5175  */
/*
 * Counters describing callback list activity, exported as the
 * "cbstats" named kstat by impl_ddi_callback_init().
 */
struct	cbstats	{
	kstat_named_t	cb_asked;	/* callbacks requested */
	kstat_named_t	cb_new;		/* new list entries created */
	kstat_named_t	cb_run;		/* callbacks run to completion */
	kstat_named_t	cb_delete;	/* list entries deleted */
	kstat_named_t	cb_maxreq;	/* max outstanding requests seen */
	kstat_named_t	cb_maxlist;	/* max allocated entries seen */
	kstat_named_t	cb_alloc;	/* entries currently allocated */
	kstat_named_t	cb_runouts;	/* callbacks that returned 0 (retry) */
	kstat_named_t	cb_L2;		/* entries taken from the L2 pool */
	kstat_named_t	cb_grow;	/* last-ditch tryhard allocations */
} cbstats = {
	{"asked",	KSTAT_DATA_UINT32},
	{"new",		KSTAT_DATA_UINT32},
	{"run",		KSTAT_DATA_UINT32},
	{"delete",	KSTAT_DATA_UINT32},
	{"maxreq",	KSTAT_DATA_UINT32},
	{"maxlist",	KSTAT_DATA_UINT32},
	{"alloc",	KSTAT_DATA_UINT32},
	{"runouts",	KSTAT_DATA_UINT32},
	{"L2",		KSTAT_DATA_UINT32},
	{"grow",	KSTAT_DATA_UINT32},
};
5199 
/* Shorthand accessors for the uint32 values of the cbstats kstats. */
#define	nc_asked	cb_asked.value.ui32
#define	nc_new		cb_new.value.ui32
#define	nc_run		cb_run.value.ui32
#define	nc_delete	cb_delete.value.ui32
#define	nc_maxreq	cb_maxreq.value.ui32
#define	nc_maxlist	cb_maxlist.value.ui32
#define	nc_alloc	cb_alloc.value.ui32
#define	nc_runouts	cb_runouts.value.ui32
#define	nc_L2		cb_L2.value.ui32
#define	nc_grow		cb_grow.value.ui32

/* Protects the callback lists, the static free pool, and cbstats. */
static kmutex_t ddi_callback_mutex;
5212 
5213 /*
5214  * callbacks are handled using a L1/L2 cache. The L1 cache
5215  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5216  * we can't get callbacks from the L1 cache [because pageout is doing
5217  * I/O at the time freemem is 0], we allocate callbacks out of the
5218  * L2 cache. The L2 cache is static and depends on the memory size.
5219  * [We might also count the number of devices at probe time and
5220  * allocate one structure per device and adjust for deferred attach]
5221  */
5222 void
5223 impl_ddi_callback_init(void)
5224 {
5225 	int	i;
5226 	uint_t	physmegs;
5227 	kstat_t	*ksp;
5228 
5229 	physmegs = physmem >> (20 - PAGESHIFT);
5230 	if (physmegs < 48) {
5231 		ncallbacks = nc_low;
5232 	} else if (physmegs < 128) {
5233 		ncallbacks = nc_med;
5234 	} else {
5235 		ncallbacks = nc_high;
5236 	}
5237 
5238 	/*
5239 	 * init free list
5240 	 */
5241 	callbackq = kmem_zalloc(
5242 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5243 	for (i = 0; i < ncallbacks-1; i++)
5244 		callbackq[i].c_nfree = &callbackq[i+1];
5245 	callbackqfree = callbackq;
5246 
5247 	/* init kstats */
5248 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5249 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5250 		ksp->ks_data = (void *) &cbstats;
5251 		kstat_install(ksp);
5252 	}
5253 
5254 }
5255 
/*
 * callback_insert: queue (or merge) a callback request on *listid.
 *
 * Must be called with ddi_callback_mutex held.  If an entry with the
 * same function/argument already exists, its count is bumped;
 * otherwise a new entry is appended, allocated (in order of
 * preference) from kmem (L1), from the static free pool (L2), or via
 * kmem_alloc_tryhard with KM_PANIC as a last resort.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* look for an existing entry with the same func/arg pair */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;	/* remember the tail for appending */
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 (kmem) failed; fall back to the static L2 pool */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too: KM_PANIC guarantees success */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* append the new entry (list may have been empty) */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5299 
5300 void
5301 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5302 {
5303 	mutex_enter(&ddi_callback_mutex);
5304 	cbstats.nc_asked++;
5305 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5306 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5307 	(void) callback_insert(funcp, arg, listid, 1);
5308 	mutex_exit(&ddi_callback_mutex);
5309 }
5310 
/*
 * real_callback_run: softcall worker that drains the callback list at
 * *Queue.  Each entry is popped under ddi_callback_mutex, its storage
 * returned to the L2 pool (if it came from there) or kmem, and its
 * function invoked without the lock held.  A callback returning 0
 * means "couldn't make progress" and is re-queued; a non-zero return
 * consumes one count.  Loops until the pending total computed on the
 * first pass has been worked off.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		if (check_pending) {
			/* first pass: total up all queued counts */
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* pop the head entry and release its storage */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			/* entry came from the static L2 pool */
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* run the callback (lock dropped) up to 'count' times */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* no progress: re-queue the remainder */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5371 
void
ddi_run_callback(uintptr_t *listid)
{
	/* drain the list asynchronously in softcall context */
	softcall(real_callback_run, listid);
}
5377 
5378 /*
5379  * ddi_periodic_t
5380  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5381  *     int level)
5382  *
5383  * INTERFACE LEVEL
5384  *      Solaris DDI specific (Solaris DDI)
5385  *
5386  * PARAMETERS
5387  *      func: the callback function
5388  *
5389  *            The callback function will be invoked. The function is invoked
5390  *            in kernel context if the argument level passed is the zero.
5391  *            Otherwise it's invoked in interrupt context at the specified
5392  *            level.
5393  *
5394  *       arg: the argument passed to the callback function
5395  *
5396  *  interval: interval time
5397  *
5398  *    level : callback interrupt level
5399  *
5400  *            If the value is the zero, the callback function is invoked
5401  *            in kernel context. If the value is more than the zero, but
5402  *            less than or equal to ten, the callback function is invoked in
5403  *            interrupt context at the specified interrupt level, which may
5404  *            be used for real time applications.
5405  *
5406  *            This value must be in range of 0-10, which can be a numeric
5407  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5408  *
5409  * DESCRIPTION
5410  *      ddi_periodic_add(9F) schedules the specified function to be
5411  *      periodically invoked in the interval time.
5412  *
5413  *      As well as timeout(9F), the exact time interval over which the function
5414  *      takes effect cannot be guaranteed, but the value given is a close
5415  *      approximation.
5416  *
5417  *      Drivers waiting on behalf of processes with real-time constraints must
5418  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5419  *
5420  * RETURN VALUES
5421  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5422  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5423  *
5424  * CONTEXT
5425  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5426  *      it cannot be called in interrupt context, which is different from
5427  *      timeout(9F).
5428  */
5429 ddi_periodic_t
5430 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5431 {
5432 	/*
5433 	 * Sanity check of the argument level.
5434 	 */
5435 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5436 		cmn_err(CE_PANIC,
5437 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5438 
5439 	/*
5440 	 * Sanity check of the context. ddi_periodic_add() cannot be
5441 	 * called in either interrupt context or high interrupt context.
5442 	 */
5443 	if (servicing_interrupt())
5444 		cmn_err(CE_PANIC,
5445 		    "ddi_periodic_add: called in (high) interrupt context.");
5446 
5447 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5448 }
5449 
5450 /*
5451  * void
5452  * ddi_periodic_delete(ddi_periodic_t req)
5453  *
5454  * INTERFACE LEVEL
5455  *     Solaris DDI specific (Solaris DDI)
5456  *
5457  * PARAMETERS
5458  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5459  *     previously.
5460  *
5461  * DESCRIPTION
5462  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5463  *     previously requested.
5464  *
5465  *     ddi_periodic_delete(9F) will not return until the pending request
5466  *     is canceled or executed.
5467  *
5468  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5469  *     timeout which is either running on another CPU, or has already
5470  *     completed causes no problems. However, unlike untimeout(9F), there is
5471  *     no restrictions on the lock which might be held across the call to
5472  *     ddi_periodic_delete(9F).
5473  *
5474  *     Drivers should be structured with the understanding that the arrival of
5475  *     both an interrupt and a timeout for that interrupt can occasionally
5476  *     occur, in either order.
5477  *
5478  * CONTEXT
5479  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5480  *     it cannot be called in interrupt context, which is different from
5481  *     untimeout(9F).
5482  */
5483 void
5484 ddi_periodic_delete(ddi_periodic_t req)
5485 {
5486 	/*
5487 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5488 	 * called in either interrupt context or high interrupt context.
5489 	 */
5490 	if (servicing_interrupt())
5491 		cmn_err(CE_PANIC,
5492 		    "ddi_periodic_delete: called in (high) interrupt context.");
5493 
5494 	i_untimeout((timeout_t)req);
5495 }
5496 
5497 dev_info_t *
5498 nodevinfo(dev_t dev, int otyp)
5499 {
5500 	_NOTE(ARGUNUSED(dev, otyp))
5501 	return ((dev_info_t *)0);
5502 }
5503 
5504 /*
5505  * A driver should support its own getinfo(9E) entry point. This function
5506  * is provided as a convenience for ON drivers that don't expect their
5507  * getinfo(9E) entry point to be called. A driver that uses this must not
5508  * call ddi_create_minor_node.
5509  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* stub getinfo(9E): unconditionally fail every query */
	return (DDI_FAILURE);
}
5516 
5517 /*
5518  * A driver should support its own getinfo(9E) entry point. This function
5519  * is provided as a convenience for ON drivers that where the minor number
5520  * is the instance. Drivers that do not have 1:1 mapping must implement
5521  * their own getinfo(9E) function.
5522  */
5523 int
5524 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5525     void *arg, void **result)
5526 {
5527 	_NOTE(ARGUNUSED(dip))
5528 	int	instance;
5529 
5530 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5531 		return (DDI_FAILURE);
5532 
5533 	instance = getminor((dev_t)(uintptr_t)arg);
5534 	*result = (void *)(uintptr_t)instance;
5535 	return (DDI_SUCCESS);
5536 }
5537 
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	/* stub attach(9E)/detach(9E) entry point: always fails */
	return (DDI_FAILURE);
}
5544 
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	/* stub bus_dma_map(9E): DMA is not supported on this nexus */
	return (DDI_DMA_NOMAPPING);
}
5552 
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	/* stub bus_dma_allochdl(9E): no attributes can ever be satisfied */
	return (DDI_DMA_BADATTR);
}
5560 
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* stub bus_dma_freehdl(9E): always fails */
	return (DDI_FAILURE);
}
5568 
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	/* stub bus_dma_bindhdl(9E): no mapping can be established */
	return (DDI_DMA_NOMAPPING);
}
5577 
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* stub bus_dma_unbindhdl(9E): always fails */
	return (DDI_FAILURE);
}
5585 
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	/* stub bus_dma_flush(9E): always fails */
	return (DDI_FAILURE);
}
5594 
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	/* stub bus_dma_win(9E): no DMA windows exist */
	return (DDI_FAILURE);
}
5603 
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	/* stub bus_dma_ctl(9E): every control operation fails */
	return (DDI_FAILURE);
}
5612 
/* no-op stub entry point */
void
ddivoid(void)
{}
5616 
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	/* stub chpoll(9E): polling is not supported on this device */
	return (ENXIO);
}
5624 
/*
 * Return the credentials of the current thread (CRED()).
 */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5630 
/*
 * Return the current lbolt value, truncated to clock_t.
 */
clock_t
ddi_get_lbolt(void)
{
	return ((clock_t)lbolt_hybrid());
}
5636 
/*
 * 64-bit variant of ddi_get_lbolt(): return lbolt without truncation.
 */
int64_t
ddi_get_lbolt64(void)
{
	return (lbolt_hybrid());
}
5642 
5643 time_t
5644 ddi_get_time(void)
5645 {
5646 	time_t	now;
5647 
5648 	if ((now = gethrestime_sec()) == 0) {
5649 		timestruc_t ts;
5650 		mutex_enter(&tod_lock);
5651 		ts = tod_get();
5652 		mutex_exit(&tod_lock);
5653 		return (ts.tv_sec);
5654 	} else {
5655 		return (now);
5656 	}
5657 }
5658 
/*
 * Return the process ID of the current thread's process.
 */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5664 
/*
 * Return the unique thread ID (t_did) of the current kernel thread.
 */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5670 
5671 /*
5672  * This function returns B_TRUE if the caller can reasonably expect that a call
5673  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5674  * by user-level signal.  If it returns B_FALSE, then the caller should use
5675  * other means to make certain that the wait will not hang "forever."
5676  *
5677  * It does not check the signal mask, nor for reception of any particular
5678  * signal.
5679  *
5680  * Currently, a thread can receive a signal if it's not a kernel thread and it
5681  * is not in the middle of exit(2) tear-down.  Threads that are in that
5682  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5683  * cv_timedwait, and qwait_sig to qwait.
5684  */
5685 boolean_t
5686 ddi_can_receive_sig(void)
5687 {
5688 	proc_t *pp;
5689 
5690 	if (curthread->t_proc_flag & TP_LWPEXIT)
5691 		return (B_FALSE);
5692 	if ((pp = ttoproc(curthread)) == NULL)
5693 		return (B_FALSE);
5694 	return (pp->p_as != &kas);
5695 }
5696 
5697 /*
5698  * Swap bytes in 16-bit [half-]words
5699  */
/*
 * Swap the bytes of each 16-bit word while copying nbytes from src to
 * dst.  A trailing odd byte is ignored.  src and dst must not overlap
 * in a way that would clobber unread source bytes.
 */
void
swab(void *src, void *dst, size_t nbytes)
{
	unsigned char *pf = (unsigned char *)src;
	unsigned char *pt = (unsigned char *)dst;
	unsigned char tmp;
	size_t nshorts;		/* size_t: an int would truncate large nbytes */

	for (nshorts = nbytes >> 1; nshorts != 0; nshorts--) {
		tmp = *pf++;
		*pt++ = *pf++;
		*pt++ = tmp;
	}
}
5716 
5717 static void
5718 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5719 {
5720 	int			circ;
5721 	struct ddi_minor_data	*dp;
5722 
5723 	ndi_devi_enter(ddip, &circ);
5724 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5725 		DEVI(ddip)->devi_minor = dmdp;
5726 	} else {
5727 		while (dp->next != (struct ddi_minor_data *)NULL)
5728 			dp = dp->next;
5729 		dp->next = dmdp;
5730 	}
5731 	ndi_devi_exit(ddip, circ);
5732 }
5733 
5734 /*
5735  * Part of the obsolete SunCluster DDI Hooks.
5736  * Keep for binary compatibility
5737  */
minor_t
ddi_getiminor(dev_t dev)
{
	/* simple pass-through to getminor(); kept for binary compat */
	return (getminor(dev));
}
5743 
5744 static int
5745 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5746 {
5747 	int se_flag;
5748 	int kmem_flag;
5749 	int se_err;
5750 	char *pathname, *class_name;
5751 	sysevent_t *ev = NULL;
5752 	sysevent_id_t eid;
5753 	sysevent_value_t se_val;
5754 	sysevent_attr_list_t *ev_attr_list = NULL;
5755 
5756 	/* determine interrupt context */
5757 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5758 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5759 
5760 	i_ddi_di_cache_invalidate();
5761 
5762 #ifdef DEBUG
5763 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5764 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5765 		    "interrupt level by driver %s",
5766 		    ddi_driver_name(dip));
5767 	}
5768 #endif /* DEBUG */
5769 
5770 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5771 	if (ev == NULL) {
5772 		goto fail;
5773 	}
5774 
5775 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5776 	if (pathname == NULL) {
5777 		sysevent_free(ev);
5778 		goto fail;
5779 	}
5780 
5781 	(void) ddi_pathname(dip, pathname);
5782 	ASSERT(strlen(pathname));
5783 	se_val.value_type = SE_DATA_TYPE_STRING;
5784 	se_val.value.sv_string = pathname;
5785 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5786 	    &se_val, se_flag) != 0) {
5787 		kmem_free(pathname, MAXPATHLEN);
5788 		sysevent_free(ev);
5789 		goto fail;
5790 	}
5791 	kmem_free(pathname, MAXPATHLEN);
5792 
5793 	/* add the device class attribute */
5794 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5795 		se_val.value_type = SE_DATA_TYPE_STRING;
5796 		se_val.value.sv_string = class_name;
5797 		if (sysevent_add_attr(&ev_attr_list,
5798 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5799 			sysevent_free_attr(ev_attr_list);
5800 			goto fail;
5801 		}
5802 	}
5803 
5804 	/*
5805 	 * allow for NULL minor names
5806 	 */
5807 	if (minor_name != NULL) {
5808 		se_val.value.sv_string = minor_name;
5809 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5810 		    &se_val, se_flag) != 0) {
5811 			sysevent_free_attr(ev_attr_list);
5812 			sysevent_free(ev);
5813 			goto fail;
5814 		}
5815 	}
5816 
5817 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5818 		sysevent_free_attr(ev_attr_list);
5819 		sysevent_free(ev);
5820 		goto fail;
5821 	}
5822 
5823 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5824 		if (se_err == SE_NO_TRANSPORT) {
5825 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5826 			    "for driver %s (%s). Run devfsadm -i %s",
5827 			    ddi_driver_name(dip), "syseventd not responding",
5828 			    ddi_driver_name(dip));
5829 		} else {
5830 			sysevent_free(ev);
5831 			goto fail;
5832 		}
5833 	}
5834 
5835 	sysevent_free(ev);
5836 	return (DDI_SUCCESS);
5837 fail:
5838 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5839 	    "for driver %s. Run devfsadm -i %s",
5840 	    ddi_driver_name(dip), ddi_driver_name(dip));
5841 	return (DDI_SUCCESS);
5842 }
5843 
5844 /*
5845  * failing to remove a minor node is not of interest
5846  * therefore we do not generate an error message
5847  */
/*
 * Generate an EC_DEVFS / ESC_DEVFS_MINOR_REMOVE sysevent for the minor
 * node `minor_name' of `dip'.  Always returns DDI_SUCCESS; failures are
 * silently ignored (see block comment above) since a missed removal
 * event is harmless.
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate();

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* the /devices path of dip is the primary event attribute */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
	/*
	 * NOTE: the success path deliberately falls through to the fail
	 * label -- both paths only need to free the event.
	 */
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5943 
5944 /*
5945  * Derive the device class of the node.
5946  * Device class names aren't defined yet. Until this is done we use
5947  * devfs event subclass names as device class names.
5948  */
5949 static int
5950 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5951 {
5952 	int rv = DDI_SUCCESS;
5953 
5954 	if (i_ddi_devi_class(dip) == NULL) {
5955 		if (strncmp(node_type, DDI_NT_BLOCK,
5956 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5957 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5958 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5959 		    strcmp(node_type, DDI_NT_FD) != 0) {
5960 
5961 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5962 
5963 		} else if (strncmp(node_type, DDI_NT_NET,
5964 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5965 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5966 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5967 
5968 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5969 
5970 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5971 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5972 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5973 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5974 
5975 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5976 
5977 		} else if (strncmp(node_type, DDI_PSEUDO,
5978 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5979 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5980 		    sizeof (ESC_LOFI) -1) == 0)) {
5981 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5982 		}
5983 	}
5984 
5985 	return (rv);
5986 }
5987 
5988 /*
5989  * Check compliance with PSARC 2003/375:
5990  *
5991  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5992  * exceed IFNAMSIZ (16) characters in length.
5993  */
5994 static boolean_t
5995 verify_name(char *name)
5996 {
5997 	size_t	len = strlen(name);
5998 	char	*cp;
5999 
6000 	if (len == 0 || len > IFNAMSIZ)
6001 		return (B_FALSE);
6002 
6003 	for (cp = name; *cp != '\0'; cp++) {
6004 		if (!isalnum(*cp) && *cp != '_')
6005 			return (B_FALSE);
6006 	}
6007 
6008 	return (B_TRUE);
6009 }
6010 
6011 /*
6012  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
6013  *				attach it to the given devinfo node.
6014  */
6015 
/*
 * Common implementation behind ddi_create_minor_node(9F) and friends.
 *
 * Validates the request, allocates and initializes a ddi_minor_data
 * structure, links it onto dip's minor list, optionally logs a devfs
 * sysevent, and runs matching dacf rules.  Returns DDI_SUCCESS or
 * DDI_FAILURE; no partial state is left behind on failure.
 */
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are valid */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;

			/*
			 * If this minor node is created during the device
			 * attachment, this is a physical network device.
			 * Mark the driver as a physical network driver.
			 */
			if (DEVI_IS_ATTACHING(dip))
				dnp->dn_flags |= DN_NETWORK_PHYSDRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive and record the device class for ordinary minor nodes */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	if (flag & CLONE_DEV) {
		/* clone devices route through the clone driver's major */
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
6138 
/*
 * ddi_create_minor_node(9F): create an ordinary (DDM_MINOR) minor node
 * with no device privileges.
 */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
6146 
/*
 * Create a DDM_MINOR node gated by the given read/write privileges and
 * privilege mode.
 */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
6155 
/*
 * Create a DDM_DEFAULT minor node (the node used when the device is
 * opened by node name with no explicit minor).
 */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
6163 
6164 /*
6165  * Internal (non-ddi) routine for drivers to export names known
6166  * to the kernel (especially ddi_pathname_to_dev_t and friends)
6167  * but not exported externally to /dev
6168  */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	/* DDM_INTERNAL_PATH nodes are never exported to /dev */
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
6176 
/*
 * ddi_remove_minor_node(9F): remove the minor node named `name' from
 * dip, or ALL of dip's minor nodes when name is NULL.  Also releases
 * the node's device privilege and dacf client data, and logs a devfs
 * removal sysevent for non-internal nodes.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int			circ;
	struct ddi_minor_data	*dmdp, *dmdp1;
	struct ddi_minor_data	**dmdp_prev;

	ndi_devi_enter(dip, &circ);
	/* dmdp_prev tracks the link to patch when the current node goes */
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before dmdp can be freed below */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
6221 
6222 
/*
 * Return non-zero if the system is currently panicking.
 */
int
ddi_in_panic()
{
	return (panicstr != NULL);
}
6228 
6229 
6230 /*
6231  * Find first bit set in a mask (returned counting from 1 up)
6232  */
6233 
int
ddi_ffs(long mask)
{
	/* thin wrapper over the kernel's ffs(); 0 when mask is 0 */
	return (ffs(mask));
}
6239 
6240 /*
6241  * Find last bit set. Take mask and clear
6242  * all but the most significant bit, and
6243  * then let ffs do the rest of the work.
6244  *
6245  * Algorithm courtesy of Steve Chessin.
6246  */
6247 
int
ddi_fls(long mask)
{
	long lower;

	/*
	 * Repeatedly clear the lowest set bit (mask & (mask - 1))
	 * until only the most significant bit remains, then let ffs
	 * report its (1-based) position.  Returns 0 for a zero mask.
	 */
	while ((lower = (mask & (mask - 1))) != 0)
		mask = lower;

	return (ffs(mask));
}
6260 
6261 /*
6262  * The ddi_soft_state_* routines comprise generic storage management utilities
6263  * for driver soft state structures (in "the old days," this was done with
6264  * statically sized array - big systems and dynamic loading and unloading
6265  * make heap allocation more attractive).
6266  */
6267 
6268 /*
6269  * Allocate a set of pointers to 'n_items' objects of size 'size'
6270  * bytes.  Each pointer is initialized to nil.
6271  *
6272  * The 'size' and 'n_items' values are stashed in the opaque
6273  * handle returned to the caller.
6274  *
6275  * This implementation interprets 'set of pointers' to mean 'array
6276  * of pointers' but note that nothing in the interface definition
6277  * precludes an implementation that uses, for example, a linked list.
6278  * However there should be a small efficiency gain from using an array
6279  * at lookup time.
6280  *
6281  * NOTE	As an optimization, we make our growable array allocations in
6282  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6283  *	gives us anyway.  It should save us some free/realloc's ..
6284  *
6285  *	As a further optimization, we make the growable array start out
6286  *	with MIN_N_ITEMS in it.
6287  */
6288 
6289 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6290 
6291 int
6292 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6293 {
6294 	i_ddi_soft_state	*ss;
6295 
6296 	if (state_p == NULL || size == 0)
6297 		return (EINVAL);
6298 
6299 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6300 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6301 	ss->size = size;
6302 
6303 	if (n_items < MIN_N_ITEMS)
6304 		ss->n_items = MIN_N_ITEMS;
6305 	else {
6306 		int bitlog;
6307 
6308 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6309 			bitlog--;
6310 		ss->n_items = 1 << bitlog;
6311 	}
6312 
6313 	ASSERT(ss->n_items >= n_items);
6314 
6315 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6316 
6317 	*state_p = ss;
6318 	return (0);
6319 }
6320 
6321 /*
6322  * Allocate a state structure of size 'size' to be associated
6323  * with item 'item'.
6324  *
6325  * In this implementation, the array is extended to
6326  * allow the requested offset, if needed.
6327  */
/*
 * ddi_soft_state_zalloc(9F): allocate and zero a soft state structure
 * of ss->size bytes for slot `item', growing the pointer array (in
 * powers of two) when item is beyond the current length.  Returns
 * DDI_SUCCESS, or DDI_FAILURE for a bad handle, negative item, or an
 * already-occupied slot.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
	void			**array;
	void			*new_element;

	if ((state == NULL) || (item < 0))
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		/* size 0 means the handle was never ddi_soft_state_init'd */
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void			**new_array;
		size_t			new_n_items;
		struct i_ddi_soft_state	*dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6420 
6421 /*
6422  * Fetch a pointer to the allocated soft state structure.
6423  *
6424  * This is designed to be cheap.
6425  *
6426  * There's an argument that there should be more checking for
6427  * nil pointers and out of bounds on the array.. but we do a lot
6428  * of that in the alloc/free routines.
6429  *
6430  * An array has the convenience that we don't need to lock read-access
6431  * to it c.f. a linked list.  However our "expanding array" strategy
6432  * means that we should hold a readers lock on the i_ddi_soft_state
6433  * structure.
6434  *
6435  * However, from a performance viewpoint, we need to do it without
6436  * any locks at all -- this also makes it a leaf routine.  The algorithm
6437  * is 'lock-free' because we only discard the pointer arrays at
6438  * ddi_soft_state_fini() time.
6439  */
6440 void *
6441 ddi_get_soft_state(void *state, int item)
6442 {
6443 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6444 
6445 	ASSERT((ss != NULL) && (item >= 0));
6446 
6447 	if (item < ss->n_items && ss->array != NULL)
6448 		return (ss->array[item]);
6449 	return (NULL);
6450 }
6451 
6452 /*
6453  * Free the state structure corresponding to 'item.'   Freeing an
6454  * element that has either gone or was never allocated is not
6455  * considered an error.  Note that we free the state structure, but
6456  * we don't shrink our pointer array, or discard 'dirty' arrays,
6457  * since even a few pointers don't really waste too much memory.
6458  *
6459  * Passing an item number that is out of bounds, or a null pointer will
6460  * provoke an error message.
6461  */
6462 void
6463 ddi_soft_state_free(void *state, int item)
6464 {
6465 	i_ddi_soft_state	*ss = (i_ddi_soft_state *)state;
6466 	void			**array;
6467 	void			*element;
6468 	static char		msg[] = "ddi_soft_state_free:";
6469 
6470 	if (ss == NULL) {
6471 		cmn_err(CE_WARN, "%s null handle: %s",
6472 		    msg, mod_containing_pc(caller()));
6473 		return;
6474 	}
6475 
6476 	element = NULL;
6477 
6478 	mutex_enter(&ss->lock);
6479 
6480 	if ((array = ss->array) == NULL || ss->size == 0) {
6481 		cmn_err(CE_WARN, "%s bad handle: %s",
6482 		    msg, mod_containing_pc(caller()));
6483 	} else if (item < 0 || item >= ss->n_items) {
6484 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6485 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6486 	} else if (array[item] != NULL) {
6487 		element = array[item];
6488 		array[item] = NULL;
6489 	}
6490 
6491 	mutex_exit(&ss->lock);
6492 
6493 	if (element)
6494 		kmem_free(element, ss->size);
6495 }
6496 
6497 /*
6498  * Free the entire set of pointers, and any
6499  * soft state structures contained therein.
6500  *
6501  * Note that we don't grab the ss->lock mutex, even though
6502  * we're inspecting the various fields of the data structure.
6503  *
6504  * There is an implicit assumption that this routine will
6505  * never run concurrently with any of the above on this
6506  * particular state structure i.e. by the time the driver
6507  * calls this routine, there should be no other threads
6508  * running in the driver.
6509  */
6510 void
6511 ddi_soft_state_fini(void **state_p)
6512 {
6513 	i_ddi_soft_state	*ss, *dirty;
6514 	int			item;
6515 	static char		msg[] = "ddi_soft_state_fini:";
6516 
6517 	if (state_p == NULL ||
6518 	    (ss = (i_ddi_soft_state *)(*state_p)) == NULL) {
6519 		cmn_err(CE_WARN, "%s null handle: %s",
6520 		    msg, mod_containing_pc(caller()));
6521 		return;
6522 	}
6523 
6524 	if (ss->size == 0) {
6525 		cmn_err(CE_WARN, "%s bad handle: %s",
6526 		    msg, mod_containing_pc(caller()));
6527 		return;
6528 	}
6529 
6530 	if (ss->n_items > 0) {
6531 		for (item = 0; item < ss->n_items; item++)
6532 			ddi_soft_state_free(ss, item);
6533 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6534 	}
6535 
6536 	/*
6537 	 * Now delete any dirty arrays from previous 'grow' operations
6538 	 */
6539 	for (dirty = ss->next; dirty; dirty = ss->next) {
6540 		ss->next = dirty->next;
6541 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6542 		kmem_free(dirty, sizeof (*dirty));
6543 	}
6544 
6545 	mutex_destroy(&ss->lock);
6546 	kmem_free(ss, sizeof (*ss));
6547 
6548 	*state_p = NULL;
6549 }
6550 
6551 #define	SS_N_ITEMS_PER_HASH	16
6552 #define	SS_MIN_HASH_SZ		16
6553 #define	SS_MAX_HASH_SZ		4096
6554 
6555 int
6556 ddi_soft_state_bystr_init(ddi_soft_state_bystr **state_p, size_t size,
6557     int n_items)
6558 {
6559 	i_ddi_soft_state_bystr	*sss;
6560 	int			hash_sz;
6561 
6562 	ASSERT(state_p && size && n_items);
6563 	if ((state_p == NULL) || (size == 0) || (n_items == 0))
6564 		return (EINVAL);
6565 
6566 	/* current implementation is based on hash, convert n_items to hash */
6567 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6568 	if (hash_sz < SS_MIN_HASH_SZ)
6569 		hash_sz = SS_MIN_HASH_SZ;
6570 	else if (hash_sz > SS_MAX_HASH_SZ)
6571 		hash_sz = SS_MAX_HASH_SZ;
6572 
6573 	/* allocate soft_state pool */
6574 	sss = kmem_zalloc(sizeof (*sss), KM_SLEEP);
6575 	sss->ss_size = size;
6576 	sss->ss_mod_hash = mod_hash_create_strhash("soft_state_bystr",
6577 	    hash_sz, mod_hash_null_valdtor);
6578 	*state_p = (ddi_soft_state_bystr *)sss;
6579 	return (0);
6580 }
6581 
6582 int
6583 ddi_soft_state_bystr_zalloc(ddi_soft_state_bystr *state, const char *str)
6584 {
6585 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6586 	void			*sso;
6587 	char			*dup_str;
6588 
6589 	ASSERT(sss && str && sss->ss_mod_hash);
6590 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6591 		return (DDI_FAILURE);
6592 	sso = kmem_zalloc(sss->ss_size, KM_SLEEP);
6593 	dup_str = i_ddi_strdup((char *)str, KM_SLEEP);
6594 	if (mod_hash_insert(sss->ss_mod_hash,
6595 	    (mod_hash_key_t)dup_str, (mod_hash_val_t)sso) == 0)
6596 		return (DDI_SUCCESS);
6597 
6598 	/*
6599 	 * The only error from an strhash insert is caused by a duplicate key.
6600 	 * We refuse to tread on an existing elements, so free and fail.
6601 	 */
6602 	kmem_free(dup_str, strlen(dup_str) + 1);
6603 	kmem_free(sso, sss->ss_size);
6604 	return (DDI_FAILURE);
6605 }
6606 
6607 void *
6608 ddi_soft_state_bystr_get(ddi_soft_state_bystr *state, const char *str)
6609 {
6610 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6611 	void			*sso;
6612 
6613 	ASSERT(sss && str && sss->ss_mod_hash);
6614 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6615 		return (NULL);
6616 
6617 	if (mod_hash_find(sss->ss_mod_hash,
6618 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso) == 0)
6619 		return (sso);
6620 	return (NULL);
6621 }
6622 
6623 void
6624 ddi_soft_state_bystr_free(ddi_soft_state_bystr *state, const char *str)
6625 {
6626 	i_ddi_soft_state_bystr	*sss = (i_ddi_soft_state_bystr *)state;
6627 	void			*sso;
6628 
6629 	ASSERT(sss && str && sss->ss_mod_hash);
6630 	if ((sss == NULL) || (str == NULL) || (sss->ss_mod_hash == NULL))
6631 		return;
6632 
6633 	(void) mod_hash_remove(sss->ss_mod_hash,
6634 	    (mod_hash_key_t)str, (mod_hash_val_t *)&sso);
6635 	kmem_free(sso, sss->ss_size);
6636 }
6637 
6638 void
6639 ddi_soft_state_bystr_fini(ddi_soft_state_bystr **state_p)
6640 {
6641 	i_ddi_soft_state_bystr	*sss;
6642 
6643 	ASSERT(state_p);
6644 	if (state_p == NULL)
6645 		return;
6646 
6647 	sss = (i_ddi_soft_state_bystr *)(*state_p);
6648 	if (sss == NULL)
6649 		return;
6650 
6651 	ASSERT(sss->ss_mod_hash);
6652 	if (sss->ss_mod_hash) {
6653 		mod_hash_destroy_strhash(sss->ss_mod_hash);
6654 		sss->ss_mod_hash = NULL;
6655 	}
6656 
6657 	kmem_free(sss, sizeof (*sss));
6658 	*state_p = NULL;
6659 }
6660 
6661 /*
6662  * The ddi_strid_* routines provide string-to-index management utilities.
6663  */
6664 /* allocate and initialize an strid set */
6665 int
6666 ddi_strid_init(ddi_strid **strid_p, int n_items)
6667 {
6668 	i_ddi_strid	*ss;
6669 	int		hash_sz;
6670 
6671 	if (strid_p == NULL)
6672 		return (DDI_FAILURE);
6673 
6674 	/* current implementation is based on hash, convert n_items to hash */
6675 	hash_sz = n_items / SS_N_ITEMS_PER_HASH;
6676 	if (hash_sz < SS_MIN_HASH_SZ)
6677 		hash_sz = SS_MIN_HASH_SZ;
6678 	else if (hash_sz > SS_MAX_HASH_SZ)
6679 		hash_sz = SS_MAX_HASH_SZ;
6680 
6681 	ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
6682 	ss->strid_chunksz = n_items;
6683 	ss->strid_spacesz = n_items;
6684 	ss->strid_space = id_space_create("strid", 1, n_items);
6685 	ss->strid_bystr = mod_hash_create_strhash("strid_bystr", hash_sz,
6686 	    mod_hash_null_valdtor);
6687 	ss->strid_byid = mod_hash_create_idhash("strid_byid", hash_sz,
6688 	    mod_hash_null_valdtor);
6689 	*strid_p = (ddi_strid *)ss;
6690 	return (DDI_SUCCESS);
6691 }
6692 
6693 /* allocate an id mapping within the specified set for str, return id */
6694 static id_t
6695 i_ddi_strid_alloc(ddi_strid *strid, char *str)
6696 {
6697 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6698 	id_t		id;
6699 	char		*s;
6700 
6701 	ASSERT(ss && str);
6702 	if ((ss == NULL) || (str == NULL))
6703 		return (0);
6704 
6705 	/*
6706 	 * Allocate an id using VM_FIRSTFIT in order to keep allocated id
6707 	 * range as compressed as possible.  This is important to minimize
6708 	 * the amount of space used when the id is used as a ddi_soft_state
6709 	 * index by the caller.
6710 	 *
6711 	 * If the id list is exhausted, increase the size of the list
6712 	 * by the chuck size specified in ddi_strid_init and reattempt
6713 	 * the allocation
6714 	 */
6715 	if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1) {
6716 		id_space_extend(ss->strid_space, ss->strid_spacesz,
6717 		    ss->strid_spacesz + ss->strid_chunksz);
6718 		ss->strid_spacesz += ss->strid_chunksz;
6719 		if ((id = id_allocff_nosleep(ss->strid_space)) == (id_t)-1)
6720 			return (0);
6721 	}
6722 
6723 	/*
6724 	 * NOTE: since we create and destroy in unison we can save space by
6725 	 * using bystr key as the byid value.  This means destroy must occur
6726 	 * in (byid, bystr) order.
6727 	 */
6728 	s = i_ddi_strdup(str, KM_SLEEP);
6729 	if (mod_hash_insert(ss->strid_bystr, (mod_hash_key_t)s,
6730 	    (mod_hash_val_t)(intptr_t)id) != 0) {
6731 		ddi_strid_free(strid, id);
6732 		return (0);
6733 	}
6734 	if (mod_hash_insert(ss->strid_byid, (mod_hash_key_t)(intptr_t)id,
6735 	    (mod_hash_val_t)s) != 0) {
6736 		ddi_strid_free(strid, id);
6737 		return (0);
6738 	}
6739 
6740 	/* NOTE: s if freed on mod_hash_destroy by mod_hash_strval_dtor */
6741 	return (id);
6742 }
6743 
/* allocate an id mapping within the specified set for str, return id */
id_t
ddi_strid_alloc(ddi_strid *strid, char *str)
{
	/* public wrapper; all work is done in i_ddi_strid_alloc() */
	return (i_ddi_strid_alloc(strid, str));
}
6750 
6751 /* return the id within the specified strid given the str */
6752 id_t
6753 ddi_strid_str2id(ddi_strid *strid, char *str)
6754 {
6755 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6756 	id_t		id = 0;
6757 	mod_hash_val_t	hv;
6758 
6759 	ASSERT(ss && str);
6760 	if (ss && str && (mod_hash_find(ss->strid_bystr,
6761 	    (mod_hash_key_t)str, &hv) == 0))
6762 		id = (int)(intptr_t)hv;
6763 	return (id);
6764 }
6765 
6766 /* return str within the specified strid given the id */
6767 char *
6768 ddi_strid_id2str(ddi_strid *strid, id_t id)
6769 {
6770 	i_ddi_strid	*ss = (i_ddi_strid *)strid;
6771 	char		*str = NULL;
6772 	mod_hash_val_t	hv;
6773 
6774 	ASSERT(ss && id > 0);
6775 	if (ss && (id > 0) && (mod_hash_find(ss->strid_byid,
6776 	    (mod_hash_key_t)(uintptr_t)id, &hv) == 0))
6777 		str = (char *)hv;
6778 	return (str);
6779 }
6780 
/* free the id mapping within the specified strid */
void
ddi_strid_free(ddi_strid *strid, id_t id)
{
	i_ddi_strid	*ss = (i_ddi_strid *)strid;
	char		*str;

	ASSERT(ss && id > 0);
	if ((ss == NULL) || (id <= 0))
		return;

	/* bystr key is byid value: destroy order must be (byid, bystr) */
	str = ddi_strid_id2str(strid, id);
	(void) mod_hash_destroy(ss->strid_byid, (mod_hash_key_t)(uintptr_t)id);
	id_free(ss->strid_space, id);

	/*
	 * Destroying the bystr entry releases the dup'd key string
	 * (presumably via the strhash key destructor -- see
	 * i_ddi_strid_alloc(), which notes 's' is freed by the hash).
	 */
	if (str)
		(void) mod_hash_destroy(ss->strid_bystr, (mod_hash_key_t)str);
}
6800 
6801 /* destroy the strid set */
6802 void
6803 ddi_strid_fini(ddi_strid **strid_p)
6804 {
6805 	i_ddi_strid	*ss;
6806 
6807 	ASSERT(strid_p);
6808 	if (strid_p == NULL)
6809 		return;
6810 
6811 	ss = (i_ddi_strid *)(*strid_p);
6812 	if (ss == NULL)
6813 		return;
6814 
6815 	/* bystr key is byid value: destroy order must be (byid, bystr) */
6816 	if (ss->strid_byid)
6817 		mod_hash_destroy_hash(ss->strid_byid);
6818 	if (ss->strid_byid)
6819 		mod_hash_destroy_hash(ss->strid_bystr);
6820 	if (ss->strid_space)
6821 		id_space_destroy(ss->strid_space);
6822 	kmem_free(ss, sizeof (*ss));
6823 	*strid_p = NULL;
6824 }
6825 
6826 /*
6827  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6828  * Storage is double buffered to prevent updates during devi_addr use -
6829  * double buffering is adaquate for reliable ddi_deviname() consumption.
6830  * The double buffer is not freed until dev_info structure destruction
6831  * (by i_ddi_free_node).
6832  */
6833 void
6834 ddi_set_name_addr(dev_info_t *dip, char *name)
6835 {
6836 	char	*buf = DEVI(dip)->devi_addr_buf;
6837 	char	*newaddr;
6838 
6839 	if (buf == NULL) {
6840 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6841 		DEVI(dip)->devi_addr_buf = buf;
6842 	}
6843 
6844 	if (name) {
6845 		ASSERT(strlen(name) < MAXNAMELEN);
6846 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6847 		    (buf + MAXNAMELEN) : buf;
6848 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6849 	} else
6850 		newaddr = NULL;
6851 
6852 	DEVI(dip)->devi_addr = newaddr;
6853 }
6854 
/*
 * Return the unit-address for 'dip' as last published by
 * ddi_set_name_addr(); NULL when no address is currently set.
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6860 
/* Attach parent-private data 'pd' to the dev_info node. */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6866 
/* Retrieve the parent-private data set by ddi_set_parent_data(). */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6872 
6873 /*
6874  * ddi_name_to_major: returns the major number of a named module,
6875  * derived from the current driver alias binding.
6876  *
6877  * Caveat: drivers should avoid the use of this function, in particular
6878  * together with ddi_get_name/ddi_binding name, as per
6879  *	major = ddi_name_to_major(ddi_get_name(devi));
6880  * ddi_name_to_major() relies on the state of the device/alias binding,
6881  * which can and does change dynamically as aliases are administered
6882  * over time.  An attached device instance cannot rely on the major
6883  * number returned by ddi_name_to_major() to match its own major number.
6884  *
6885  * For driver use, ddi_driver_major() reliably returns the major number
6886  * for the module to which the device was bound at attach time over
6887  * the life of the instance.
6888  *	major = ddi_driver_major(dev_info_t *)
6889  */
6890 major_t
6891 ddi_name_to_major(char *name)
6892 {
6893 	return (mod_name_to_major(name));
6894 }
6895 
6896 /*
6897  * ddi_major_to_name: Returns the module name bound to a major number.
6898  */
6899 char *
6900 ddi_major_to_name(major_t major)
6901 {
6902 	return (mod_major_to_name(major));
6903 }
6904 
6905 /*
6906  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6907  * pointed at by 'name.'  A devinfo node is named as a result of calling
6908  * ddi_initchild().
6909  *
6910  * Note: the driver must be held before calling this function!
6911  */
6912 char *
6913 ddi_deviname(dev_info_t *dip, char *name)
6914 {
6915 	char *addrname;
6916 	char none = '\0';
6917 
6918 	if (dip == ddi_root_node()) {
6919 		*name = '\0';
6920 		return (name);
6921 	}
6922 
6923 	if (i_ddi_node_state(dip) < DS_BOUND) {
6924 		addrname = &none;
6925 	} else {
6926 		/*
6927 		 * Use ddi_get_name_addr() without checking state so we get
6928 		 * a unit-address if we are called after ddi_set_name_addr()
6929 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6930 		 * node promotion to DS_INITIALIZED.  We currently have
6931 		 * two situations where we are called in this state:
6932 		 *   o  For framework processing of a path-oriented alias.
6933 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6934 		 *	from it's tran_tgt_init(9E) implementation.
6935 		 */
6936 		addrname = ddi_get_name_addr(dip);
6937 		if (addrname == NULL)
6938 			addrname = &none;
6939 	}
6940 
6941 	if (*addrname == '\0') {
6942 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6943 	} else {
6944 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6945 	}
6946 
6947 	return (name);
6948 }
6949 
6950 /*
6951  * Spits out the name of device node, typically name@addr, for a given node,
6952  * using the driver name, not the nodename.
6953  *
6954  * Used by match_parent. Not to be used elsewhere.
6955  */
6956 char *
6957 i_ddi_parname(dev_info_t *dip, char *name)
6958 {
6959 	char *addrname;
6960 
6961 	if (dip == ddi_root_node()) {
6962 		*name = '\0';
6963 		return (name);
6964 	}
6965 
6966 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6967 
6968 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6969 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6970 	else
6971 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6972 	return (name);
6973 }
6974 
6975 static char *
6976 pathname_work(dev_info_t *dip, char *path)
6977 {
6978 	char *bp;
6979 
6980 	if (dip == ddi_root_node()) {
6981 		*path = '\0';
6982 		return (path);
6983 	}
6984 	(void) pathname_work(ddi_get_parent(dip), path);
6985 	bp = path + strlen(path);
6986 	(void) ddi_deviname(dip, bp);
6987 	return (path);
6988 }
6989 
/*
 * Build the full devinfo path for 'dip' into 'path'.
 * NOTE(review): no bound is enforced on 'path'; callers are expected to
 * supply a MAXPATHLEN buffer (see ddi_dev_pathname()'s ASSERT).
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6995 
6996 char *
6997 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6998 {
6999 	if (dmdp->dip == NULL)
7000 		*path = '\0';
7001 	else {
7002 		(void) ddi_pathname(dmdp->dip, path);
7003 		if (dmdp->ddm_name) {
7004 			(void) strcat(path, ":");
7005 			(void) strcat(path, dmdp->ddm_name);
7006 		}
7007 	}
7008 	return (path);
7009 }
7010 
/*
 * Recursive worker for ddi_pathname_obp(): returns the OBP path for 'dip'
 * in 'path', or NULL when no ancestor carries an "obp-path" property.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): assumes the parent left a non-empty string in
	 * 'path' -- *(bp - 1) would read before the buffer on an empty
	 * "obp-path" property value; confirm that cannot occur.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
7047 
7048 /*
7049  * return the 'obp-path' based path for the given node, or NULL if the node
7050  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
7051  * function can't be called from interrupt context (since we need to
7052  * lookup a string property).
7053  */
7054 char *
7055 ddi_pathname_obp(dev_info_t *dip, char *path)
7056 {
7057 	ASSERT(!servicing_interrupt());
7058 	if (dip == NULL || path == NULL)
7059 		return (NULL);
7060 
7061 	/* split work into a separate function to aid debugging */
7062 	return (pathname_work_obp(dip, path));
7063 }
7064 
7065 int
7066 ddi_pathname_obp_set(dev_info_t *dip, char *component)
7067 {
7068 	dev_info_t *pdip;
7069 	char *obp_path = NULL;
7070 	int rc = DDI_FAILURE;
7071 
7072 	if (dip == NULL)
7073 		return (DDI_FAILURE);
7074 
7075 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
7076 
7077 	pdip = ddi_get_parent(dip);
7078 
7079 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
7080 		(void) ddi_pathname(pdip, obp_path);
7081 	}
7082 
7083 	if (component) {
7084 		(void) strncat(obp_path, "/", MAXPATHLEN);
7085 		(void) strncat(obp_path, component, MAXPATHLEN);
7086 	}
7087 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
7088 	    obp_path);
7089 
7090 	if (obp_path)
7091 		kmem_free(obp_path, MAXPATHLEN);
7092 
7093 	return (rc);
7094 }
7095 
7096 /*
7097  * Given a dev_t, return the pathname of the corresponding device in the
7098  * buffer pointed at by "path."  The buffer is assumed to be large enough
7099  * to hold the pathname of the device (MAXPATHLEN).
7100  *
7101  * The pathname of a device is the pathname of the devinfo node to which
7102  * the device "belongs," concatenated with the character ':' and the name
7103  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
7104  * just the pathname of the devinfo node is returned without driving attach
7105  * of that node.  For a non-zero spec_type, an attach is performed and a
7106  * search of the minor list occurs.
7107  *
7108  * It is possible that the path associated with the dev_t is not
7109  * currently available in the devinfo tree.  In order to have a
7110  * dev_t, a device must have been discovered before, which means
7111  * that the path is always in the instance tree.  The one exception
7112  * to this is if the dev_t is associated with a pseudo driver, in
7113  * which case the device must exist on the pseudo branch of the
7114  * devinfo tree as a result of parsing .conf files.
7115  */
7116 int
7117 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
7118 {
7119 	int		circ;
7120 	major_t		major = getmajor(devt);
7121 	int		instance;
7122 	dev_info_t	*dip;
7123 	char		*minorname;
7124 	char		*drvname;
7125 
7126 	if (major >= devcnt)
7127 		goto fail;
7128 	if (major == clone_major) {
7129 		/* clone has no minor nodes, manufacture the path here */
7130 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
7131 			goto fail;
7132 
7133 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
7134 		return (DDI_SUCCESS);
7135 	}
7136 
7137 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
7138 	if ((instance = dev_to_instance(devt)) == -1)
7139 		goto fail;
7140 
7141 	/* reconstruct the path given the major/instance */
7142 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
7143 		goto fail;
7144 
7145 	/* if spec_type given we must drive attach and search minor nodes */
7146 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
7147 		/* attach the path so we can search minors */
7148 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
7149 			goto fail;
7150 
7151 		/* Add minorname to path. */
7152 		ndi_devi_enter(dip, &circ);
7153 		minorname = i_ddi_devtspectype_to_minorname(dip,
7154 		    devt, spec_type);
7155 		if (minorname) {
7156 			(void) strcat(path, ":");
7157 			(void) strcat(path, minorname);
7158 		}
7159 		ndi_devi_exit(dip, circ);
7160 		ddi_release_devi(dip);
7161 		if (minorname == NULL)
7162 			goto fail;
7163 	}
7164 	ASSERT(strlen(path) < MAXPATHLEN);
7165 	return (DDI_SUCCESS);
7166 
7167 fail:	*path = 0;
7168 	return (DDI_FAILURE);
7169 }
7170 
7171 /*
7172  * Given a major number and an instance, return the path.
7173  * This interface does NOT drive attach.
7174  */
7175 int
7176 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
7177 {
7178 	struct devnames *dnp;
7179 	dev_info_t	*dip;
7180 
7181 	if ((major >= devcnt) || (instance == -1)) {
7182 		*path = 0;
7183 		return (DDI_FAILURE);
7184 	}
7185 
7186 	/* look for the major/instance in the instance tree */
7187 	if (e_ddi_instance_majorinstance_to_path(major, instance,
7188 	    path) == DDI_SUCCESS) {
7189 		ASSERT(strlen(path) < MAXPATHLEN);
7190 		return (DDI_SUCCESS);
7191 	}
7192 
7193 	/*
7194 	 * Not in instance tree, find the instance on the per driver list and
7195 	 * construct path to instance via ddi_pathname(). This is how paths
7196 	 * down the 'pseudo' branch are constructed.
7197 	 */
7198 	dnp = &(devnamesp[major]);
7199 	LOCK_DEV_OPS(&(dnp->dn_lock));
7200 	for (dip = dnp->dn_head; dip;
7201 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
7202 		/* Skip if instance does not match. */
7203 		if (DEVI(dip)->devi_instance != instance)
7204 			continue;
7205 
7206 		/*
7207 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
7208 		 * node demotion, so it is not an effective way of ensuring
7209 		 * that the ddi_pathname result has a unit-address.  Instead,
7210 		 * we reverify the node state after calling ddi_pathname().
7211 		 */
7212 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
7213 			(void) ddi_pathname(dip, path);
7214 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
7215 				continue;
7216 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
7217 			ASSERT(strlen(path) < MAXPATHLEN);
7218 			return (DDI_SUCCESS);
7219 		}
7220 	}
7221 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
7222 
7223 	/* can't reconstruct the path */
7224 	*path = 0;
7225 	return (DDI_FAILURE);
7226 }
7227 
7228 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
7229 
7230 /*
7231  * Given the dip for a network interface return the ppa for that interface.
7232  *
7233  * In all cases except GLD v0 drivers, the ppa == instance.
7234  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
7235  * So for these drivers when the attach routine calls gld_register(),
7236  * the GLD framework creates an integer property called "gld_driver_ppa"
7237  * that can be queried here.
7238  *
7239  * The only time this function is used is when a system is booting over nfs.
7240  * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
7242  */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/* default to the instance number when no GLD v0 ppa property exists */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
7250 
7251 /*
7252  * i_ddi_devi_set_ppa() should only be called from gld_register()
7253  * and only for GLD v0 drivers
7254  */
7255 void
7256 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
7257 {
7258 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
7259 }
7260 
7261 
7262 /*
7263  * Private DDI Console bell functions.
7264  */
7265 void
7266 ddi_ring_console_bell(clock_t duration)
7267 {
7268 	if (ddi_console_bell_func != NULL)
7269 		(*ddi_console_bell_func)(duration);
7270 }
7271 
/* Register (or clear, with NULL) the console bell callback. */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
7277 
7278 int
7279 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
7280 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
7281 {
7282 	int (*funcp)() = ddi_dma_allochdl;
7283 	ddi_dma_attr_t dma_attr;
7284 	struct bus_ops *bop;
7285 
7286 	if (attr == (ddi_dma_attr_t *)0)
7287 		return (DDI_DMA_BADATTR);
7288 
7289 	dma_attr = *attr;
7290 
7291 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
7292 	if (bop && bop->bus_dma_allochdl)
7293 		funcp = bop->bus_dma_allochdl;
7294 
7295 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
7296 }
7297 
/*
 * Free a DMA handle previously allocated by ddi_dma_alloc_handle().
 * NOTE(review): *handlep is not cleared here; callers must not reuse it.
 */
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
7304 
7305 static uintptr_t dma_mem_list_id = 0;
7306 
7307 
7308 int
7309 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
7310 	ddi_device_acc_attr_t *accattrp, uint_t flags,
7311 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
7312 	size_t *real_length, ddi_acc_handle_t *handlep)
7313 {
7314 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7315 	dev_info_t *dip = hp->dmai_rdip;
7316 	ddi_acc_hdl_t *ap;
7317 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
7318 	uint_t sleepflag, xfermodes;
7319 	int (*fp)(caddr_t);
7320 	int rval;
7321 
7322 	if (waitfp == DDI_DMA_SLEEP)
7323 		fp = (int (*)())KM_SLEEP;
7324 	else if (waitfp == DDI_DMA_DONTWAIT)
7325 		fp = (int (*)())KM_NOSLEEP;
7326 	else
7327 		fp = waitfp;
7328 	*handlep = impl_acc_hdl_alloc(fp, arg);
7329 	if (*handlep == NULL)
7330 		return (DDI_FAILURE);
7331 
7332 	/* check if the cache attributes are supported */
7333 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
7334 		return (DDI_FAILURE);
7335 
7336 	/*
7337 	 * Transfer the meaningful bits to xfermodes.
7338 	 * Double-check if the 3rd party driver correctly sets the bits.
7339 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7340 	 */
7341 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7342 	if (xfermodes == 0) {
7343 		xfermodes = DDI_DMA_STREAMING;
7344 	}
7345 
7346 	/*
7347 	 * initialize the common elements of data access handle
7348 	 */
7349 	ap = impl_acc_hdl_get(*handlep);
7350 	ap->ah_vers = VERS_ACCHDL;
7351 	ap->ah_dip = dip;
7352 	ap->ah_offset = 0;
7353 	ap->ah_len = 0;
7354 	ap->ah_xfermodes = flags;
7355 	ap->ah_acc = *accattrp;
7356 
7357 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7358 	if (xfermodes == DDI_DMA_CONSISTENT) {
7359 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7360 		    flags, accattrp, kaddrp, NULL, ap);
7361 		*real_length = length;
7362 	} else {
7363 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7364 		    flags, accattrp, kaddrp, real_length, ap);
7365 	}
7366 	if (rval == DDI_SUCCESS) {
7367 		ap->ah_len = (off_t)(*real_length);
7368 		ap->ah_addr = *kaddrp;
7369 	} else {
7370 		impl_acc_hdl_free(*handlep);
7371 		*handlep = (ddi_acc_handle_t)NULL;
7372 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7373 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7374 		}
7375 		rval = DDI_FAILURE;
7376 	}
7377 	return (rval);
7378 }
7379 
7380 void
7381 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7382 {
7383 	ddi_acc_hdl_t *ap;
7384 
7385 	ap = impl_acc_hdl_get(*handlep);
7386 	ASSERT(ap);
7387 
7388 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7389 
7390 	/*
7391 	 * free the handle
7392 	 */
7393 	impl_acc_hdl_free(*handlep);
7394 	*handlep = (ddi_acc_handle_t)NULL;
7395 
7396 	if (dma_mem_list_id != 0) {
7397 		ddi_run_callback(&dma_mem_list_id);
7398 	}
7399 }
7400 
/*
 * Bind the buf(9S) 'bp' to the DMA handle: build a ddi_dma_req describing
 * the buffer's memory (page list, shadow page list, or virtual address)
 * and hand it to the parent nexus bind routine.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *dip, *rdip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the object by its page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow list carries the premapped page pointers */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the parent nexus bind routine */
	dip = rdip = hp->dmai_rdip;
	if (dip != ddi_root_node())
		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
}
7456 
7457 int
7458 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7459 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7460 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7461 {
7462 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7463 	dev_info_t *dip, *rdip;
7464 	struct ddi_dma_req dmareq;
7465 	int (*funcp)();
7466 
7467 	if (len == (uint_t)0) {
7468 		return (DDI_DMA_NOMAPPING);
7469 	}
7470 	dmareq.dmar_flags = flags;
7471 	dmareq.dmar_fp = waitfp;
7472 	dmareq.dmar_arg = arg;
7473 	dmareq.dmar_object.dmao_size = len;
7474 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7475 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7476 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7477 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7478 
7479 	dip = rdip = hp->dmai_rdip;
7480 	if (dip != ddi_root_node())
7481 		dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7482 	funcp = DEVI(rdip)->devi_bus_dma_bindfunc;
7483 	return ((*funcp)(dip, rdip, handle, &dmareq, cookiep, ccountp));
7484 }
7485 
7486 void
7487 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7488 {
7489 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7490 	ddi_dma_cookie_t *cp;
7491 
7492 	cp = hp->dmai_cookie;
7493 	ASSERT(cp);
7494 
7495 	cookiep->dmac_notused = cp->dmac_notused;
7496 	cookiep->dmac_type = cp->dmac_type;
7497 	cookiep->dmac_address = cp->dmac_address;
7498 	cookiep->dmac_size = cp->dmac_size;
7499 	hp->dmai_cookie++;
7500 }
7501 
7502 int
7503 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7504 {
7505 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7506 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7507 		return (DDI_FAILURE);
7508 	} else {
7509 		*nwinp = hp->dmai_nwin;
7510 		return (DDI_SUCCESS);
7511 	}
7512 }
7513 
7514 int
7515 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7516 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7517 {
7518 	int (*funcp)() = ddi_dma_win;
7519 	struct bus_ops *bop;
7520 
7521 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7522 	if (bop && bop->bus_dma_win)
7523 		funcp = bop->bus_dma_win;
7524 
7525 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7526 }
7527 
/*
 * Request 64-bit SBus transfers on this DMA handle by forwarding the
 * desired burst sizes to the nexus via the DDI_DMA_SET_SBUS64 mctl.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7534 
/*
 * Default fault-check routine: report the handle's recorded fault
 * state (nonzero when i_ddi_dma_set_fault has marked it faulted).
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7540 
7541 int
7542 ddi_check_dma_handle(ddi_dma_handle_t handle)
7543 {
7544 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7545 	int (*check)(ddi_dma_impl_t *);
7546 
7547 	if ((check = hp->dmai_fault_check) == NULL)
7548 		check = i_ddi_dma_fault_check;
7549 
7550 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7551 }
7552 
7553 void
7554 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7555 {
7556 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7557 	void (*notify)(ddi_dma_impl_t *);
7558 
7559 	if (!hp->dmai_fault) {
7560 		hp->dmai_fault = 1;
7561 		if ((notify = hp->dmai_fault_notify) != NULL)
7562 			(*notify)(hp);
7563 	}
7564 }
7565 
7566 void
7567 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7568 {
7569 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7570 	void (*notify)(ddi_dma_impl_t *);
7571 
7572 	if (hp->dmai_fault) {
7573 		hp->dmai_fault = 0;
7574 		if ((notify = hp->dmai_fault_notify) != NULL)
7575 			(*notify)(hp);
7576 	}
7577 }
7578 
7579 /*
7580  * register mapping routines.
7581  */
7582 int
7583 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7584 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7585 	ddi_acc_handle_t *handle)
7586 {
7587 	ddi_map_req_t mr;
7588 	ddi_acc_hdl_t *hp;
7589 	int result;
7590 
7591 	/*
7592 	 * Allocate and initialize the common elements of data access handle.
7593 	 */
7594 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7595 	hp = impl_acc_hdl_get(*handle);
7596 	hp->ah_vers = VERS_ACCHDL;
7597 	hp->ah_dip = dip;
7598 	hp->ah_rnumber = rnumber;
7599 	hp->ah_offset = offset;
7600 	hp->ah_len = len;
7601 	hp->ah_acc = *accattrp;
7602 
7603 	/*
7604 	 * Set up the mapping request and call to parent.
7605 	 */
7606 	mr.map_op = DDI_MO_MAP_LOCKED;
7607 	mr.map_type = DDI_MT_RNUMBER;
7608 	mr.map_obj.rnumber = rnumber;
7609 	mr.map_prot = PROT_READ | PROT_WRITE;
7610 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7611 	mr.map_handlep = hp;
7612 	mr.map_vers = DDI_MAP_VERSION;
7613 	result = ddi_map(dip, &mr, offset, len, addrp);
7614 
7615 	/*
7616 	 * check for end result
7617 	 */
7618 	if (result != DDI_SUCCESS) {
7619 		impl_acc_hdl_free(*handle);
7620 		*handle = (ddi_acc_handle_t)NULL;
7621 	} else {
7622 		hp->ah_addr = *addrp;
7623 	}
7624 
7625 	return (result);
7626 }
7627 
7628 void
7629 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7630 {
7631 	ddi_map_req_t mr;
7632 	ddi_acc_hdl_t *hp;
7633 
7634 	hp = impl_acc_hdl_get(*handlep);
7635 	ASSERT(hp);
7636 
7637 	mr.map_op = DDI_MO_UNMAP;
7638 	mr.map_type = DDI_MT_RNUMBER;
7639 	mr.map_obj.rnumber = hp->ah_rnumber;
7640 	mr.map_prot = PROT_READ | PROT_WRITE;
7641 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7642 	mr.map_handlep = hp;
7643 	mr.map_vers = DDI_MAP_VERSION;
7644 
7645 	/*
7646 	 * Call my parent to unmap my regs.
7647 	 */
7648 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7649 	    hp->ah_len, &hp->ah_addr);
7650 	/*
7651 	 * free the handle
7652 	 */
7653 	impl_acc_hdl_free(*handlep);
7654 	*handlep = (ddi_acc_handle_t)NULL;
7655 }
7656 
7657 int
7658 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7659 	ssize_t dev_advcnt, uint_t dev_datasz)
7660 {
7661 	uint8_t *b;
7662 	uint16_t *w;
7663 	uint32_t *l;
7664 	uint64_t *ll;
7665 
7666 	/* check for total byte count is multiple of data transfer size */
7667 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7668 		return (DDI_FAILURE);
7669 
7670 	switch (dev_datasz) {
7671 	case DDI_DATA_SZ01_ACC:
7672 		for (b = (uint8_t *)dev_addr;
7673 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7674 			ddi_put8(handle, b, 0);
7675 		break;
7676 	case DDI_DATA_SZ02_ACC:
7677 		for (w = (uint16_t *)dev_addr;
7678 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7679 			ddi_put16(handle, w, 0);
7680 		break;
7681 	case DDI_DATA_SZ04_ACC:
7682 		for (l = (uint32_t *)dev_addr;
7683 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7684 			ddi_put32(handle, l, 0);
7685 		break;
7686 	case DDI_DATA_SZ08_ACC:
7687 		for (ll = (uint64_t *)dev_addr;
7688 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7689 			ddi_put64(handle, ll, 0x0ll);
7690 		break;
7691 	default:
7692 		return (DDI_FAILURE);
7693 	}
7694 	return (DDI_SUCCESS);
7695 }
7696 
7697 int
7698 ddi_device_copy(
7699 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7700 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7701 	size_t bytecount, uint_t dev_datasz)
7702 {
7703 	uint8_t *b_src, *b_dst;
7704 	uint16_t *w_src, *w_dst;
7705 	uint32_t *l_src, *l_dst;
7706 	uint64_t *ll_src, *ll_dst;
7707 
7708 	/* check for total byte count is multiple of data transfer size */
7709 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7710 		return (DDI_FAILURE);
7711 
7712 	switch (dev_datasz) {
7713 	case DDI_DATA_SZ01_ACC:
7714 		b_src = (uint8_t *)src_addr;
7715 		b_dst = (uint8_t *)dest_addr;
7716 
7717 		for (; bytecount != 0; bytecount -= 1) {
7718 			ddi_put8(dest_handle, b_dst,
7719 			    ddi_get8(src_handle, b_src));
7720 			b_dst += dest_advcnt;
7721 			b_src += src_advcnt;
7722 		}
7723 		break;
7724 	case DDI_DATA_SZ02_ACC:
7725 		w_src = (uint16_t *)src_addr;
7726 		w_dst = (uint16_t *)dest_addr;
7727 
7728 		for (; bytecount != 0; bytecount -= 2) {
7729 			ddi_put16(dest_handle, w_dst,
7730 			    ddi_get16(src_handle, w_src));
7731 			w_dst += dest_advcnt;
7732 			w_src += src_advcnt;
7733 		}
7734 		break;
7735 	case DDI_DATA_SZ04_ACC:
7736 		l_src = (uint32_t *)src_addr;
7737 		l_dst = (uint32_t *)dest_addr;
7738 
7739 		for (; bytecount != 0; bytecount -= 4) {
7740 			ddi_put32(dest_handle, l_dst,
7741 			    ddi_get32(src_handle, l_src));
7742 			l_dst += dest_advcnt;
7743 			l_src += src_advcnt;
7744 		}
7745 		break;
7746 	case DDI_DATA_SZ08_ACC:
7747 		ll_src = (uint64_t *)src_addr;
7748 		ll_dst = (uint64_t *)dest_addr;
7749 
7750 		for (; bytecount != 0; bytecount -= 8) {
7751 			ddi_put64(dest_handle, ll_dst,
7752 			    ddi_get64(src_handle, ll_src));
7753 			ll_dst += dest_advcnt;
7754 			ll_src += src_advcnt;
7755 		}
7756 		break;
7757 	default:
7758 		return (DDI_FAILURE);
7759 	}
7760 	return (DDI_SUCCESS);
7761 }
7762 
/*
 * Endian-swap helpers: swap16 exchanges the two bytes of a 16-bit
 * value; swap32 and swap64 are built up by swapping the two halves
 * and byte-swapping each half in turn.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7774 
uint16_t
ddi_swap16(uint16_t value)
{
	/* Reverse the byte order of a 16-bit quantity. */
	return ((uint16_t)(((value & 0xff) << 8) | (value >> 8)));
}
7780 
uint32_t
ddi_swap32(uint32_t value)
{
	/* Reverse the byte order of a 32-bit quantity. */
	return (((value & 0xffU) << 24) |
	    ((value & 0xff00U) << 8) |
	    ((value >> 8) & 0xff00U) |
	    (value >> 24));
}
7786 
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t swapped = 0;
	int i;

	/* Reverse the byte order of a 64-bit quantity. */
	for (i = 0; i < 8; i++) {
		swapped = (swapped << 8) | (value & 0xff);
		value >>= 8;
	}
	return (swapped);
}
7792 
7793 /*
7794  * Convert a binding name to a driver name.
7795  * A binding name is the name used to determine the driver for a
7796  * device - it may be either an alias for the driver or the name
7797  * of the driver itself.
7798  */
7799 char *
7800 i_binding_to_drv_name(char *bname)
7801 {
7802 	major_t major_no;
7803 
7804 	ASSERT(bname != NULL);
7805 
7806 	if ((major_no = ddi_name_to_major(bname)) == -1)
7807 		return (NULL);
7808 	return (ddi_major_to_name(major_no));
7809 }
7810 
7811 /*
7812  * Search for minor name that has specified dev_t and spec_type.
7813  * If spec_type is zero then any dev_t match works.  Since we
7814  * are returning a pointer to the minor name string, we require the
7815  * caller to do the locking.
7816  */
7817 char *
7818 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7819 {
7820 	struct ddi_minor_data	*dmdp;
7821 
7822 	/*
7823 	 * The did layered driver currently intentionally returns a
7824 	 * devinfo ptr for an underlying sd instance based on a did
7825 	 * dev_t. In this case it is not an error.
7826 	 *
7827 	 * The did layered driver is associated with Sun Cluster.
7828 	 */
7829 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7830 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7831 
7832 	ASSERT(DEVI_BUSY_OWNED(dip));
7833 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7834 		if (((dmdp->type == DDM_MINOR) ||
7835 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7836 		    (dmdp->type == DDM_DEFAULT)) &&
7837 		    (dmdp->ddm_dev == dev) &&
7838 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7839 		    (dmdp->ddm_spec_type == spec_type)))
7840 			return (dmdp->ddm_name);
7841 	}
7842 
7843 	return (NULL);
7844 }
7845 
7846 /*
7847  * Find the devt and spectype of the specified minor_name.
7848  * Return DDI_FAILURE if minor_name not found. Since we are
7849  * returning everything via arguments we can do the locking.
7850  */
int
i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
	dev_t *devtp, int *spectypep)
{
	int			circ;
	struct ddi_minor_data	*dmdp;

	/* deal with clone minor nodes */
	if (dip == clone_dip) {
		major_t	major;
		/*
		 * Make sure minor_name is a STREAMS driver.
		 * We load the driver but don't attach to any instances.
		 */

		major = ddi_name_to_major(minor_name);
		if (major == DDI_MAJOR_T_NONE)
			return (DDI_FAILURE);

		if (ddi_hold_driver(major) == NULL)
			return (DDI_FAILURE);

		/* non-STREAMS drivers may not sit under the clone driver */
		if (STREAMSTAB(major) == NULL) {
			ddi_rele_driver(major);
			return (DDI_FAILURE);
		}
		ddi_rele_driver(major);

		/* clone dev_t encodes the target driver's major as minor */
		if (devtp)
			*devtp = makedevice(clone_major, (minor_t)major);

		if (spectypep)
			*spectypep = S_IFCHR;

		return (DDI_SUCCESS);
	}

	/* walk this node's minor list while holding it busy */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
		/* skip alias nodes and names that don't match */
		if (((dmdp->type != DDM_MINOR) &&
		    (dmdp->type != DDM_INTERNAL_PATH) &&
		    (dmdp->type != DDM_DEFAULT)) ||
		    strcmp(minor_name, dmdp->ddm_name))
			continue;

		if (devtp)
			*devtp = dmdp->ddm_dev;

		if (spectypep)
			*spectypep = dmdp->ddm_spec_type;

		ndi_devi_exit(dip, circ);
		return (DDI_SUCCESS);
	}
	ndi_devi_exit(dip, circ);

	return (DDI_FAILURE);
}
7909 
/* Generation counter used when fabricating DEVID_FAB device ids. */
static kmutex_t devid_gen_mutex;
static short	devid_gen_number;
7912 
#ifdef DEBUG

/* Tunables to deliberately corrupt registered devids for testing. */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* When set, trace devid-layer dev_t lists via cmn_err. */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* Non-DEBUG kernels compile the tracing away entirely. */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7931 
7932 
7933 #ifdef	DEBUG
7934 
7935 static void
7936 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7937 {
7938 	int i;
7939 
7940 	cmn_err(CE_CONT, "%s:\n", msg);
7941 	for (i = 0; i < ndevs; i++) {
7942 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7943 	}
7944 }
7945 
7946 static void
7947 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7948 {
7949 	int i;
7950 
7951 	cmn_err(CE_CONT, "%s:\n", msg);
7952 	for (i = 0; i < npaths; i++) {
7953 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7954 	}
7955 }
7956 
7957 static void
7958 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7959 {
7960 	int i;
7961 
7962 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7963 	for (i = 0; i < ndevs; i++) {
7964 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7965 	}
7966 }
7967 
7968 #endif	/* DEBUG */
7969 
7970 /*
7971  * Register device id into DDI framework.
7972  * Must be called when device is attached.
7973  */
static int
i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
{
	impl_devid_t	*i_devid = (impl_devid_t *)devid;
	size_t		driver_len;
	const char	*driver_name;
	char		*devid_str;
	major_t		major;

	/* the node must be bound to a driver for the hint and checks */
	if ((dip == NULL) ||
	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
		return (DDI_FAILURE);

	/* verify that the devid is valid */
	if (ddi_devid_valid(devid) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Updating driver name hint in devid */
	driver_name = ddi_driver_name(dip);
	driver_len = strlen(driver_name);
	if (driver_len > DEVID_HINT_SIZE) {
		/* Pick up last four characters of driver name */
		driver_name += driver_len - DEVID_HINT_SIZE;
		driver_len = DEVID_HINT_SIZE;
	}
	/* clear first so short names leave no stale hint bytes */
	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
	bcopy(driver_name, i_devid->did_driver, driver_len);

#ifdef DEBUG
	/* Corrupt the devid for testing. */
	if (devid_register_corrupt)
		i_devid->did_id[0] += devid_register_corrupt;
	if (devid_register_corrupt_major &&
	    (major == devid_register_corrupt_major))
		i_devid->did_id[0] += 1;
	if (devid_register_corrupt_hint)
		i_devid->did_driver[0] += devid_register_corrupt_hint;
	if (devid_register_corrupt_hint_major &&
	    (major == devid_register_corrupt_hint_major))
		i_devid->did_driver[0] += 1;
#endif /* DEBUG */

	/* encode the devid as a string */
	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
		return (DDI_FAILURE);

	/* add string as a string property */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: devid property update failed",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		ddi_devid_str_free(devid_str);
		return (DDI_FAILURE);
	}

	/* keep pointer to devid string for interrupt context fma code */
	if (DEVI(dip)->devi_devid_str)
		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
	DEVI(dip)->devi_devid_str = devid_str;
	return (DDI_SUCCESS);
}
8035 
8036 int
8037 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
8038 {
8039 	int rval;
8040 
8041 	rval = i_ddi_devid_register(dip, devid);
8042 	if (rval == DDI_SUCCESS) {
8043 		/*
8044 		 * Register devid in devid-to-path cache
8045 		 */
8046 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
8047 			mutex_enter(&DEVI(dip)->devi_lock);
8048 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
8049 			mutex_exit(&DEVI(dip)->devi_lock);
8050 		} else {
8051 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
8052 			    ddi_driver_name(dip), ddi_get_instance(dip));
8053 		}
8054 	} else {
8055 		cmn_err(CE_WARN, "%s%d: failed to register devid",
8056 		    ddi_driver_name(dip), ddi_get_instance(dip));
8057 	}
8058 	return (rval);
8059 }
8060 
8061 /*
8062  * Remove (unregister) device id from DDI framework.
8063  * Must be called when device is detached.
8064  */
8065 static void
8066 i_ddi_devid_unregister(dev_info_t *dip)
8067 {
8068 	if (DEVI(dip)->devi_devid_str) {
8069 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
8070 		DEVI(dip)->devi_devid_str = NULL;
8071 	}
8072 
8073 	/* remove the devid property */
8074 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
8075 }
8076 
void
ddi_devid_unregister(dev_info_t *dip)
{
	/* clear the registered flag before tearing down the cache entry */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
	mutex_exit(&DEVI(dip)->devi_lock);
	e_devid_cache_unregister(dip);
	i_ddi_devid_unregister(dip);
}
8086 
8087 /*
8088  * Allocate and initialize a device id.
8089  */
8090 int
8091 ddi_devid_init(
8092 	dev_info_t	*dip,
8093 	ushort_t	devid_type,
8094 	ushort_t	nbytes,
8095 	void		*id,
8096 	ddi_devid_t	*ret_devid)
8097 {
8098 	impl_devid_t	*i_devid;
8099 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
8100 	int		driver_len;
8101 	const char	*driver_name;
8102 
8103 	switch (devid_type) {
8104 	case DEVID_SCSI3_WWN:
8105 		/*FALLTHRU*/
8106 	case DEVID_SCSI_SERIAL:
8107 		/*FALLTHRU*/
8108 	case DEVID_ATA_SERIAL:
8109 		/*FALLTHRU*/
8110 	case DEVID_ENCAP:
8111 		if (nbytes == 0)
8112 			return (DDI_FAILURE);
8113 		if (id == NULL)
8114 			return (DDI_FAILURE);
8115 		break;
8116 	case DEVID_FAB:
8117 		if (nbytes != 0)
8118 			return (DDI_FAILURE);
8119 		if (id != NULL)
8120 			return (DDI_FAILURE);
8121 		nbytes = sizeof (int) +
8122 		    sizeof (struct timeval32) + sizeof (short);
8123 		sz += nbytes;
8124 		break;
8125 	default:
8126 		return (DDI_FAILURE);
8127 	}
8128 
8129 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
8130 		return (DDI_FAILURE);
8131 
8132 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
8133 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
8134 	i_devid->did_rev_hi = DEVID_REV_MSB;
8135 	i_devid->did_rev_lo = DEVID_REV_LSB;
8136 	DEVID_FORMTYPE(i_devid, devid_type);
8137 	DEVID_FORMLEN(i_devid, nbytes);
8138 
8139 	/* Fill in driver name hint */
8140 	driver_name = ddi_driver_name(dip);
8141 	driver_len = strlen(driver_name);
8142 	if (driver_len > DEVID_HINT_SIZE) {
8143 		/* Pick up last four characters of driver name */
8144 		driver_name += driver_len - DEVID_HINT_SIZE;
8145 		driver_len = DEVID_HINT_SIZE;
8146 	}
8147 
8148 	bcopy(driver_name, i_devid->did_driver, driver_len);
8149 
8150 	/* Fill in id field */
8151 	if (devid_type == DEVID_FAB) {
8152 		char		*cp;
8153 		uint32_t	hostid;
8154 		struct timeval32 timestamp32;
8155 		int		i;
8156 		int		*ip;
8157 		short		gen;
8158 
8159 		/* increase the generation number */
8160 		mutex_enter(&devid_gen_mutex);
8161 		gen = devid_gen_number++;
8162 		mutex_exit(&devid_gen_mutex);
8163 
8164 		cp = i_devid->did_id;
8165 
8166 		/* Fill in host id (big-endian byte ordering) */
8167 		hostid = zone_get_hostid(NULL);
8168 		*cp++ = hibyte(hiword(hostid));
8169 		*cp++ = lobyte(hiword(hostid));
8170 		*cp++ = hibyte(loword(hostid));
8171 		*cp++ = lobyte(loword(hostid));
8172 
8173 		/*
8174 		 * Fill in timestamp (big-endian byte ordering)
8175 		 *
8176 		 * (Note that the format may have to be changed
8177 		 * before 2038 comes around, though it's arguably
8178 		 * unique enough as it is..)
8179 		 */
8180 		uniqtime32(&timestamp32);
8181 		ip = (int *)&timestamp32;
8182 		for (i = 0;
8183 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
8184 			int	val;
8185 			val = *ip;
8186 			*cp++ = hibyte(hiword(val));
8187 			*cp++ = lobyte(hiword(val));
8188 			*cp++ = hibyte(loword(val));
8189 			*cp++ = lobyte(loword(val));
8190 		}
8191 
8192 		/* fill in the generation number */
8193 		*cp++ = hibyte(gen);
8194 		*cp++ = lobyte(gen);
8195 	} else
8196 		bcopy(id, i_devid->did_id, nbytes);
8197 
8198 	/* return device id */
8199 	*ret_devid = (ddi_devid_t)i_devid;
8200 	return (DDI_SUCCESS);
8201 }
8202 
/*
 * Return the devid of a devinfo node, ignoring any devt-specific
 * devid properties (DDI_DEV_T_ANY lookup).
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
8208 
8209 int
8210 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
8211 {
8212 	char		*devidstr;
8213 
8214 	ASSERT(dev != DDI_DEV_T_NONE);
8215 
8216 	/* look up the property, devt specific first */
8217 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
8218 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
8219 		if ((dev == DDI_DEV_T_ANY) ||
8220 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
8221 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
8222 		    DDI_PROP_SUCCESS)) {
8223 			return (DDI_FAILURE);
8224 		}
8225 	}
8226 
8227 	/* convert to binary form */
8228 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
8229 		ddi_prop_free(devidstr);
8230 		return (DDI_FAILURE);
8231 	}
8232 	ddi_prop_free(devidstr);
8233 	return (DDI_SUCCESS);
8234 }
8235 
8236 /*
8237  * Return a copy of the device id for dev_t
8238  */
8239 int
8240 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
8241 {
8242 	dev_info_t	*dip;
8243 	int		rval;
8244 
8245 	/* get the dip */
8246 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
8247 		return (DDI_FAILURE);
8248 
8249 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
8250 
8251 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
8252 	return (rval);
8253 }
8254 
8255 /*
8256  * Return a copy of the minor name for dev_t and spec_type
8257  */
8258 int
8259 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
8260 {
8261 	char		*buf;
8262 	int		circ;
8263 	dev_info_t	*dip;
8264 	char		*nm;
8265 	int		rval;
8266 
8267 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
8268 		*minor_name = NULL;
8269 		return (DDI_FAILURE);
8270 	}
8271 
8272 	/* Find the minor name and copy into max size buf */
8273 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
8274 	ndi_devi_enter(dip, &circ);
8275 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
8276 	if (nm)
8277 		(void) strcpy(buf, nm);
8278 	ndi_devi_exit(dip, circ);
8279 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
8280 
8281 	if (nm) {
8282 		/* duplicate into min size buf for return result */
8283 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
8284 		rval = DDI_SUCCESS;
8285 	} else {
8286 		*minor_name = NULL;
8287 		rval = DDI_FAILURE;
8288 	}
8289 
8290 	/* free max size buf and return */
8291 	kmem_free(buf, MAXNAMELEN);
8292 	return (rval);
8293 }
8294 
8295 int
8296 ddi_lyr_devid_to_devlist(
8297 	ddi_devid_t	devid,
8298 	char		*minor_name,
8299 	int		*retndevs,
8300 	dev_t		**retdevs)
8301 {
8302 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8303 
8304 	if (e_devid_cache_to_devt_list(devid, minor_name,
8305 	    retndevs, retdevs) == DDI_SUCCESS) {
8306 		ASSERT(*retndevs > 0);
8307 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8308 		    *retndevs, *retdevs);
8309 		return (DDI_SUCCESS);
8310 	}
8311 
8312 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8313 		return (DDI_FAILURE);
8314 	}
8315 
8316 	if (e_devid_cache_to_devt_list(devid, minor_name,
8317 	    retndevs, retdevs) == DDI_SUCCESS) {
8318 		ASSERT(*retndevs > 0);
8319 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8320 		    *retndevs, *retdevs);
8321 		return (DDI_SUCCESS);
8322 	}
8323 
8324 	return (DDI_FAILURE);
8325 }
8326 
/*
 * Free a dev_t list previously returned by ddi_lyr_devid_to_devlist.
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8332 
8333 /*
8334  * Note: This will need to be fixed if we ever allow processes to
8335  * have more than one data model per exec.
8336  */
/*
 * Return the data model of the current user process.
 */
model_t
ddi_mmap_get_model(void)
{
	return (get_udatamodel());
}
8342 
/*
 * Normalize a data-model value: mask to the model bits and strip
 * the DDI_MODEL_NATIVE flag.
 */
model_t
ddi_model_convert_from(model_t model)
{
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8348 
8349 /*
8350  * ddi interfaces managing storage and retrieval of eventcookies.
8351  */
8352 
8353 /*
8354  * Invoke bus nexus driver's implementation of the
8355  * (*bus_remove_eventcall)() interface to remove a registered
8356  * callback handler for "event".
8357  */
8358 int
8359 ddi_remove_event_handler(ddi_callback_id_t id)
8360 {
8361 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8362 	dev_info_t *ddip;
8363 
8364 	ASSERT(cb);
8365 	if (!cb) {
8366 		return (DDI_FAILURE);
8367 	}
8368 
8369 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8370 	return (ndi_busop_remove_eventcall(ddip, id));
8371 }
8372 
8373 /*
8374  * Invoke bus nexus driver's implementation of the
8375  * (*bus_add_eventcall)() interface to register a callback handler
8376  * for "event".
8377  */
int
ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
    void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
    void *arg, ddi_callback_id_t *id)
{
	/* dip is both the requesting and the subscribing node here */
	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
}
8385 
8386 
8387 /*
8388  * Return a handle for event "name" by calling up the device tree
8389  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8390  * by a bus nexus or top of dev_info tree is reached.
8391  */
int
ddi_get_eventcookie(dev_info_t *dip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	/* dip is both the requesting and the subscribing node here */
	return (ndi_busop_get_eventcookie(dip, dip,
	    name, event_cookiep));
}
8399 
8400 /*
8401  * This procedure is provided as the general callback function when
8402  * umem_lockmemory calls as_add_callback for long term memory locking.
8403  * When as_unmap, as_setprot, or as_free encounter segments which have
8404  * locked memory, this callback will be invoked.
8405  */
void
umem_lock_undo(struct as *as, void *arg, uint_t event)
{
	_NOTE(ARGUNUSED(as, event))
	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;

	/*
	 * Call the cleanup function.  Decrement the cookie reference
	 * count, if it goes to zero, return the memory for the cookie.
	 * The i_ddi_umem_unlock for this cookie may or may not have been
	 * called already.  It is the responsibility of the caller of
	 * umem_lockmemory to handle the case of the cleanup routine
	 * being called after a ddi_umem_unlock for the cookie
	 * was called.
	 */

	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);

	/*
	 * remove the cookie if reference goes to zero
	 * (atomic_add_long_nv returns the new value, so 0 means
	 * this was the last reference)
	 */
	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
		kmem_free(cp, sizeof (struct ddi_umem_cookie));
	}
}
8429 
8430 /*
8431  * The following two Consolidation Private routines provide generic
8432  * interfaces to increase/decrease the amount of device-locked memory.
8433  *
8434  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8435  * must be called every time i_ddi_incr_locked_memory() is called.
8436  */
8437 int
8438 /* ARGSUSED */
8439 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8440 {
8441 	ASSERT(procp != NULL);
8442 	mutex_enter(&procp->p_lock);
8443 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8444 		mutex_exit(&procp->p_lock);
8445 		return (ENOMEM);
8446 	}
8447 	mutex_exit(&procp->p_lock);
8448 	return (0);
8449 }
8450 
8451 /*
8452  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8453  * must be called every time i_ddi_decr_locked_memory() is called.
8454  */
/* ARGSUSED */
void
i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
{
	ASSERT(procp != NULL);
	/* return dec bytes to the process locked-memory rctl */
	mutex_enter(&procp->p_lock);
	rctl_decr_locked_mem(procp, NULL, dec, 1);
	mutex_exit(&procp->p_lock);
}
8464 
8465 /*
8466  * This routine checks if the max-locked-memory resource ctl is
8467  * exceeded, if not increments it, grabs a hold on the project.
8468  * Returns 0 if successful otherwise returns error code
8469  */
8470 static int
8471 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8472 {
8473 	proc_t		*procp;
8474 	int		ret;
8475 
8476 	ASSERT(cookie);
8477 	procp = cookie->procp;
8478 	ASSERT(procp);
8479 
8480 	if ((ret = i_ddi_incr_locked_memory(procp,
8481 	    cookie->size)) != 0) {
8482 		return (ret);
8483 	}
8484 	return (0);
8485 }
8486 
8487 /*
8488  * Decrements the max-locked-memory resource ctl and releases
8489  * the hold on the project that was acquired during umem_incr_devlockmem
8490  */
8491 static void
8492 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8493 {
8494 	proc_t		*proc;
8495 
8496 	proc = (proc_t *)cookie->procp;
8497 	if (!proc)
8498 		return;
8499 
8500 	i_ddi_decr_locked_memory(proc, cookie->size);
8501 }
8502 
8503 /*
8504  * A consolidation private function which is essentially equivalent to
8505  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8506  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8507  * the ops_vector is valid.
8508  *
8509  * Lock the virtual address range in the current process and create a
8510  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8511  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8512  * to user space.
8513  *
8514  * Note: The resource control accounting currently uses a full charge model
8515  * in other words attempts to lock the same/overlapping areas of memory
8516  * will deduct the full size of the buffer from the projects running
8517  * counter for the device locked memory.
8518  *
8519  * addr, size should be PAGESIZE aligned
8520  *
8521  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8522  *	identifies whether the locked memory will be read or written or both
8523  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8524  * be maintained for an indefinitely long period (essentially permanent),
8525  * rather than for what would be required for a typical I/O completion.
8526  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8527  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8528  * This is to prevent a deadlock if a file truncation is attempted after
8529  * after the locking is done.
8530  *
8531  * Returns 0 on success
8532  *	EINVAL - for invalid parameters
8533  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8534  *	ENOMEM - is returned if the current request to lock memory exceeds
8535  *		*.max-locked-memory resource control value.
8536  *      EFAULT - memory pertains to a regular file mapped shared and
8537  *		and DDI_UMEMLOCK_LONGTERM flag is set
8538  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8539  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as;
	struct seg		*seg;
	vnode_t			*vp;

	/* Allow device drivers to not have to reference "curproc" */
	if (procp == NULL)
		procp = curproc;
	as = procp->p_as;
	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked: the project's
		 * locked-memory resource control would be exceeded.
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 *
	 * segdev is allowed here as it is already locked.  This allows
	 * for memory exported by drivers through mmap() (which is already
	 * locked) to be allowed for LONGTERM.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		extern	struct seg_ops segdev_ops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (seg->s_ops == &segdev_ops)
				continue;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8712 
8713 /*
8714  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8715  * the cookie.  Called from i_ddi_umem_unlock_thread.
8716  */
8717 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8779 
8780 /*
8781  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8782  *
8783  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8784  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8785  * via calls to ddi_umem_unlock.
8786  */
8787 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			/* drop the mutex: the unlock itself may block */
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8819 
8820 /*
8821  * Start the thread that will process the ddi_umem_unlock list if it is
8822  * not already started (i_ddi_umem_unlock_thread).
8823  */
static void
i_ddi_umem_unlock_thread_start(void)
{
	mutex_enter(&ddi_umem_unlock_mutex);
	/* re-check under the mutex: a racing caller may have started it */
	if (ddi_umem_unlock_thread == NULL) {
		ddi_umem_unlock_thread = thread_create(NULL, 0,
		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
		    TS_RUN, minclsyspri);
	}
	mutex_exit(&ddi_umem_unlock_mutex);
}
8835 
8836 /*
8837  * Lock the virtual address range in the current process and create a
8838  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8839  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8840  * to user space.
8841  *
 * Note: The resource control accounting currently uses a full charge model;
 * in other words, attempts to lock the same/overlapping areas of memory
 * will deduct the full size of the buffer from the project's running
 * counter for the device locked memory. This applies to umem_lockmemory too.
8846  *
8847  * addr, size should be PAGESIZE aligned
8848  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8849  *	identifies whether the locked memory will be read or written or both
8850  *
8851  * Returns 0 on success
8852  *	EINVAL - for invalid parameters
8853  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8854  *	ENOMEM - is returned if the current request to lock memory exceeds
8855  *		*.max-locked-memory resource control value.
8856  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8857  */
int
ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
{
	int	error;
	struct ddi_umem_cookie *p;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only two valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
		return (EINVAL);
	}

	/* At least one of the two flags (or both) must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
		return (EINVAL);
	}

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	if ((len & PAGEOFFSET) != 0) {
		return (EINVAL);
	}

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store curproc in cookie for later iosetup/unlock */
	p->procp = (void *)curproc;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = curproc->p_as;
	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked: the project's
		 * locked-memory resource control would be exceeded.
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
	    addr, len, p->s_flags);
	if (error != 0) {
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	/* No driver callback here, so only i_ddi_umem_unlock uses the cookie */
	p->cook_refcnt = 1;

	*cookie = (ddi_umem_cookie_t)p;
	return (error);
}
8943 
8944 /*
8945  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8946  * unlocked by i_ddi_umem_unlock_thread.
8947  */
8948 
void
ddi_umem_unlock(ddi_umem_cookie_t cookie)
{
	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;

	ASSERT(p->type == UMEM_LOCKED);
	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
	ASSERT(ddi_umem_unlock_thread != NULL);

	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
	/*
	 * Queue the unlock request and notify i_ddi_umem_unlock thread
	 * if it's called in the interrupt context. Otherwise, unlock pages
	 * immediately.
	 */
	if (servicing_interrupt()) {
		/* queue the unlock request and notify the thread */
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head == NULL) {
			/* list was empty: the thread may be asleep, wake it */
			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
			cv_broadcast(&ddi_umem_unlock_cv);
		} else {
			ddi_umem_unlock_tail->unl_forw = p;
			ddi_umem_unlock_tail = p;
		}
		mutex_exit(&ddi_umem_unlock_mutex);
	} else {
		/* unlock the pages right away */
		(void) i_ddi_umem_unlock(p);
	}
}
8980 
8981 /*
8982  * Create a buf structure from a ddi_umem_cookie
8983  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8984  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8985  * off, len - identifies the portion of the memory represented by the cookie
8986  *		that the buf points to.
8987  *	NOTE: off, len need to follow the alignment/size restrictions of the
8988  *		device (dev) that this buf will be passed to. Some devices
8989  *		will accept unrestricted alignment/size, whereas others (such as
8990  *		st) require some block-size alignment/size. It is the caller's
8991  *		responsibility to ensure that the alignment/size restrictions
8992  *		are met (we cannot assert as we do not know the restrictions)
8993  *
8994  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8995  *		the flags used in ddi_umem_lock
8996  *
8997  * The following three arguments are used to initialize fields in the
8998  * buf structure and are uninterpreted by this routine.
8999  *
9000  * dev
9001  * blkno
9002  * iodone
9003  *
9004  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
9005  *
9006  * Returns a buf structure pointer on success (to be freed by freerbuf)
9007  *	NULL on any parameter error or memory alloc failure
9008  *
9009  */
9010 struct buf *
9011 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
9012 	int direction, dev_t dev, daddr_t blkno,
9013 	int (*iodone)(struct buf *), int sleepflag)
9014 {
9015 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
9016 	struct buf *bp;
9017 
9018 	/*
9019 	 * check for valid cookie offset, len
9020 	 */
9021 	if ((off + len) > p->size) {
9022 		return (NULL);
9023 	}
9024 
9025 	if (len > p->size) {
9026 		return (NULL);
9027 	}
9028 
9029 	/* direction has to be one of B_READ or B_WRITE */
9030 	if ((direction != B_READ) && (direction != B_WRITE)) {
9031 		return (NULL);
9032 	}
9033 
9034 	/* These are the only two valid sleepflags */
9035 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
9036 		return (NULL);
9037 	}
9038 
9039 	/*
9040 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
9041 	 */
9042 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
9043 		return (NULL);
9044 	}
9045 
9046 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
9047 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
9048 	    (p->procp == NULL) : (p->procp != NULL));
9049 
9050 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
9051 	if (bp == NULL) {
9052 		return (NULL);
9053 	}
9054 	bioinit(bp);
9055 
9056 	bp->b_flags = B_BUSY | B_PHYS | direction;
9057 	bp->b_edev = dev;
9058 	bp->b_lblkno = blkno;
9059 	bp->b_iodone = iodone;
9060 	bp->b_bcount = len;
9061 	bp->b_proc = (proc_t *)p->procp;
9062 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
9063 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
9064 	if (p->pparray != NULL) {
9065 		bp->b_flags |= B_SHADOW;
9066 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
9067 		bp->b_shadow = p->pparray + btop(off);
9068 	}
9069 	return (bp);
9070 }
9071 
9072 /*
9073  * Fault-handling and related routines
9074  */
9075 
9076 ddi_devstate_t
9077 ddi_get_devstate(dev_info_t *dip)
9078 {
9079 	if (DEVI_IS_DEVICE_OFFLINE(dip))
9080 		return (DDI_DEVSTATE_OFFLINE);
9081 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
9082 		return (DDI_DEVSTATE_DOWN);
9083 	else if (DEVI_IS_BUS_QUIESCED(dip))
9084 		return (DDI_DEVSTATE_QUIESCED);
9085 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
9086 		return (DDI_DEVSTATE_DEGRADED);
9087 	else
9088 		return (DDI_DEVSTATE_UP);
9089 }
9090 
9091 void
9092 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
9093 	ddi_fault_location_t location, const char *message)
9094 {
9095 	struct ddi_fault_event_data fd;
9096 	ddi_eventcookie_t ec;
9097 
9098 	/*
9099 	 * Assemble all the information into a fault-event-data structure
9100 	 */
9101 	fd.f_dip = dip;
9102 	fd.f_impact = impact;
9103 	fd.f_location = location;
9104 	fd.f_message = message;
9105 	fd.f_oldstate = ddi_get_devstate(dip);
9106 
9107 	/*
9108 	 * Get eventcookie from defining parent.
9109 	 */
9110 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
9111 	    DDI_SUCCESS)
9112 		return;
9113 
9114 	(void) ndi_post_event(dip, dip, ec, &fd);
9115 }
9116 
9117 char *
9118 i_ddi_devi_class(dev_info_t *dip)
9119 {
9120 	return (DEVI(dip)->devi_device_class);
9121 }
9122 
9123 int
9124 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
9125 {
9126 	struct dev_info *devi = DEVI(dip);
9127 
9128 	mutex_enter(&devi->devi_lock);
9129 
9130 	if (devi->devi_device_class)
9131 		kmem_free(devi->devi_device_class,
9132 		    strlen(devi->devi_device_class) + 1);
9133 
9134 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
9135 	    != NULL) {
9136 		mutex_exit(&devi->devi_lock);
9137 		return (DDI_SUCCESS);
9138 	}
9139 
9140 	mutex_exit(&devi->devi_lock);
9141 
9142 	return (DDI_FAILURE);
9143 }
9144 
9145 
9146 /*
9147  * Task Queues DDI interfaces.
9148  */
9149 
9150 /* ARGSUSED */
9151 ddi_taskq_t *
9152 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
9153     pri_t pri, uint_t cflags)
9154 {
9155 	char full_name[TASKQ_NAMELEN];
9156 	const char *tq_name;
9157 	int nodeid = 0;
9158 
9159 	if (dip == NULL)
9160 		tq_name = name;
9161 	else {
9162 		nodeid = ddi_get_instance(dip);
9163 
9164 		if (name == NULL)
9165 			name = "tq";
9166 
9167 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
9168 		    ddi_driver_name(dip), name);
9169 
9170 		tq_name = full_name;
9171 	}
9172 
9173 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
9174 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
9175 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
9176 }
9177 
9178 void
9179 ddi_taskq_destroy(ddi_taskq_t *tq)
9180 {
9181 	taskq_destroy((taskq_t *)tq);
9182 }
9183 
9184 int
9185 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
9186     void *arg, uint_t dflags)
9187 {
9188 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
9189 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
9190 
9191 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
9192 }
9193 
9194 void
9195 ddi_taskq_wait(ddi_taskq_t *tq)
9196 {
9197 	taskq_wait((taskq_t *)tq);
9198 }
9199 
9200 void
9201 ddi_taskq_suspend(ddi_taskq_t *tq)
9202 {
9203 	taskq_suspend((taskq_t *)tq);
9204 }
9205 
9206 boolean_t
9207 ddi_taskq_suspended(ddi_taskq_t *tq)
9208 {
9209 	return (taskq_suspended((taskq_t *)tq));
9210 }
9211 
9212 void
9213 ddi_taskq_resume(ddi_taskq_t *tq)
9214 {
9215 	taskq_resume((taskq_t *)tq);
9216 }
9217 
9218 int
9219 ddi_parse(
9220 	const char	*ifname,
9221 	char		*alnum,
9222 	uint_t		*nump)
9223 {
9224 	const char	*p;
9225 	int		l;
9226 	ulong_t		num;
9227 	boolean_t	nonum = B_TRUE;
9228 	char		c;
9229 
9230 	l = strlen(ifname);
9231 	for (p = ifname + l; p != ifname; l--) {
9232 		c = *--p;
9233 		if (!isdigit(c)) {
9234 			(void) strlcpy(alnum, ifname, l + 1);
9235 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
9236 				return (DDI_FAILURE);
9237 			break;
9238 		}
9239 		nonum = B_FALSE;
9240 	}
9241 	if (l == 0 || nonum)
9242 		return (DDI_FAILURE);
9243 
9244 	*nump = num;
9245 	return (DDI_SUCCESS);
9246 }
9247 
9248 /*
9249  * Default initialization function for drivers that don't need to quiesce.
9250  */
9251 /* ARGSUSED */
9252 int
9253 ddi_quiesce_not_needed(dev_info_t *dip)
9254 {
9255 	return (DDI_SUCCESS);
9256 }
9257 
9258 /*
9259  * Initialization function for drivers that should implement quiesce()
9260  * but haven't yet.
9261  */
9262 /* ARGSUSED */
9263 int
9264 ddi_quiesce_not_supported(dev_info_t *dip)
9265 {
9266 	return (DDI_FAILURE);
9267 }
9268 
9269 char *
9270 ddi_strdup(const char *str, int flag)
9271 {
9272 	int	n;
9273 	char	*ptr;
9274 
9275 	ASSERT(str != NULL);
9276 	ASSERT((flag == KM_SLEEP) || (flag == KM_NOSLEEP));
9277 
9278 	n = strlen(str);
9279 	if ((ptr = kmem_alloc(n + 1, flag)) == NULL)
9280 		return (NULL);
9281 	bcopy(str, ptr, n + 1);
9282 	return (ptr);
9283 }
9284 
9285 char *
9286 strdup(const char *str)
9287 {
9288 	return (ddi_strdup(str, KM_SLEEP));
9289 }
9290 
9291 void
9292 strfree(char *str)
9293 {
9294 	ASSERT(str != NULL);
9295 	kmem_free(str, strlen(str) + 1);
9296 }
9297 
9298 /*
9299  * Generic DDI callback interfaces.
9300  */
9301 
int
ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
    void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
{
	ddi_cb_t	*cbp;

	ASSERT(dip != NULL);
	ASSERT(DDI_CB_FLAG_VALID(flags));
	ASSERT(cbfunc != NULL);
	ASSERT(ret_hdlp != NULL);

	/* Sanity check the context */
	ASSERT(!servicing_interrupt());
	if (servicing_interrupt())
		return (DDI_FAILURE);

	/* Validate parameters (same checks as the ASSERTs, for non-DEBUG) */
	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
	    (cbfunc == NULL) || (ret_hdlp == NULL))
		return (DDI_EINVAL);

	/* Check for previous registration (one callback per devinfo node) */
	if (DEVI(dip)->devi_cb_p != NULL)
		return (DDI_EALREADY);

	/* Allocate and initialize callback */
	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
	cbp->cb_dip = dip;
	cbp->cb_func = cbfunc;
	cbp->cb_arg1 = arg1;
	cbp->cb_arg2 = arg2;
	cbp->cb_flags = flags;
	DEVI(dip)->devi_cb_p = cbp;

	/* If adding an IRM callback, notify IRM */
	if (flags & DDI_CB_FLAG_INTR)
		i_ddi_irm_set_cb(dip, B_TRUE);

	/* The handle is the address of the devinfo node's callback slot */
	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
	return (DDI_SUCCESS);
}
9343 
9344 int
9345 ddi_cb_unregister(ddi_cb_handle_t hdl)
9346 {
9347 	ddi_cb_t	*cbp;
9348 	dev_info_t	*dip;
9349 
9350 	ASSERT(hdl != NULL);
9351 
9352 	/* Sanity check the context */
9353 	ASSERT(!servicing_interrupt());
9354 	if (servicing_interrupt())
9355 		return (DDI_FAILURE);
9356 
9357 	/* Validate parameters */
9358 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9359 	    ((dip = cbp->cb_dip) == NULL))
9360 		return (DDI_EINVAL);
9361 
9362 	/* If removing an IRM callback, notify IRM */
9363 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9364 		i_ddi_irm_set_cb(dip, B_FALSE);
9365 
9366 	/* Destroy the callback */
9367 	kmem_free(cbp, sizeof (ddi_cb_t));
9368 	DEVI(dip)->devi_cb_p = NULL;
9369 
9370 	return (DDI_SUCCESS);
9371 }
9372