xref: /titanic_50/usr/src/uts/common/os/sunddi.c (revision 42516a0c6ebf6e259c2abcd1ca315fec43268f39)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_timer.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_locked_mem;
89 extern	rctl_hndl_t rc_zone_locked_mem;
90 
91 #ifdef DEBUG
92 static int sunddi_debug = 0;
93 #endif /* DEBUG */
94 
95 /* ddi_umem_unlock miscellaneous */
96 
97 static	void	i_ddi_umem_unlock_thread_start(void);
98 
99 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
100 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
101 static	kthread_t	*ddi_umem_unlock_thread;
102 /*
103  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
104  */
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
106 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
107 
108 
109 /*
110  * DDI(Sun) Function and flag definitions:
111  */
112 
113 #if defined(__x86)
114 /*
115  * Used to indicate which entries were chosen from a range.
116  */
117 char	*chosen_reg = "chosen-reg";
118 #endif
119 
120 /*
121  * Function used to ring system console bell
122  */
123 void (*ddi_console_bell_func)(clock_t duration);
124 
125 /*
126  * Creating register mappings and handling interrupts:
127  */
128 
129 /*
130  * Generic ddi_map: Call parent to fulfill request...
131  */
132 
133 int
134 ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
135     off_t len, caddr_t *addrp)
136 {
137 	dev_info_t *pdip;
138 
139 	ASSERT(dp);
140 	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
141 	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
142 	    dp, mp, offset, len, addrp));
143 }
144 
145 /*
146  * ddi_apply_range: (Called by nexi only.)
147  * Apply ranges in parent node dp, to child regspec rp...
148  */
149 
150 int
151 ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
152 {
153 	return (i_ddi_apply_range(dp, rdip, rp));
154 }
155 
156 int
157 ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
158     off_t len)
159 {
160 	ddi_map_req_t mr;
161 #if defined(__x86)
162 	struct {
163 		int	bus;
164 		int	addr;
165 		int	size;
166 	} reg, *reglist;
167 	uint_t	length;
168 	int	rc;
169 
170 	/*
171 	 * get the 'registers' or the 'reg' property.
172 	 * We look up the reg property as an array of
173 	 * int's.
174 	 */
175 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
176 	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
177 	if (rc != DDI_PROP_SUCCESS)
178 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
179 		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
180 	if (rc == DDI_PROP_SUCCESS) {
181 		/*
182 		 * point to the required entry.
183 		 */
184 		reg = reglist[rnumber];
185 		reg.addr += offset;
186 		if (len != 0)
187 			reg.size = len;
188 		/*
189 		 * make a new property containing ONLY the required tuple.
190 		 */
191 		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
192 		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
193 		    != DDI_PROP_SUCCESS) {
194 			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
195 			    "property", DEVI(dip)->devi_name,
196 			    DEVI(dip)->devi_instance, chosen_reg);
197 		}
198 		/*
199 		 * free the memory allocated by
200 		 * ddi_prop_lookup_int_array ().
201 		 */
202 		ddi_prop_free((void *)reglist);
203 	}
204 #endif
205 	mr.map_op = DDI_MO_MAP_LOCKED;
206 	mr.map_type = DDI_MT_RNUMBER;
207 	mr.map_obj.rnumber = rnumber;
208 	mr.map_prot = PROT_READ | PROT_WRITE;
209 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
210 	mr.map_handlep = NULL;
211 	mr.map_vers = DDI_MAP_VERSION;
212 
213 	/*
214 	 * Call my parent to map in my regs.
215 	 */
216 
217 	return (ddi_map(dip, &mr, offset, len, kaddrp));
218 }
219 
220 void
221 ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
222     off_t len)
223 {
224 	ddi_map_req_t mr;
225 
226 	mr.map_op = DDI_MO_UNMAP;
227 	mr.map_type = DDI_MT_RNUMBER;
228 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
229 	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
230 	mr.map_obj.rnumber = rnumber;
231 	mr.map_handlep = NULL;
232 	mr.map_vers = DDI_MAP_VERSION;
233 
234 	/*
235 	 * Call my parent to unmap my regs.
236 	 */
237 
238 	(void) ddi_map(dip, &mr, offset, len, kaddrp);
239 	*kaddrp = (caddr_t)0;
240 #if defined(__x86)
241 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
242 #endif
243 }
244 
245 int
246 ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
247 	off_t offset, off_t len, caddr_t *vaddrp)
248 {
249 	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
250 }
251 
252 /*
253  * nullbusmap:	The/DDI default bus_map entry point for nexi
254  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
255  *		with no HAT/MMU layer to be programmed at this level.
256  *
257  *		If the call is to map by rnumber, return an error,
258  *		otherwise pass anything else up the tree to my parent.
259  */
260 int
261 nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
262 	off_t offset, off_t len, caddr_t *vaddrp)
263 {
264 	_NOTE(ARGUNUSED(rdip))
265 	if (mp->map_type == DDI_MT_RNUMBER)
266 		return (DDI_ME_UNSUPPORTED);
267 
268 	return (ddi_map(dip, mp, offset, len, vaddrp));
269 }
270 
271 /*
272  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
273  *			   Only for use by nexi using the reg/range paradigm.
274  */
275 struct regspec *
276 ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
277 {
278 	return (i_ddi_rnumber_to_regspec(dip, rnumber));
279 }
280 
281 
282 /*
283  * Note that we allow the dip to be nil because we may be called
284  * prior even to the instantiation of the devinfo tree itself - all
285  * regular leaf and nexus drivers should always use a non-nil dip!
286  *
287  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
288  * simply get a synchronous fault as soon as we touch a missing address.
289  *
290  * Poke is rather more carefully handled because we might poke to a write
291  * buffer, "succeed", then only find some time later that we got an
292  * asynchronous fault that indicated that the address we were writing to
293  * was not really backed by hardware.
294  */
295 
296 static int
297 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
298     void *addr, void *value_p)
299 {
300 	union {
301 		uint64_t	u64;
302 		uint32_t	u32;
303 		uint16_t	u16;
304 		uint8_t		u8;
305 	} peekpoke_value;
306 
307 	peekpoke_ctlops_t peekpoke_args;
308 	uint64_t dummy_result;
309 	int rval;
310 
311 	/* Note: size is assumed to be correct;  it is not checked. */
312 	peekpoke_args.size = size;
313 	peekpoke_args.dev_addr = (uintptr_t)addr;
314 	peekpoke_args.handle = NULL;
315 	peekpoke_args.repcount = 1;
316 	peekpoke_args.flags = 0;
317 
318 	if (cmd == DDI_CTLOPS_POKE) {
319 		switch (size) {
320 		case sizeof (uint8_t):
321 			peekpoke_value.u8 = *(uint8_t *)value_p;
322 			break;
323 		case sizeof (uint16_t):
324 			peekpoke_value.u16 = *(uint16_t *)value_p;
325 			break;
326 		case sizeof (uint32_t):
327 			peekpoke_value.u32 = *(uint32_t *)value_p;
328 			break;
329 		case sizeof (uint64_t):
330 			peekpoke_value.u64 = *(uint64_t *)value_p;
331 			break;
332 		}
333 	}
334 
335 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
336 
337 	if (devi != NULL)
338 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
339 		    &dummy_result);
340 	else
341 		rval = peekpoke_mem(cmd, &peekpoke_args);
342 
343 	/*
344 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
345 	 */
346 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
347 		switch (size) {
348 		case sizeof (uint8_t):
349 			*(uint8_t *)value_p = peekpoke_value.u8;
350 			break;
351 		case sizeof (uint16_t):
352 			*(uint16_t *)value_p = peekpoke_value.u16;
353 			break;
354 		case sizeof (uint32_t):
355 			*(uint32_t *)value_p = peekpoke_value.u32;
356 			break;
357 		case sizeof (uint64_t):
358 			*(uint64_t *)value_p = peekpoke_value.u64;
359 			break;
360 		}
361 	}
362 
363 	return (rval);
364 }
365 
366 /*
367  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
368  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
369  */
370 int
371 ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
372 {
373 	switch (size) {
374 	case sizeof (uint8_t):
375 	case sizeof (uint16_t):
376 	case sizeof (uint32_t):
377 	case sizeof (uint64_t):
378 		break;
379 	default:
380 		return (DDI_FAILURE);
381 	}
382 
383 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
384 }
385 
386 int
387 ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
388 {
389 	switch (size) {
390 	case sizeof (uint8_t):
391 	case sizeof (uint16_t):
392 	case sizeof (uint32_t):
393 	case sizeof (uint64_t):
394 		break;
395 	default:
396 		return (DDI_FAILURE);
397 	}
398 
399 	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
400 }
401 
402 int
403 ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
404 {
405 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
406 	    val_p));
407 }
408 
409 int
410 ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
411 {
412 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
413 	    val_p));
414 }
415 
416 int
417 ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
418 {
419 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
420 	    val_p));
421 }
422 
423 int
424 ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
425 {
426 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
427 	    val_p));
428 }
429 
430 
431 /*
432  * We need to separate the old interfaces from the new ones and leave them
433  * in here for a while. Previous versions of the OS defined the new interfaces
434  * to the old interfaces. This way we can fix things up so that we can
435  * eventually remove these interfaces.
436  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
437  * or earlier will actually have a reference to ddi_peekc in the binary.
438  */
439 #ifdef _ILP32
440 int
441 ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
442 {
443 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
444 	    val_p));
445 }
446 
447 int
448 ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
449 {
450 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
451 	    val_p));
452 }
453 
454 int
455 ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
456 {
457 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
458 	    val_p));
459 }
460 
461 int
462 ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
463 {
464 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
465 	    val_p));
466 }
467 #endif /* _ILP32 */
468 
469 int
470 ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
471 {
472 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
473 }
474 
475 int
476 ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
477 {
478 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
479 }
480 
481 int
482 ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
483 {
484 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
485 }
486 
487 int
488 ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
489 {
490 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
491 }
492 
493 /*
494  * We need to separate the old interfaces from the new ones and leave them
495  * in here for a while. Previous versions of the OS defined the new interfaces
496  * to the old interfaces. This way we can fix things up so that we can
497  * eventually remove these interfaces.
498  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
499  * or earlier will actually have a reference to ddi_pokec in the binary.
500  */
501 #ifdef _ILP32
502 int
503 ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
504 {
505 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
506 }
507 
508 int
509 ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
510 {
511 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
512 }
513 
514 int
515 ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
516 {
517 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
518 }
519 
520 int
521 ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
522 {
523 	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
524 }
525 #endif /* _ILP32 */
526 
527 /*
528  * ddi_peekpokeio() is used primarily by the mem drivers for moving
529  * data to and from uio structures via peek and poke.  Note that we
530  * use "internal" routines ddi_peek and ddi_poke to make this go
531  * slightly faster, avoiding the call overhead ..
532  */
533 int
534 ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
535     caddr_t addr, size_t len, uint_t xfersize)
536 {
537 	int64_t	ibuffer;
538 	int8_t w8;
539 	size_t sz;
540 	int o;
541 
542 	if (xfersize > sizeof (long))
543 		xfersize = sizeof (long);
544 
545 	while (len != 0) {
546 		if ((len | (uintptr_t)addr) & 1) {
547 			sz = sizeof (int8_t);
548 			if (rw == UIO_WRITE) {
549 				if ((o = uwritec(uio)) == -1)
550 					return (DDI_FAILURE);
551 				if (ddi_poke8(devi, (int8_t *)addr,
552 				    (int8_t)o) != DDI_SUCCESS)
553 					return (DDI_FAILURE);
554 			} else {
555 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
556 				    (int8_t *)addr, &w8) != DDI_SUCCESS)
557 					return (DDI_FAILURE);
558 				if (ureadc(w8, uio))
559 					return (DDI_FAILURE);
560 			}
561 		} else {
562 			switch (xfersize) {
563 			case sizeof (int64_t):
564 				if (((len | (uintptr_t)addr) &
565 				    (sizeof (int64_t) - 1)) == 0) {
566 					sz = xfersize;
567 					break;
568 				}
569 				/*FALLTHROUGH*/
570 			case sizeof (int32_t):
571 				if (((len | (uintptr_t)addr) &
572 				    (sizeof (int32_t) - 1)) == 0) {
573 					sz = xfersize;
574 					break;
575 				}
576 				/*FALLTHROUGH*/
577 			default:
578 				/*
579 				 * This still assumes that we might have an
580 				 * I/O bus out there that permits 16-bit
581 				 * transfers (and that it would be upset by
582 				 * 32-bit transfers from such locations).
583 				 */
584 				sz = sizeof (int16_t);
585 				break;
586 			}
587 
588 			if (rw == UIO_READ) {
589 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
590 				    addr, &ibuffer) != DDI_SUCCESS)
591 					return (DDI_FAILURE);
592 			}
593 
594 			if (uiomove(&ibuffer, sz, rw, uio))
595 				return (DDI_FAILURE);
596 
597 			if (rw == UIO_WRITE) {
598 				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
599 				    addr, &ibuffer) != DDI_SUCCESS)
600 					return (DDI_FAILURE);
601 			}
602 		}
603 		addr += sz;
604 		len -= sz;
605 	}
606 	return (DDI_SUCCESS);
607 }
608 
609 /*
610  * These routines are used by drivers that do layered ioctls
611  * On sparc, they're implemented in assembler to avoid spilling
612  * register windows in the common (copyin) case ..
613  */
614 #if !defined(__sparc)
615 int
616 ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
617 {
618 	if (flags & FKIOCTL)
619 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
620 	return (copyin(buf, kernbuf, size));
621 }
622 
623 int
624 ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
625 {
626 	if (flags & FKIOCTL)
627 		return (kcopy(buf, kernbuf, size) ? -1 : 0);
628 	return (copyout(buf, kernbuf, size));
629 }
630 #endif	/* !__sparc */
631 
632 /*
633  * Conversions in nexus pagesize units.  We don't duplicate the
634  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
635  * routines anyway.
636  */
637 unsigned long
638 ddi_btop(dev_info_t *dip, unsigned long bytes)
639 {
640 	unsigned long pages;
641 
642 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
643 	return (pages);
644 }
645 
646 unsigned long
647 ddi_btopr(dev_info_t *dip, unsigned long bytes)
648 {
649 	unsigned long pages;
650 
651 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
652 	return (pages);
653 }
654 
655 unsigned long
656 ddi_ptob(dev_info_t *dip, unsigned long pages)
657 {
658 	unsigned long bytes;
659 
660 	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
661 	return (bytes);
662 }
663 
664 unsigned int
665 ddi_enter_critical(void)
666 {
667 	return ((uint_t)spl7());
668 }
669 
670 void
671 ddi_exit_critical(unsigned int spl)
672 {
673 	splx((int)spl);
674 }
675 
676 /*
677  * Nexus ctlops punter
678  */
679 
680 #if !defined(__sparc)
681 /*
682  * Request bus_ctl parent to handle a bus_ctl request
683  *
684  * (The sparc version is in sparc_ddi.s)
685  */
686 int
687 ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
688 {
689 	int (*fp)();
690 
691 	if (!d || !r)
692 		return (DDI_FAILURE);
693 
694 	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
695 		return (DDI_FAILURE);
696 
697 	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
698 	return ((*fp)(d, r, op, a, v));
699 }
700 
701 #endif
702 
703 /*
704  * DMA/DVMA setup
705  */
706 
707 #if defined(__sparc)
708 static ddi_dma_lim_t standard_limits = {
709 	(uint_t)0,	/* addr_t dlim_addr_lo */
710 	(uint_t)-1,	/* addr_t dlim_addr_hi */
711 	(uint_t)-1,	/* uint_t dlim_cntr_max */
712 	(uint_t)1,	/* uint_t dlim_burstsizes */
713 	(uint_t)1,	/* uint_t dlim_minxfer */
714 	0		/* uint_t dlim_dmaspeed */
715 };
716 #elif defined(__x86)
717 static ddi_dma_lim_t standard_limits = {
718 	(uint_t)0,		/* addr_t dlim_addr_lo */
719 	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
720 	(uint_t)0,		/* uint_t dlim_cntr_max */
721 	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
722 	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
723 	(uint_t)0,		/* uint_t dlim_dmaspeed */
724 	(uint_t)0x86<<24+0,	/* uint_t dlim_version */
725 	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
726 	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
727 	(uint_t)512,		/* uint_t dlim_granular */
728 	(int)1,			/* int dlim_sgllen */
729 	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
730 };
731 
732 #endif
733 
734 int
735 ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
736     ddi_dma_handle_t *handlep)
737 {
738 	int (*funcp)() = ddi_dma_map;
739 	struct bus_ops *bop;
740 #if defined(__sparc)
741 	auto ddi_dma_lim_t dma_lim;
742 
743 	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
744 		dma_lim = standard_limits;
745 	} else {
746 		dma_lim = *dmareqp->dmar_limits;
747 	}
748 	dmareqp->dmar_limits = &dma_lim;
749 #endif
750 #if defined(__x86)
751 	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
752 		return (DDI_FAILURE);
753 #endif
754 
755 	/*
756 	 * Handle the case that the requester is both a leaf
757 	 * and a nexus driver simultaneously by calling the
758 	 * requester's bus_dma_map function directly instead
759 	 * of ddi_dma_map.
760 	 */
761 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
762 	if (bop && bop->bus_dma_map)
763 		funcp = bop->bus_dma_map;
764 	return ((*funcp)(dip, dip, dmareqp, handlep));
765 }
766 
767 int
768 ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
769     uint_t flags, int (*waitfp)(), caddr_t arg,
770     ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
771 {
772 	int (*funcp)() = ddi_dma_map;
773 	ddi_dma_lim_t dma_lim;
774 	struct ddi_dma_req dmareq;
775 	struct bus_ops *bop;
776 
777 	if (len == 0) {
778 		return (DDI_DMA_NOMAPPING);
779 	}
780 	if (limits == (ddi_dma_lim_t *)0) {
781 		dma_lim = standard_limits;
782 	} else {
783 		dma_lim = *limits;
784 	}
785 	dmareq.dmar_limits = &dma_lim;
786 	dmareq.dmar_flags = flags;
787 	dmareq.dmar_fp = waitfp;
788 	dmareq.dmar_arg = arg;
789 	dmareq.dmar_object.dmao_size = len;
790 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
791 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
792 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
793 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
794 
795 	/*
796 	 * Handle the case that the requester is both a leaf
797 	 * and a nexus driver simultaneously by calling the
798 	 * requester's bus_dma_map function directly instead
799 	 * of ddi_dma_map.
800 	 */
801 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
802 	if (bop && bop->bus_dma_map)
803 		funcp = bop->bus_dma_map;
804 
805 	return ((*funcp)(dip, dip, &dmareq, handlep));
806 }
807 
808 int
809 ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
810     int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
811     ddi_dma_handle_t *handlep)
812 {
813 	int (*funcp)() = ddi_dma_map;
814 	ddi_dma_lim_t dma_lim;
815 	struct ddi_dma_req dmareq;
816 	struct bus_ops *bop;
817 
818 	if (limits == (ddi_dma_lim_t *)0) {
819 		dma_lim = standard_limits;
820 	} else {
821 		dma_lim = *limits;
822 	}
823 	dmareq.dmar_limits = &dma_lim;
824 	dmareq.dmar_flags = flags;
825 	dmareq.dmar_fp = waitfp;
826 	dmareq.dmar_arg = arg;
827 	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
828 
829 	if (bp->b_flags & B_PAGEIO) {
830 		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
831 		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
832 		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
833 		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
834 	} else {
835 		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
836 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
837 		if (bp->b_flags & B_SHADOW) {
838 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
839 			    bp->b_shadow;
840 		} else {
841 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
842 		}
843 
844 		/*
845 		 * If the buffer has no proc pointer, or the proc
846 		 * struct has the kernel address space, or the buffer has
847 		 * been marked B_REMAPPED (meaning that it is now
848 		 * mapped into the kernel's address space), then
849 		 * the address space is kas (kernel address space).
850 		 */
851 		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
852 		    (bp->b_flags & B_REMAPPED)) {
853 			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
854 		} else {
855 			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
856 			    bp->b_proc->p_as;
857 		}
858 	}
859 
860 	/*
861 	 * Handle the case that the requester is both a leaf
862 	 * and a nexus driver simultaneously by calling the
863 	 * requester's bus_dma_map function directly instead
864 	 * of ddi_dma_map.
865 	 */
866 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
867 	if (bop && bop->bus_dma_map)
868 		funcp = bop->bus_dma_map;
869 
870 	return ((*funcp)(dip, dip, &dmareq, handlep));
871 }
872 
873 #if !defined(__sparc)
874 /*
875  * Request bus_dma_ctl parent to fiddle with a dma request.
876  *
877  * (The sparc version is in sparc_subr.s)
878  */
879 int
880 ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
881     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
882     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
883 {
884 	int (*fp)();
885 
886 	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
887 	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
888 	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
889 }
890 #endif
891 
892 /*
893  * For all DMA control functions, call the DMA control
894  * routine and return status.
895  *
896  * Just plain assume that the parent is to be called.
897  * If a nexus driver or a thread outside the framework
898  * of a nexus driver or a leaf driver calls these functions,
899  * it is up to them to deal with the fact that the parent's
900  * bus_dma_ctl function will be the first one called.
901  */
902 
903 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
904 
905 int
906 ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
907 {
908 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
909 }
910 
911 int
912 ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
913 {
914 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
915 }
916 
917 int
918 ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
919 {
920 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
921 	    (off_t *)c, 0, (caddr_t *)o, 0));
922 }
923 
924 int
925 ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
926 {
927 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
928 	    l, (caddr_t *)c, 0));
929 }
930 
931 int
932 ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
933 {
934 	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
935 		return (DDI_FAILURE);
936 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
937 }
938 
939 int
940 ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
941     ddi_dma_win_t *nwin)
942 {
943 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
944 	    (caddr_t *)nwin, 0));
945 }
946 
947 int
948 ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
949 {
950 	ddi_dma_handle_t h = (ddi_dma_handle_t)win;
951 
952 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
953 	    (size_t *)&seg, (caddr_t *)nseg, 0));
954 }
955 
956 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
957 /*
958  * This routine is Obsolete and should be removed from ALL architectures
959  * in a future release of Solaris.
960  *
961  * It is deliberately NOT ported to amd64; please fix the code that
962  * depends on this routine to use ddi_dma_nextcookie(9F).
963  *
964  * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
965  * is a side effect to some other cleanup), we're still not going to support
966  * this interface on x64.
967  */
968 int
969 ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
970     ddi_dma_cookie_t *cookiep)
971 {
972 	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;
973 
974 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
975 	    (caddr_t *)cookiep, 0));
976 }
977 #endif	/* (__i386 && !__amd64) || __sparc */
978 
979 #if !defined(__sparc)
980 
981 /*
982  * The SPARC versions of these routines are done in assembler to
983  * save register windows, so they're in sparc_subr.s.
984  */
985 
986 int
987 ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
988 	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
989 {
990 	dev_info_t	*hdip;
991 	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
992 	    ddi_dma_handle_t *);
993 
994 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;
995 
996 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
997 	return ((*funcp)(hdip, rdip, dmareqp, handlep));
998 }
999 
1000 int
1001 ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
1002     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
1003 {
1004 	dev_info_t	*hdip;
1005 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
1006 	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);
1007 
1008 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1009 
1010 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
1011 	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
1012 }
1013 
1014 int
1015 ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
1016 {
1017 	dev_info_t	*hdip;
1018 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1019 
1020 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;
1021 
1022 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
1023 	return ((*funcp)(hdip, rdip, handlep));
1024 }
1025 
1026 int
1027 ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
1028     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
1029     ddi_dma_cookie_t *cp, uint_t *ccountp)
1030 {
1031 	dev_info_t	*hdip;
1032 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1033 	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);
1034 
1035 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
1036 
1037 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
1038 	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
1039 }
1040 
1041 int
1042 ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1043     ddi_dma_handle_t handle)
1044 {
1045 	dev_info_t	*hdip;
1046 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1047 
1048 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1049 
1050 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
1051 	return ((*funcp)(hdip, rdip, handle));
1052 }
1053 
1054 
1055 int
1056 ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1057     ddi_dma_handle_t handle, off_t off, size_t len,
1058     uint_t cache_flags)
1059 {
1060 	dev_info_t	*hdip;
1061 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1062 	    off_t, size_t, uint_t);
1063 
1064 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1065 
1066 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1067 	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
1068 }
1069 
1070 int
1071 ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
1072     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1073     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1074 {
1075 	dev_info_t	*hdip;
1076 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
1077 	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);
1078 
1079 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;
1080 
1081 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
1082 	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
1083 	    cookiep, ccountp));
1084 }
1085 
1086 int
1087 ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
1088 {
1089 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1090 	dev_info_t *hdip, *dip;
1091 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
1092 	    size_t, uint_t);
1093 
1094 	/*
1095 	 * the DMA nexus driver will set DMP_NOSYNC if the
1096 	 * platform does not require any sync operation. For
1097 	 * example if the memory is uncached or consistent
1098 	 * and without any I/O write buffers involved.
1099 	 */
1100 	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1101 		return (DDI_SUCCESS);
1102 
1103 	dip = hp->dmai_rdip;
1104 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
1105 	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
1106 	return ((*funcp)(hdip, dip, h, o, l, whom));
1107 }
1108 
1109 int
1110 ddi_dma_unbind_handle(ddi_dma_handle_t h)
1111 {
1112 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
1113 	dev_info_t *hdip, *dip;
1114 	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);
1115 
1116 	dip = hp->dmai_rdip;
1117 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
1118 	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
1119 	return ((*funcp)(hdip, dip, h));
1120 }
1121 
1122 #endif	/* !__sparc */
1123 
1124 int
1125 ddi_dma_free(ddi_dma_handle_t h)
1126 {
1127 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
1128 }
1129 
1130 int
1131 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1132 {
1133 	ddi_dma_lim_t defalt;
1134 	size_t size = len;
1135 
1136 	if (!limp) {
1137 		defalt = standard_limits;
1138 		limp = &defalt;
1139 	}
1140 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1141 	    iopbp, NULL, NULL));
1142 }
1143 
1144 void
1145 ddi_iopb_free(caddr_t iopb)
1146 {
1147 	i_ddi_mem_free(iopb, NULL);
1148 }
1149 
1150 int
1151 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1152 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1153 {
1154 	ddi_dma_lim_t defalt;
1155 	size_t size = length;
1156 
1157 	if (!limits) {
1158 		defalt = standard_limits;
1159 		limits = &defalt;
1160 	}
1161 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1162 	    1, 0, kaddrp, real_length, NULL));
1163 }
1164 
1165 void
1166 ddi_mem_free(caddr_t kaddr)
1167 {
1168 	i_ddi_mem_free(kaddr, NULL);
1169 }
1170 
1171 /*
1172  * DMA attributes, alignment, burst sizes, and transfer minimums
1173  */
1174 int
1175 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1176 {
1177 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1178 
1179 	if (attrp == NULL)
1180 		return (DDI_FAILURE);
1181 	*attrp = dimp->dmai_attr;
1182 	return (DDI_SUCCESS);
1183 }
1184 
1185 int
1186 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1187 {
1188 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1189 
1190 	if (!dimp)
1191 		return (0);
1192 	else
1193 		return (dimp->dmai_burstsizes);
1194 }
1195 
1196 int
1197 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1198 {
1199 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1200 
1201 	if (!dimp || !alignment || !mineffect)
1202 		return (DDI_FAILURE);
1203 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1204 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1205 	} else {
1206 		if (dimp->dmai_burstsizes & 0xff0000) {
1207 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1208 		} else {
1209 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1210 		}
1211 	}
1212 	*mineffect = dimp->dmai_minxfer;
1213 	return (DDI_SUCCESS);
1214 }
1215 
1216 int
1217 ddi_iomin(dev_info_t *a, int i, int stream)
1218 {
1219 	int r;
1220 
1221 	/*
1222 	 * Make sure that the initial value is sane
1223 	 */
1224 	if (i & (i - 1))
1225 		return (0);
1226 	if (i == 0)
1227 		i = (stream) ? 4 : 1;
1228 
1229 	r = ddi_ctlops(a, a,
1230 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1231 	if (r != DDI_SUCCESS || (i & (i - 1)))
1232 		return (0);
1233 	return (i);
1234 }
1235 
1236 /*
1237  * Given two DMA attribute structures, apply the attributes
1238  * of one to the other, following the rules of attributes
1239  * and the wishes of the caller.
1240  *
1241  * The rules of DMA attribute structures are that you cannot
1242  * make things *less* restrictive as you apply one set
1243  * of attributes to another.
1244  *
1245  */
1246 void
1247 ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
1248 {
1249 	attr->dma_attr_addr_lo =
1250 	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
1251 	attr->dma_attr_addr_hi =
1252 	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
1253 	attr->dma_attr_count_max =
1254 	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
1255 	attr->dma_attr_align =
1256 	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
1257 	attr->dma_attr_burstsizes =
1258 	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
1259 	attr->dma_attr_minxfer =
1260 	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
1261 	attr->dma_attr_maxxfer =
1262 	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
1263 	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
1264 	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
1265 	    (uint_t)mod->dma_attr_sgllen);
1266 	attr->dma_attr_granular =
1267 	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
1268 }
1269 
1270 /*
1271  * mmap/segmap interface:
1272  */
1273 
1274 /*
1275  * ddi_segmap:		setup the default segment driver. Calls the drivers
1276  *			XXmmap routine to validate the range to be mapped.
1277  *			Return ENXIO of the range is not valid.  Create
1278  *			a seg_dev segment that contains all of the
1279  *			necessary information and will reference the
1280  *			default segment driver routines. It returns zero
1281  *			on success or non-zero on failure.
1282  */
1283 int
1284 ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
1285     uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
1286 {
1287 	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
1288 	    off_t, uint_t, uint_t, uint_t, struct cred *);
1289 
1290 	return (spec_segmap(dev, offset, asp, addrp, len,
1291 	    prot, maxprot, flags, credp));
1292 }
1293 
1294 /*
1295  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1296  *			drivers. Allows each successive parent to resolve
1297  *			address translations and add its mappings to the
1298  *			mapping list supplied in the page structure. It
1299  *			returns zero on success	or non-zero on failure.
1300  */
1301 
1302 int
1303 ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
1304     caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
1305 {
1306 	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
1307 }
1308 
1309 /*
1310  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1311  *	Invokes platform specific DDI to determine whether attributes specified
1312  *	in attr(9s) are	valid for the region of memory that will be made
1313  *	available for direct access to user process via the mmap(2) system call.
1314  */
1315 int
1316 ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
1317     uint_t rnumber, uint_t *hat_flags)
1318 {
1319 	ddi_acc_handle_t handle;
1320 	ddi_map_req_t mr;
1321 	ddi_acc_hdl_t *hp;
1322 	int result;
1323 	dev_info_t *dip;
1324 
1325 	/*
1326 	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
1327 	 * release it immediately since it should already be held by
1328 	 * a devfs vnode.
1329 	 */
1330 	if ((dip =
1331 	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
1332 		return (-1);
1333 	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */
1334 
1335 	/*
1336 	 * Allocate and initialize the common elements of data
1337 	 * access handle.
1338 	 */
1339 	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1340 	if (handle == NULL)
1341 		return (-1);
1342 
1343 	hp = impl_acc_hdl_get(handle);
1344 	hp->ah_vers = VERS_ACCHDL;
1345 	hp->ah_dip = dip;
1346 	hp->ah_rnumber = rnumber;
1347 	hp->ah_offset = 0;
1348 	hp->ah_len = 0;
1349 	hp->ah_acc = *accattrp;
1350 
1351 	/*
1352 	 * Set up the mapping request and call to parent.
1353 	 */
1354 	mr.map_op = DDI_MO_MAP_HANDLE;
1355 	mr.map_type = DDI_MT_RNUMBER;
1356 	mr.map_obj.rnumber = rnumber;
1357 	mr.map_prot = PROT_READ | PROT_WRITE;
1358 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
1359 	mr.map_handlep = hp;
1360 	mr.map_vers = DDI_MAP_VERSION;
1361 	result = ddi_map(dip, &mr, 0, 0, NULL);
1362 
1363 	/*
1364 	 * Region must be mappable, pick up flags from the framework.
1365 	 */
1366 	*hat_flags = hp->ah_hat_flags;
1367 
1368 	impl_acc_hdl_free(handle);
1369 
1370 	/*
1371 	 * check for end result.
1372 	 */
1373 	if (result != DDI_SUCCESS)
1374 		return (-1);
1375 	return (0);
1376 }
1377 
1378 
1379 /*
1380  * Property functions:	 See also, ddipropdefs.h.
1381  *
1382  * These functions are the framework for the property functions,
1383  * i.e. they support software defined properties.  All implementation
1384  * specific property handling (i.e.: self-identifying devices and
1385  * PROM defined properties are handled in the implementation specific
1386  * functions (defined in ddi_implfuncs.h).
1387  */
1388 
1389 /*
1390  * nopropop:	Shouldn't be called, right?
1391  */
1392 int
1393 nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1394     char *name, caddr_t valuep, int *lengthp)
1395 {
1396 	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
1397 	return (DDI_PROP_NOT_FOUND);
1398 }
1399 
1400 #ifdef	DDI_PROP_DEBUG
1401 int ddi_prop_debug_flag = 0;
1402 
1403 int
1404 ddi_prop_debug(int enable)
1405 {
1406 	int prev = ddi_prop_debug_flag;
1407 
1408 	if ((enable != 0) || (prev != 0))
1409 		printf("ddi_prop_debug: debugging %s\n",
1410 		    enable ? "enabled" : "disabled");
1411 	ddi_prop_debug_flag = enable;
1412 	return (prev);
1413 }
1414 
1415 #endif	/* DDI_PROP_DEBUG */
1416 
1417 /*
1418  * Search a property list for a match, if found return pointer
1419  * to matching prop struct, else return NULL.
1420  */
1421 
1422 ddi_prop_t *
1423 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1424 {
1425 	ddi_prop_t	*propp;
1426 
1427 	/*
1428 	 * find the property in child's devinfo:
1429 	 * Search order defined by this search function is first matching
1430 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1431 	 * dev == propp->prop_dev, name == propp->name, and the correct
1432 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1433 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1434 	 */
1435 	if (dev == DDI_DEV_T_NONE)
1436 		dev = DDI_DEV_T_ANY;
1437 
1438 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1439 
1440 		if (!DDI_STRSAME(propp->prop_name, name))
1441 			continue;
1442 
1443 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1444 			continue;
1445 
1446 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1447 			continue;
1448 
1449 		return (propp);
1450 	}
1451 
1452 	return ((ddi_prop_t *)0);
1453 }
1454 
1455 /*
1456  * Search for property within devnames structures
1457  */
1458 ddi_prop_t *
1459 i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
1460 {
1461 	major_t		major;
1462 	struct devnames	*dnp;
1463 	ddi_prop_t	*propp;
1464 
1465 	/*
1466 	 * Valid dev_t value is needed to index into the
1467 	 * correct devnames entry, therefore a dev_t
1468 	 * value of DDI_DEV_T_ANY is not appropriate.
1469 	 */
1470 	ASSERT(dev != DDI_DEV_T_ANY);
1471 	if (dev == DDI_DEV_T_ANY) {
1472 		return ((ddi_prop_t *)0);
1473 	}
1474 
1475 	major = getmajor(dev);
1476 	dnp = &(devnamesp[major]);
1477 
1478 	if (dnp->dn_global_prop_ptr == NULL)
1479 		return ((ddi_prop_t *)0);
1480 
1481 	LOCK_DEV_OPS(&dnp->dn_lock);
1482 
1483 	for (propp = dnp->dn_global_prop_ptr->prop_list;
1484 	    propp != NULL;
1485 	    propp = (ddi_prop_t *)propp->prop_next) {
1486 
1487 		if (!DDI_STRSAME(propp->prop_name, name))
1488 			continue;
1489 
1490 		if ((!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
1491 			continue;
1492 
1493 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1494 			continue;
1495 
1496 		/* Property found, return it */
1497 		UNLOCK_DEV_OPS(&dnp->dn_lock);
1498 		return (propp);
1499 	}
1500 
1501 	UNLOCK_DEV_OPS(&dnp->dn_lock);
1502 	return ((ddi_prop_t *)0);
1503 }
1504 
1505 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1506 
1507 /*
1508  * ddi_prop_search_global:
1509  *	Search the global property list within devnames
1510  *	for the named property.  Return the encoded value.
1511  */
1512 static int
1513 i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
1514     void *valuep, uint_t *lengthp)
1515 {
1516 	ddi_prop_t	*propp;
1517 	caddr_t		buffer;
1518 
1519 	propp =  i_ddi_search_global_prop(dev, name, flags);
1520 
1521 	/* Property NOT found, bail */
1522 	if (propp == (ddi_prop_t *)0)
1523 		return (DDI_PROP_NOT_FOUND);
1524 
1525 	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
1526 		return (DDI_PROP_UNDEFINED);
1527 
1528 	if ((buffer = kmem_alloc(propp->prop_len,
1529 	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
1530 		cmn_err(CE_CONT, prop_no_mem_msg, name);
1531 		return (DDI_PROP_NO_MEMORY);
1532 	}
1533 
1534 	/*
1535 	 * Return the encoded data
1536 	 */
1537 	*(caddr_t *)valuep = buffer;
1538 	*lengthp = propp->prop_len;
1539 	bcopy(propp->prop_val, buffer, propp->prop_len);
1540 
1541 	return (DDI_PROP_SUCCESS);
1542 }
1543 
1544 /*
1545  * ddi_prop_search_common:	Lookup and return the encoded value
1546  */
1547 int
1548 ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1549     uint_t flags, char *name, void *valuep, uint_t *lengthp)
1550 {
1551 	ddi_prop_t	*propp;
1552 	int		i;
1553 	caddr_t		buffer;
1554 	caddr_t		prealloc = NULL;
1555 	int		plength = 0;
1556 	dev_info_t	*pdip;
1557 	int		(*bop)();
1558 
1559 	/*CONSTANTCONDITION*/
1560 	while (1)  {
1561 
1562 		mutex_enter(&(DEVI(dip)->devi_lock));
1563 
1564 
1565 		/*
1566 		 * find the property in child's devinfo:
1567 		 * Search order is:
1568 		 *	1. driver defined properties
1569 		 *	2. system defined properties
1570 		 *	3. driver global properties
1571 		 *	4. boot defined properties
1572 		 */
1573 
1574 		propp = i_ddi_prop_search(dev, name, flags,
1575 		    &(DEVI(dip)->devi_drv_prop_ptr));
1576 		if (propp == NULL)  {
1577 			propp = i_ddi_prop_search(dev, name, flags,
1578 			    &(DEVI(dip)->devi_sys_prop_ptr));
1579 		}
1580 		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
1581 			propp = i_ddi_prop_search(dev, name, flags,
1582 			    &DEVI(dip)->devi_global_prop_list->prop_list);
1583 		}
1584 
1585 		if (propp == NULL)  {
1586 			propp = i_ddi_prop_search(dev, name, flags,
1587 			    &(DEVI(dip)->devi_hw_prop_ptr));
1588 		}
1589 
1590 		/*
1591 		 * Software property found?
1592 		 */
1593 		if (propp != (ddi_prop_t *)0)	{
1594 
1595 			/*
1596 			 * If explicit undefine, return now.
1597 			 */
1598 			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
1599 				mutex_exit(&(DEVI(dip)->devi_lock));
1600 				if (prealloc)
1601 					kmem_free(prealloc, plength);
1602 				return (DDI_PROP_UNDEFINED);
1603 			}
1604 
1605 			/*
1606 			 * If we only want to know if it exists, return now
1607 			 */
1608 			if (prop_op == PROP_EXISTS) {
1609 				mutex_exit(&(DEVI(dip)->devi_lock));
1610 				ASSERT(prealloc == NULL);
1611 				return (DDI_PROP_SUCCESS);
1612 			}
1613 
1614 			/*
1615 			 * If length only request or prop length == 0,
1616 			 * service request and return now.
1617 			 */
1618 			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
1619 				*lengthp = propp->prop_len;
1620 
1621 				/*
1622 				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
1623 				 * that means prop_len is 0, so set valuep
1624 				 * also to NULL
1625 				 */
1626 				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
1627 					*(caddr_t *)valuep = NULL;
1628 
1629 				mutex_exit(&(DEVI(dip)->devi_lock));
1630 				if (prealloc)
1631 					kmem_free(prealloc, plength);
1632 				return (DDI_PROP_SUCCESS);
1633 			}
1634 
1635 			/*
1636 			 * If LEN_AND_VAL_ALLOC and the request can sleep,
1637 			 * drop the mutex, allocate the buffer, and go
1638 			 * through the loop again.  If we already allocated
1639 			 * the buffer, and the size of the property changed,
1640 			 * keep trying...
1641 			 */
1642 			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
1643 			    (flags & DDI_PROP_CANSLEEP))  {
1644 				if (prealloc && (propp->prop_len != plength)) {
1645 					kmem_free(prealloc, plength);
1646 					prealloc = NULL;
1647 				}
1648 				if (prealloc == NULL)  {
1649 					plength = propp->prop_len;
1650 					mutex_exit(&(DEVI(dip)->devi_lock));
1651 					prealloc = kmem_alloc(plength,
1652 					    KM_SLEEP);
1653 					continue;
1654 				}
1655 			}
1656 
1657 			/*
1658 			 * Allocate buffer, if required.  Either way,
1659 			 * set `buffer' variable.
1660 			 */
1661 			i = *lengthp;			/* Get callers length */
1662 			*lengthp = propp->prop_len;	/* Set callers length */
1663 
1664 			switch (prop_op) {
1665 
1666 			case PROP_LEN_AND_VAL_ALLOC:
1667 
1668 				if (prealloc == NULL) {
1669 					buffer = kmem_alloc(propp->prop_len,
1670 					    KM_NOSLEEP);
1671 				} else {
1672 					buffer = prealloc;
1673 				}
1674 
1675 				if (buffer == NULL)  {
1676 					mutex_exit(&(DEVI(dip)->devi_lock));
1677 					cmn_err(CE_CONT, prop_no_mem_msg, name);
1678 					return (DDI_PROP_NO_MEMORY);
1679 				}
1680 				/* Set callers buf ptr */
1681 				*(caddr_t *)valuep = buffer;
1682 				break;
1683 
1684 			case PROP_LEN_AND_VAL_BUF:
1685 
1686 				if (propp->prop_len > (i)) {
1687 					mutex_exit(&(DEVI(dip)->devi_lock));
1688 					return (DDI_PROP_BUF_TOO_SMALL);
1689 				}
1690 
1691 				buffer = valuep;  /* Get callers buf ptr */
1692 				break;
1693 
1694 			default:
1695 				break;
1696 			}
1697 
1698 			/*
1699 			 * Do the copy.
1700 			 */
1701 			bcopy(propp->prop_val, buffer, propp->prop_len);
1702 			mutex_exit(&(DEVI(dip)->devi_lock));
1703 			return (DDI_PROP_SUCCESS);
1704 		}
1705 
1706 		mutex_exit(&(DEVI(dip)->devi_lock));
1707 		if (prealloc)
1708 			kmem_free(prealloc, plength);
1709 		prealloc = NULL;
1710 
1711 		/*
1712 		 * Prop not found, call parent bus_ops to deal with possible
1713 		 * h/w layer (possible PROM defined props, etc.) and to
1714 		 * possibly ascend the hierarchy, if allowed by flags.
1715 		 */
1716 		pdip = (dev_info_t *)DEVI(dip)->devi_parent;
1717 
1718 		/*
1719 		 * One last call for the root driver PROM props?
1720 		 */
1721 		if (dip == ddi_root_node())  {
1722 			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
1723 			    flags, name, valuep, (int *)lengthp));
1724 		}
1725 
1726 		/*
1727 		 * We may have been called to check for properties
1728 		 * within a single devinfo node that has no parent -
1729 		 * see make_prop()
1730 		 */
1731 		if (pdip == NULL) {
1732 			ASSERT((flags &
1733 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
1734 			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
1735 			return (DDI_PROP_NOT_FOUND);
1736 		}
1737 
1738 		/*
1739 		 * Instead of recursing, we do iterative calls up the tree.
1740 		 * As a bit of optimization, skip the bus_op level if the
1741 		 * node is a s/w node and if the parent's bus_prop_op function
1742 		 * is `ddi_bus_prop_op', because we know that in this case,
1743 		 * this function does nothing.
1744 		 *
1745 		 * 4225415: If the parent isn't attached, or the child
1746 		 * hasn't been named by the parent yet, use the default
1747 		 * ddi_bus_prop_op as a proxy for the parent.  This
1748 		 * allows property lookups in any child/parent state to
1749 		 * include 'prom' and inherited properties, even when
1750 		 * there are no drivers attached to the child or parent.
1751 		 */
1752 
1753 		bop = ddi_bus_prop_op;
1754 		if (i_ddi_devi_attached(pdip) &&
1755 		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
1756 			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;
1757 
1758 		i = DDI_PROP_NOT_FOUND;
1759 
1760 		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
1761 			i = (*bop)(dev, pdip, dip, prop_op,
1762 			    flags | DDI_PROP_DONTPASS,
1763 			    name, valuep, lengthp);
1764 		}
1765 
1766 		if ((flags & DDI_PROP_DONTPASS) ||
1767 		    (i != DDI_PROP_NOT_FOUND))
1768 			return (i);
1769 
1770 		dip = pdip;
1771 	}
1772 	/*NOTREACHED*/
1773 }
1774 
1775 
1776 /*
1777  * ddi_prop_op: The basic property operator for drivers.
1778  *
1779  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1780  *
1781  *	prop_op			valuep
1782  *	------			------
1783  *
1784  *	PROP_LEN		<unused>
1785  *
1786  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1787  *
1788  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1789  *				address of allocated buffer, if successful)
1790  */
1791 int
1792 ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1793     char *name, caddr_t valuep, int *lengthp)
1794 {
1795 	int	i;
1796 
1797 	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);
1798 
1799 	/*
1800 	 * If this was originally an LDI prop lookup then we bail here.
1801 	 * The reason is that the LDI property lookup interfaces first call
1802 	 * a drivers prop_op() entry point to allow it to override
1803 	 * properties.  But if we've made it here, then the driver hasn't
1804 	 * overriden any properties.  We don't want to continue with the
1805 	 * property search here because we don't have any type inforamtion.
1806 	 * When we return failure, the LDI interfaces will then proceed to
1807 	 * call the typed property interfaces to look up the property.
1808 	 */
1809 	if (mod_flags & DDI_PROP_DYNAMIC)
1810 		return (DDI_PROP_NOT_FOUND);
1811 
1812 	/*
1813 	 * check for pre-typed property consumer asking for typed property:
1814 	 * see e_ddi_getprop_int64.
1815 	 */
1816 	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
1817 		mod_flags |= DDI_PROP_TYPE_INT64;
1818 	mod_flags |= DDI_PROP_TYPE_ANY;
1819 
1820 	i = ddi_prop_search_common(dev, dip, prop_op,
1821 	    mod_flags, name, valuep, (uint_t *)lengthp);
1822 	if (i == DDI_PROP_FOUND_1275)
1823 		return (DDI_PROP_SUCCESS);
1824 	return (i);
1825 }
1826 
1827 /*
1828  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1829  * maintain size in number of blksize blocks.  Provides a dynamic property
1830  * implementation for size oriented properties based on nblocks64 and blksize
1831  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1832  * is too large.  This interface should not be used with a nblocks64 that
1833  * represents the driver's idea of how to represent unknown, if nblocks is
1834  * unknown use ddi_prop_op.
1835  */
1836 int
1837 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1838     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1839     uint64_t nblocks64, uint_t blksize)
1840 {
1841 	uint64_t size64;
1842 	int	blkshift;
1843 
1844 	/* convert block size to shift value */
1845 	ASSERT(BIT_ONLYONESET(blksize));
1846 	blkshift = highbit(blksize) - 1;
1847 
1848 	/*
1849 	 * There is no point in supporting nblocks64 values that don't have
1850 	 * an accurate uint64_t byte count representation.
1851 	 */
1852 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1853 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1854 		    name, valuep, lengthp));
1855 
1856 	size64 = nblocks64 << blkshift;
1857 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1858 	    name, valuep, lengthp, size64, blksize));
1859 }
1860 
1861 /*
1862  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1863  */
1864 int
1865 ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1866     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
1867 {
1868 	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
1869 	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
1870 }
1871 
1872 /*
1873  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
1874  * maintain size in bytes. Provides a of dynamic property implementation for
1875  * size oriented properties based on size64 value and blksize passed in by the
1876  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1877  * should not be used with a size64 that represents the driver's idea of how
1878  * to represent unknown, if size is unknown use ddi_prop_op.
1879  *
1880  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1881  * integers. While the most likely interface to request them ([bc]devi_size)
1882  * is declared int (signed) there is no enforcement of this, which means we
1883  * can't enforce limitations here without risking regression.
1884  */
1885 int
1886 ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1887     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
1888     uint_t blksize)
1889 {
1890 	uint64_t nblocks64;
1891 	int	callers_length;
1892 	caddr_t	buffer;
1893 	int	blkshift;
1894 
1895 	/*
1896 	 * This is a kludge to support capture of size(9P) pure dynamic
1897 	 * properties in snapshots for non-cmlb code (without exposing
1898 	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
1899 	 * should be removed.
1900 	 */
1901 	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
1902 		static i_ddi_prop_dyn_t prop_dyn_size[] = {
1903 		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
1904 		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
1905 		    {NULL}
1906 		};
1907 		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
1908 	}
1909 
1910 	/* convert block size to shift value */
1911 	ASSERT(BIT_ONLYONESET(blksize));
1912 	blkshift = highbit(blksize) - 1;
1913 
1914 	/* compute DEV_BSIZE nblocks value */
1915 	nblocks64 = size64 >> blkshift;
1916 
1917 	/* get callers length, establish length of our dynamic properties */
1918 	callers_length = *lengthp;
1919 
1920 	if (strcmp(name, "Nblocks") == 0)
1921 		*lengthp = sizeof (uint64_t);
1922 	else if (strcmp(name, "Size") == 0)
1923 		*lengthp = sizeof (uint64_t);
1924 	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
1925 		*lengthp = sizeof (uint32_t);
1926 	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
1927 		*lengthp = sizeof (uint32_t);
1928 	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
1929 		*lengthp = sizeof (uint32_t);
1930 	else {
1931 		/* fallback to ddi_prop_op */
1932 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1933 		    name, valuep, lengthp));
1934 	}
1935 
1936 	/* service request for the length of the property */
1937 	if (prop_op == PROP_LEN)
1938 		return (DDI_PROP_SUCCESS);
1939 
1940 	switch (prop_op) {
1941 	case PROP_LEN_AND_VAL_ALLOC:
1942 		if ((buffer = kmem_alloc(*lengthp,
1943 		    (mod_flags & DDI_PROP_CANSLEEP) ?
1944 		    KM_SLEEP : KM_NOSLEEP)) == NULL)
1945 			return (DDI_PROP_NO_MEMORY);
1946 
1947 		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
1948 		break;
1949 
1950 	case PROP_LEN_AND_VAL_BUF:
1951 		/* the length of the property and the request must match */
1952 		if (callers_length != *lengthp)
1953 			return (DDI_PROP_INVAL_ARG);
1954 
1955 		buffer = valuep;		/* get callers buf ptr */
1956 		break;
1957 
1958 	default:
1959 		return (DDI_PROP_INVAL_ARG);
1960 	}
1961 
1962 	/* transfer the value into the buffer */
1963 	if (strcmp(name, "Nblocks") == 0)
1964 		*((uint64_t *)buffer) = nblocks64;
1965 	else if (strcmp(name, "Size") == 0)
1966 		*((uint64_t *)buffer) = size64;
1967 	else if (strcmp(name, "nblocks") == 0)
1968 		*((uint32_t *)buffer) = (uint32_t)nblocks64;
1969 	else if (strcmp(name, "size") == 0)
1970 		*((uint32_t *)buffer) = (uint32_t)size64;
1971 	else if (strcmp(name, "blksize") == 0)
1972 		*((uint32_t *)buffer) = (uint32_t)blksize;
1973 	return (DDI_PROP_SUCCESS);
1974 }
1975 
1976 /*
1977  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1978  */
1979 int
1980 ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1981     int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
1982 {
1983 	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
1984 	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
1985 }
1986 
1987 /*
1988  * Variable length props...
1989  */
1990 
1991 /*
1992  * ddi_getlongprop:	Get variable length property len+val into a buffer
1993  *		allocated by property provider via kmem_alloc. Requester
1994  *		is responsible for freeing returned property via kmem_free.
1995  *
1996  *	Arguments:
1997  *
1998  *	dev_t:	Input:	dev_t of property.
1999  *	dip:	Input:	dev_info_t pointer of child.
2000  *	flags:	Input:	Possible flag modifiers are:
2001  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
2002  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
2003  *	name:	Input:	name of property.
2004  *	valuep:	Output:	Addr of callers buffer pointer.
2005  *	lengthp:Output:	*lengthp will contain prop length on exit.
2006  *
2007  *	Possible Returns:
2008  *
2009  *		DDI_PROP_SUCCESS:	Prop found and returned.
2010  *		DDI_PROP_NOT_FOUND:	Prop not found
2011  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
2012  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
2013  */
2014 
2015 int
2016 ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
2017     char *name, caddr_t valuep, int *lengthp)
2018 {
2019 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
2020 	    flags, name, valuep, lengthp));
2021 }
2022 
2023 /*
2024  *
2025  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2026  *				buffer. (no memory allocation by provider).
2027  *
2028  *	dev_t:	Input:	dev_t of property.
2029  *	dip:	Input:	dev_info_t pointer of child.
2030  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2031  *	name:	Input:	name of property
2032  *	valuep:	Input:	ptr to callers buffer.
2033  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2034  *			actual length of property on exit.
2035  *
2036  *	Possible returns:
2037  *
2038  *		DDI_PROP_SUCCESS	Prop found and returned
2039  *		DDI_PROP_NOT_FOUND	Prop not found
2040  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2041  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2042  *					no value returned, but actual prop
2043  *					length returned in *lengthp
2044  *
2045  */
2046 
2047 int
2048 ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
2049     char *name, caddr_t valuep, int *lengthp)
2050 {
2051 	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2052 	    flags, name, valuep, lengthp));
2053 }
2054 
2055 /*
2056  * Integer/boolean sized props.
2057  *
2058  * Call is value only... returns found boolean or int sized prop value or
2059  * defvalue if prop not found or is wrong length or is explicitly undefined.
2060  * Only flag is DDI_PROP_DONTPASS...
2061  *
2062  * By convention, this interface returns boolean (0) sized properties
2063  * as value (int)1.
2064  *
2065  * This never returns an error, if property not found or specifically
2066  * undefined, the input `defvalue' is returned.
2067  */
2068 
2069 int
2070 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2071 {
2072 	int	propvalue = defvalue;
2073 	int	proplength = sizeof (int);
2074 	int	error;
2075 
2076 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2077 	    flags, name, (caddr_t)&propvalue, &proplength);
2078 
2079 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2080 		propvalue = 1;
2081 
2082 	return (propvalue);
2083 }
2084 
2085 /*
2086  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2087  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2088  */
2089 
2090 int
2091 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2092 {
2093 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2094 }
2095 
2096 /*
2097  * Allocate a struct prop_driver_data, along with 'size' bytes
2098  * for decoded property data.  This structure is freed by
2099  * calling ddi_prop_free(9F).
2100  */
2101 static void *
2102 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2103 {
2104 	struct prop_driver_data *pdd;
2105 
2106 	/*
2107 	 * Allocate a structure with enough memory to store the decoded data.
2108 	 */
2109 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2110 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2111 	pdd->pdd_prop_free = prop_free;
2112 
2113 	/*
2114 	 * Return a pointer to the location to put the decoded data.
2115 	 */
2116 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2117 }
2118 
2119 /*
2120  * Allocated the memory needed to store the encoded data in the property
2121  * handle.
2122  */
2123 static int
2124 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2125 {
2126 	/*
2127 	 * If size is zero, then set data to NULL and size to 0.  This
2128 	 * is a boolean property.
2129 	 */
2130 	if (size == 0) {
2131 		ph->ph_size = 0;
2132 		ph->ph_data = NULL;
2133 		ph->ph_cur_pos = NULL;
2134 		ph->ph_save_pos = NULL;
2135 	} else {
2136 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2137 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2138 			if (ph->ph_data == NULL)
2139 				return (DDI_PROP_NO_MEMORY);
2140 		} else
2141 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2142 		ph->ph_size = size;
2143 		ph->ph_cur_pos = ph->ph_data;
2144 		ph->ph_save_pos = ph->ph_data;
2145 	}
2146 	return (DDI_PROP_SUCCESS);
2147 }
2148 
2149 /*
2150  * Free the space allocated by the lookup routines.  Each lookup routine
2151  * returns a pointer to the decoded data to the driver.  The driver then
2152  * passes this pointer back to us.  This data actually lives in a struct
2153  * prop_driver_data.  We use negative indexing to find the beginning of
2154  * the structure and then free the entire structure using the size and
2155  * the free routine stored in the structure.
2156  */
2157 void
2158 ddi_prop_free(void *datap)
2159 {
2160 	struct prop_driver_data *pdd;
2161 
2162 	/*
2163 	 * Get the structure
2164 	 */
2165 	pdd = (struct prop_driver_data *)
2166 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
2167 	/*
2168 	 * Call the free routine to free it
2169 	 */
2170 	(*pdd->pdd_prop_free)(pdd);
2171 }
2172 
2173 /*
2174  * Free the data associated with an array of ints,
2175  * allocated with ddi_prop_decode_alloc().
2176  */
2177 static void
2178 ddi_prop_free_ints(struct prop_driver_data *pdd)
2179 {
2180 	kmem_free(pdd, pdd->pdd_size);
2181 }
2182 
2183 /*
2184  * Free a single string property or a single string contained within
2185  * the argv style return value of an array of strings.
2186  */
2187 static void
2188 ddi_prop_free_string(struct prop_driver_data *pdd)
2189 {
2190 	kmem_free(pdd, pdd->pdd_size);
2191 
2192 }
2193 
2194 /*
2195  * Free an array of strings.
2196  */
2197 static void
2198 ddi_prop_free_strings(struct prop_driver_data *pdd)
2199 {
2200 	kmem_free(pdd, pdd->pdd_size);
2201 }
2202 
2203 /*
2204  * Free the data associated with an array of bytes.
2205  */
2206 static void
2207 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2208 {
2209 	kmem_free(pdd, pdd->pdd_size);
2210 }
2211 
2212 /*
2213  * Reset the current location pointer in the property handle to the
2214  * beginning of the data.
2215  */
2216 void
2217 ddi_prop_reset_pos(prop_handle_t *ph)
2218 {
2219 	ph->ph_cur_pos = ph->ph_data;
2220 	ph->ph_save_pos = ph->ph_data;
2221 }
2222 
2223 /*
2224  * Restore the current location pointer in the property handle to the
2225  * saved position.
2226  */
2227 void
2228 ddi_prop_save_pos(prop_handle_t *ph)
2229 {
2230 	ph->ph_save_pos = ph->ph_cur_pos;
2231 }
2232 
2233 /*
2234  * Save the location that the current location pointer is pointing to..
2235  */
2236 void
2237 ddi_prop_restore_pos(prop_handle_t *ph)
2238 {
2239 	ph->ph_cur_pos = ph->ph_save_pos;
2240 }
2241 
2242 /*
2243  * Property encode/decode functions
2244  */
2245 
2246 /*
2247  * Decode a single integer property
2248  */
2249 static int
2250 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2251 {
2252 	int	i;
2253 	int	tmp;
2254 
2255 	/*
2256 	 * If there is nothing to decode return an error
2257 	 */
2258 	if (ph->ph_size == 0)
2259 		return (DDI_PROP_END_OF_DATA);
2260 
2261 	/*
2262 	 * Decode the property as a single integer and return it
2263 	 * in data if we were able to decode it.
2264 	 */
2265 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2266 	if (i < DDI_PROP_RESULT_OK) {
2267 		switch (i) {
2268 		case DDI_PROP_RESULT_EOF:
2269 			return (DDI_PROP_END_OF_DATA);
2270 
2271 		case DDI_PROP_RESULT_ERROR:
2272 			return (DDI_PROP_CANNOT_DECODE);
2273 		}
2274 	}
2275 
2276 	*(int *)data = tmp;
2277 	*nelements = 1;
2278 	return (DDI_PROP_SUCCESS);
2279 }
2280 
2281 /*
2282  * Decode a single 64 bit integer property
2283  */
2284 static int
2285 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2286 {
2287 	int	i;
2288 	int64_t	tmp;
2289 
2290 	/*
2291 	 * If there is nothing to decode return an error
2292 	 */
2293 	if (ph->ph_size == 0)
2294 		return (DDI_PROP_END_OF_DATA);
2295 
2296 	/*
2297 	 * Decode the property as a single integer and return it
2298 	 * in data if we were able to decode it.
2299 	 */
2300 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2301 	if (i < DDI_PROP_RESULT_OK) {
2302 		switch (i) {
2303 		case DDI_PROP_RESULT_EOF:
2304 			return (DDI_PROP_END_OF_DATA);
2305 
2306 		case DDI_PROP_RESULT_ERROR:
2307 			return (DDI_PROP_CANNOT_DECODE);
2308 		}
2309 	}
2310 
2311 	*(int64_t *)data = tmp;
2312 	*nelements = 1;
2313 	return (DDI_PROP_SUCCESS);
2314 }
2315 
2316 /*
2317  * Decode an array of integers property
2318  */
2319 static int
2320 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2321 {
2322 	int	i;
2323 	int	cnt = 0;
2324 	int	*tmp;
2325 	int	*intp;
2326 	int	n;
2327 
2328 	/*
2329 	 * Figure out how many array elements there are by going through the
2330 	 * data without decoding it first and counting.
2331 	 */
2332 	for (;;) {
2333 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2334 		if (i < 0)
2335 			break;
2336 		cnt++;
2337 	}
2338 
2339 	/*
2340 	 * If there are no elements return an error
2341 	 */
2342 	if (cnt == 0)
2343 		return (DDI_PROP_END_OF_DATA);
2344 
2345 	/*
2346 	 * If we cannot skip through the data, we cannot decode it
2347 	 */
2348 	if (i == DDI_PROP_RESULT_ERROR)
2349 		return (DDI_PROP_CANNOT_DECODE);
2350 
2351 	/*
2352 	 * Reset the data pointer to the beginning of the encoded data
2353 	 */
2354 	ddi_prop_reset_pos(ph);
2355 
2356 	/*
2357 	 * Allocated memory to store the decoded value in.
2358 	 */
2359 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2360 	    ddi_prop_free_ints);
2361 
2362 	/*
2363 	 * Decode each element and place it in the space we just allocated
2364 	 */
2365 	tmp = intp;
2366 	for (n = 0; n < cnt; n++, tmp++) {
2367 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2368 		if (i < DDI_PROP_RESULT_OK) {
2369 			/*
2370 			 * Free the space we just allocated
2371 			 * and return an error.
2372 			 */
2373 			ddi_prop_free(intp);
2374 			switch (i) {
2375 			case DDI_PROP_RESULT_EOF:
2376 				return (DDI_PROP_END_OF_DATA);
2377 
2378 			case DDI_PROP_RESULT_ERROR:
2379 				return (DDI_PROP_CANNOT_DECODE);
2380 			}
2381 		}
2382 	}
2383 
2384 	*nelements = cnt;
2385 	*(int **)data = intp;
2386 
2387 	return (DDI_PROP_SUCCESS);
2388 }
2389 
2390 /*
2391  * Decode a 64 bit integer array property
2392  */
2393 static int
2394 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2395 {
2396 	int	i;
2397 	int	n;
2398 	int	cnt = 0;
2399 	int64_t	*tmp;
2400 	int64_t	*intp;
2401 
2402 	/*
2403 	 * Count the number of array elements by going
2404 	 * through the data without decoding it.
2405 	 */
2406 	for (;;) {
2407 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2408 		if (i < 0)
2409 			break;
2410 		cnt++;
2411 	}
2412 
2413 	/*
2414 	 * If there are no elements return an error
2415 	 */
2416 	if (cnt == 0)
2417 		return (DDI_PROP_END_OF_DATA);
2418 
2419 	/*
2420 	 * If we cannot skip through the data, we cannot decode it
2421 	 */
2422 	if (i == DDI_PROP_RESULT_ERROR)
2423 		return (DDI_PROP_CANNOT_DECODE);
2424 
2425 	/*
2426 	 * Reset the data pointer to the beginning of the encoded data
2427 	 */
2428 	ddi_prop_reset_pos(ph);
2429 
2430 	/*
2431 	 * Allocate memory to store the decoded value.
2432 	 */
2433 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2434 	    ddi_prop_free_ints);
2435 
2436 	/*
2437 	 * Decode each element and place it in the space allocated
2438 	 */
2439 	tmp = intp;
2440 	for (n = 0; n < cnt; n++, tmp++) {
2441 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2442 		if (i < DDI_PROP_RESULT_OK) {
2443 			/*
2444 			 * Free the space we just allocated
2445 			 * and return an error.
2446 			 */
2447 			ddi_prop_free(intp);
2448 			switch (i) {
2449 			case DDI_PROP_RESULT_EOF:
2450 				return (DDI_PROP_END_OF_DATA);
2451 
2452 			case DDI_PROP_RESULT_ERROR:
2453 				return (DDI_PROP_CANNOT_DECODE);
2454 			}
2455 		}
2456 	}
2457 
2458 	*nelements = cnt;
2459 	*(int64_t **)data = intp;
2460 
2461 	return (DDI_PROP_SUCCESS);
2462 }
2463 
2464 /*
2465  * Encode an array of integers property (Can be one element)
2466  */
2467 int
2468 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2469 {
2470 	int	i;
2471 	int	*tmp;
2472 	int	cnt;
2473 	int	size;
2474 
2475 	/*
2476 	 * If there is no data, we cannot do anything
2477 	 */
2478 	if (nelements == 0)
2479 		return (DDI_PROP_CANNOT_ENCODE);
2480 
2481 	/*
2482 	 * Get the size of an encoded int.
2483 	 */
2484 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2485 
2486 	if (size < DDI_PROP_RESULT_OK) {
2487 		switch (size) {
2488 		case DDI_PROP_RESULT_EOF:
2489 			return (DDI_PROP_END_OF_DATA);
2490 
2491 		case DDI_PROP_RESULT_ERROR:
2492 			return (DDI_PROP_CANNOT_ENCODE);
2493 		}
2494 	}
2495 
2496 	/*
2497 	 * Allocate space in the handle to store the encoded int.
2498 	 */
2499 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2500 	    DDI_PROP_SUCCESS)
2501 		return (DDI_PROP_NO_MEMORY);
2502 
2503 	/*
2504 	 * Encode the array of ints.
2505 	 */
2506 	tmp = (int *)data;
2507 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2508 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2509 		if (i < DDI_PROP_RESULT_OK) {
2510 			switch (i) {
2511 			case DDI_PROP_RESULT_EOF:
2512 				return (DDI_PROP_END_OF_DATA);
2513 
2514 			case DDI_PROP_RESULT_ERROR:
2515 				return (DDI_PROP_CANNOT_ENCODE);
2516 			}
2517 		}
2518 	}
2519 
2520 	return (DDI_PROP_SUCCESS);
2521 }
2522 
2523 
2524 /*
2525  * Encode a 64 bit integer array property
2526  */
2527 int
2528 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2529 {
2530 	int i;
2531 	int cnt;
2532 	int size;
2533 	int64_t *tmp;
2534 
2535 	/*
2536 	 * If there is no data, we cannot do anything
2537 	 */
2538 	if (nelements == 0)
2539 		return (DDI_PROP_CANNOT_ENCODE);
2540 
2541 	/*
2542 	 * Get the size of an encoded 64 bit int.
2543 	 */
2544 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2545 
2546 	if (size < DDI_PROP_RESULT_OK) {
2547 		switch (size) {
2548 		case DDI_PROP_RESULT_EOF:
2549 			return (DDI_PROP_END_OF_DATA);
2550 
2551 		case DDI_PROP_RESULT_ERROR:
2552 			return (DDI_PROP_CANNOT_ENCODE);
2553 		}
2554 	}
2555 
2556 	/*
2557 	 * Allocate space in the handle to store the encoded int.
2558 	 */
2559 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2560 	    DDI_PROP_SUCCESS)
2561 		return (DDI_PROP_NO_MEMORY);
2562 
2563 	/*
2564 	 * Encode the array of ints.
2565 	 */
2566 	tmp = (int64_t *)data;
2567 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2568 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2569 		if (i < DDI_PROP_RESULT_OK) {
2570 			switch (i) {
2571 			case DDI_PROP_RESULT_EOF:
2572 				return (DDI_PROP_END_OF_DATA);
2573 
2574 			case DDI_PROP_RESULT_ERROR:
2575 				return (DDI_PROP_CANNOT_ENCODE);
2576 			}
2577 		}
2578 	}
2579 
2580 	return (DDI_PROP_SUCCESS);
2581 }
2582 
2583 /*
2584  * Decode a single string property
2585  */
2586 static int
2587 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2588 {
2589 	char		*tmp;
2590 	char		*str;
2591 	int		i;
2592 	int		size;
2593 
2594 	/*
2595 	 * If there is nothing to decode return an error
2596 	 */
2597 	if (ph->ph_size == 0)
2598 		return (DDI_PROP_END_OF_DATA);
2599 
2600 	/*
2601 	 * Get the decoded size of the encoded string.
2602 	 */
2603 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2604 	if (size < DDI_PROP_RESULT_OK) {
2605 		switch (size) {
2606 		case DDI_PROP_RESULT_EOF:
2607 			return (DDI_PROP_END_OF_DATA);
2608 
2609 		case DDI_PROP_RESULT_ERROR:
2610 			return (DDI_PROP_CANNOT_DECODE);
2611 		}
2612 	}
2613 
2614 	/*
2615 	 * Allocated memory to store the decoded value in.
2616 	 */
2617 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2618 
2619 	ddi_prop_reset_pos(ph);
2620 
2621 	/*
2622 	 * Decode the str and place it in the space we just allocated
2623 	 */
2624 	tmp = str;
2625 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2626 	if (i < DDI_PROP_RESULT_OK) {
2627 		/*
2628 		 * Free the space we just allocated
2629 		 * and return an error.
2630 		 */
2631 		ddi_prop_free(str);
2632 		switch (i) {
2633 		case DDI_PROP_RESULT_EOF:
2634 			return (DDI_PROP_END_OF_DATA);
2635 
2636 		case DDI_PROP_RESULT_ERROR:
2637 			return (DDI_PROP_CANNOT_DECODE);
2638 		}
2639 	}
2640 
2641 	*(char **)data = str;
2642 	*nelements = 1;
2643 
2644 	return (DDI_PROP_SUCCESS);
2645 }
2646 
2647 /*
2648  * Decode an array of strings.
2649  */
2650 int
2651 ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
2652 {
2653 	int		cnt = 0;
2654 	char		**strs;
2655 	char		**tmp;
2656 	char		*ptr;
2657 	int		i;
2658 	int		n;
2659 	int		size;
2660 	size_t		nbytes;
2661 
2662 	/*
2663 	 * Figure out how many array elements there are by going through the
2664 	 * data without decoding it first and counting.
2665 	 */
2666 	for (;;) {
2667 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
2668 		if (i < 0)
2669 			break;
2670 		cnt++;
2671 	}
2672 
2673 	/*
2674 	 * If there are no elements return an error
2675 	 */
2676 	if (cnt == 0)
2677 		return (DDI_PROP_END_OF_DATA);
2678 
2679 	/*
2680 	 * If we cannot skip through the data, we cannot decode it
2681 	 */
2682 	if (i == DDI_PROP_RESULT_ERROR)
2683 		return (DDI_PROP_CANNOT_DECODE);
2684 
2685 	/*
2686 	 * Reset the data pointer to the beginning of the encoded data
2687 	 */
2688 	ddi_prop_reset_pos(ph);
2689 
2690 	/*
2691 	 * Figure out how much memory we need for the sum total
2692 	 */
2693 	nbytes = (cnt + 1) * sizeof (char *);
2694 
2695 	for (n = 0; n < cnt; n++) {
2696 		/*
2697 		 * Get the decoded size of the current encoded string.
2698 		 */
2699 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2700 		if (size < DDI_PROP_RESULT_OK) {
2701 			switch (size) {
2702 			case DDI_PROP_RESULT_EOF:
2703 				return (DDI_PROP_END_OF_DATA);
2704 
2705 			case DDI_PROP_RESULT_ERROR:
2706 				return (DDI_PROP_CANNOT_DECODE);
2707 			}
2708 		}
2709 
2710 		nbytes += size;
2711 	}
2712 
2713 	/*
2714 	 * Allocate memory in which to store the decoded strings.
2715 	 */
2716 	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);
2717 
2718 	/*
2719 	 * Set up pointers for each string by figuring out yet
2720 	 * again how long each string is.
2721 	 */
2722 	ddi_prop_reset_pos(ph);
2723 	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
2724 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2725 		/*
2726 		 * Get the decoded size of the current encoded string.
2727 		 */
2728 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2729 		if (size < DDI_PROP_RESULT_OK) {
2730 			ddi_prop_free(strs);
2731 			switch (size) {
2732 			case DDI_PROP_RESULT_EOF:
2733 				return (DDI_PROP_END_OF_DATA);
2734 
2735 			case DDI_PROP_RESULT_ERROR:
2736 				return (DDI_PROP_CANNOT_DECODE);
2737 			}
2738 		}
2739 
2740 		*tmp = ptr;
2741 		ptr += size;
2742 	}
2743 
2744 	/*
2745 	 * String array is terminated by a NULL
2746 	 */
2747 	*tmp = NULL;
2748 
2749 	/*
2750 	 * Finally, we can decode each string
2751 	 */
2752 	ddi_prop_reset_pos(ph);
2753 	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
2754 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
2755 		if (i < DDI_PROP_RESULT_OK) {
2756 			/*
2757 			 * Free the space we just allocated
2758 			 * and return an error
2759 			 */
2760 			ddi_prop_free(strs);
2761 			switch (i) {
2762 			case DDI_PROP_RESULT_EOF:
2763 				return (DDI_PROP_END_OF_DATA);
2764 
2765 			case DDI_PROP_RESULT_ERROR:
2766 				return (DDI_PROP_CANNOT_DECODE);
2767 			}
2768 		}
2769 	}
2770 
2771 	*(char ***)data = strs;
2772 	*nelements = cnt;
2773 
2774 	return (DDI_PROP_SUCCESS);
2775 }
2776 
2777 /*
2778  * Encode a string.
2779  */
2780 int
2781 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2782 {
2783 	char		**tmp;
2784 	int		size;
2785 	int		i;
2786 
2787 	/*
2788 	 * If there is no data, we cannot do anything
2789 	 */
2790 	if (nelements == 0)
2791 		return (DDI_PROP_CANNOT_ENCODE);
2792 
2793 	/*
2794 	 * Get the size of the encoded string.
2795 	 */
2796 	tmp = (char **)data;
2797 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2798 	if (size < DDI_PROP_RESULT_OK) {
2799 		switch (size) {
2800 		case DDI_PROP_RESULT_EOF:
2801 			return (DDI_PROP_END_OF_DATA);
2802 
2803 		case DDI_PROP_RESULT_ERROR:
2804 			return (DDI_PROP_CANNOT_ENCODE);
2805 		}
2806 	}
2807 
2808 	/*
2809 	 * Allocate space in the handle to store the encoded string.
2810 	 */
2811 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2812 		return (DDI_PROP_NO_MEMORY);
2813 
2814 	ddi_prop_reset_pos(ph);
2815 
2816 	/*
2817 	 * Encode the string.
2818 	 */
2819 	tmp = (char **)data;
2820 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2821 	if (i < DDI_PROP_RESULT_OK) {
2822 		switch (i) {
2823 		case DDI_PROP_RESULT_EOF:
2824 			return (DDI_PROP_END_OF_DATA);
2825 
2826 		case DDI_PROP_RESULT_ERROR:
2827 			return (DDI_PROP_CANNOT_ENCODE);
2828 		}
2829 	}
2830 
2831 	return (DDI_PROP_SUCCESS);
2832 }
2833 
2834 
2835 /*
2836  * Encode an array of strings.
2837  */
2838 int
2839 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2840 {
2841 	int		cnt = 0;
2842 	char		**tmp;
2843 	int		size;
2844 	uint_t		total_size;
2845 	int		i;
2846 
2847 	/*
2848 	 * If there is no data, we cannot do anything
2849 	 */
2850 	if (nelements == 0)
2851 		return (DDI_PROP_CANNOT_ENCODE);
2852 
2853 	/*
2854 	 * Get the total size required to encode all the strings.
2855 	 */
2856 	total_size = 0;
2857 	tmp = (char **)data;
2858 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2859 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2860 		if (size < DDI_PROP_RESULT_OK) {
2861 			switch (size) {
2862 			case DDI_PROP_RESULT_EOF:
2863 				return (DDI_PROP_END_OF_DATA);
2864 
2865 			case DDI_PROP_RESULT_ERROR:
2866 				return (DDI_PROP_CANNOT_ENCODE);
2867 			}
2868 		}
2869 		total_size += (uint_t)size;
2870 	}
2871 
2872 	/*
2873 	 * Allocate space in the handle to store the encoded strings.
2874 	 */
2875 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2876 		return (DDI_PROP_NO_MEMORY);
2877 
2878 	ddi_prop_reset_pos(ph);
2879 
2880 	/*
2881 	 * Encode the array of strings.
2882 	 */
2883 	tmp = (char **)data;
2884 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2885 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2886 		if (i < DDI_PROP_RESULT_OK) {
2887 			switch (i) {
2888 			case DDI_PROP_RESULT_EOF:
2889 				return (DDI_PROP_END_OF_DATA);
2890 
2891 			case DDI_PROP_RESULT_ERROR:
2892 				return (DDI_PROP_CANNOT_ENCODE);
2893 			}
2894 		}
2895 	}
2896 
2897 	return (DDI_PROP_SUCCESS);
2898 }
2899 
2900 
2901 /*
2902  * Decode an array of bytes.
2903  */
2904 static int
2905 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2906 {
2907 	uchar_t		*tmp;
2908 	int		nbytes;
2909 	int		i;
2910 
2911 	/*
2912 	 * If there are no elements return an error
2913 	 */
2914 	if (ph->ph_size == 0)
2915 		return (DDI_PROP_END_OF_DATA);
2916 
2917 	/*
2918 	 * Get the size of the encoded array of bytes.
2919 	 */
2920 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2921 	    data, ph->ph_size);
2922 	if (nbytes < DDI_PROP_RESULT_OK) {
2923 		switch (nbytes) {
2924 		case DDI_PROP_RESULT_EOF:
2925 			return (DDI_PROP_END_OF_DATA);
2926 
2927 		case DDI_PROP_RESULT_ERROR:
2928 			return (DDI_PROP_CANNOT_DECODE);
2929 		}
2930 	}
2931 
2932 	/*
2933 	 * Allocated memory to store the decoded value in.
2934 	 */
2935 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2936 
2937 	/*
2938 	 * Decode each element and place it in the space we just allocated
2939 	 */
2940 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2941 	if (i < DDI_PROP_RESULT_OK) {
2942 		/*
2943 		 * Free the space we just allocated
2944 		 * and return an error
2945 		 */
2946 		ddi_prop_free(tmp);
2947 		switch (i) {
2948 		case DDI_PROP_RESULT_EOF:
2949 			return (DDI_PROP_END_OF_DATA);
2950 
2951 		case DDI_PROP_RESULT_ERROR:
2952 			return (DDI_PROP_CANNOT_DECODE);
2953 		}
2954 	}
2955 
2956 	*(uchar_t **)data = tmp;
2957 	*nelements = nbytes;
2958 
2959 	return (DDI_PROP_SUCCESS);
2960 }
2961 
2962 /*
2963  * Encode an array of bytes.
2964  */
2965 int
2966 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2967 {
2968 	int		size;
2969 	int		i;
2970 
2971 	/*
2972 	 * If there are no elements, then this is a boolean property,
2973 	 * so just create a property handle with no data and return.
2974 	 */
2975 	if (nelements == 0) {
2976 		(void) ddi_prop_encode_alloc(ph, 0);
2977 		return (DDI_PROP_SUCCESS);
2978 	}
2979 
2980 	/*
2981 	 * Get the size of the encoded array of bytes.
2982 	 */
2983 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2984 	    nelements);
2985 	if (size < DDI_PROP_RESULT_OK) {
2986 		switch (size) {
2987 		case DDI_PROP_RESULT_EOF:
2988 			return (DDI_PROP_END_OF_DATA);
2989 
2990 		case DDI_PROP_RESULT_ERROR:
2991 			return (DDI_PROP_CANNOT_DECODE);
2992 		}
2993 	}
2994 
2995 	/*
2996 	 * Allocate space in the handle to store the encoded bytes.
2997 	 */
2998 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
2999 		return (DDI_PROP_NO_MEMORY);
3000 
3001 	/*
3002 	 * Encode the array of bytes.
3003 	 */
3004 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3005 	    nelements);
3006 	if (i < DDI_PROP_RESULT_OK) {
3007 		switch (i) {
3008 		case DDI_PROP_RESULT_EOF:
3009 			return (DDI_PROP_END_OF_DATA);
3010 
3011 		case DDI_PROP_RESULT_ERROR:
3012 			return (DDI_PROP_CANNOT_ENCODE);
3013 		}
3014 	}
3015 
3016 	return (DDI_PROP_SUCCESS);
3017 }
3018 
3019 /*
3020  * OBP 1275 integer, string and byte operators.
3021  *
3022  * DDI_PROP_CMD_DECODE:
3023  *
3024  *	DDI_PROP_RESULT_ERROR:		cannot decode the data
3025  *	DDI_PROP_RESULT_EOF:		end of data
3026  *	DDI_PROP_OK:			data was decoded
3027  *
3028  * DDI_PROP_CMD_ENCODE:
3029  *
3030  *	DDI_PROP_RESULT_ERROR:		cannot encode the data
3031  *	DDI_PROP_RESULT_EOF:		end of data
3032  *	DDI_PROP_OK:			data was encoded
3033  *
3034  * DDI_PROP_CMD_SKIP:
3035  *
3036  *	DDI_PROP_RESULT_ERROR:		cannot skip the data
3037  *	DDI_PROP_RESULT_EOF:		end of data
3038  *	DDI_PROP_OK:			data was skipped
3039  *
3040  * DDI_PROP_CMD_GET_ESIZE:
3041  *
3042  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3043  *	DDI_PROP_RESULT_EOF:		end of data
3044  *	> 0:				the encoded size
3045  *
3046  * DDI_PROP_CMD_GET_DSIZE:
3047  *
3048  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3049  *	DDI_PROP_RESULT_EOF:		end of data
3050  *	> 0:				the decoded size
3051  */
3052 
3053 /*
3054  * OBP 1275 integer operator
3055  *
3056  * OBP properties are a byte stream of data, so integers may not be
3057  * properly aligned.  Therefore we need to copy them one byte at a time.
3058  */
3059 int
3060 ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
3061 {
3062 	int	i;
3063 
3064 	switch (cmd) {
3065 	case DDI_PROP_CMD_DECODE:
3066 		/*
3067 		 * Check that there is encoded data
3068 		 */
3069 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3070 			return (DDI_PROP_RESULT_ERROR);
3071 		if (ph->ph_flags & PH_FROM_PROM) {
3072 			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
3073 			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3074 			    ph->ph_size - i))
3075 				return (DDI_PROP_RESULT_ERROR);
3076 		} else {
3077 			if (ph->ph_size < sizeof (int) ||
3078 			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3079 			    ph->ph_size - sizeof (int))))
3080 				return (DDI_PROP_RESULT_ERROR);
3081 		}
3082 
3083 		/*
3084 		 * Copy the integer, using the implementation-specific
3085 		 * copy function if the property is coming from the PROM.
3086 		 */
3087 		if (ph->ph_flags & PH_FROM_PROM) {
3088 			*data = impl_ddi_prop_int_from_prom(
3089 			    (uchar_t *)ph->ph_cur_pos,
3090 			    (ph->ph_size < PROP_1275_INT_SIZE) ?
3091 			    ph->ph_size : PROP_1275_INT_SIZE);
3092 		} else {
3093 			bcopy(ph->ph_cur_pos, data, sizeof (int));
3094 		}
3095 
3096 		/*
3097 		 * Move the current location to the start of the next
3098 		 * bit of undecoded data.
3099 		 */
3100 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3101 		    PROP_1275_INT_SIZE;
3102 		return (DDI_PROP_RESULT_OK);
3103 
3104 	case DDI_PROP_CMD_ENCODE:
3105 		/*
3106 		 * Check that there is room to encoded the data
3107 		 */
3108 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3109 		    ph->ph_size < PROP_1275_INT_SIZE ||
3110 		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
3111 		    ph->ph_size - sizeof (int))))
3112 			return (DDI_PROP_RESULT_ERROR);
3113 
3114 		/*
3115 		 * Encode the integer into the byte stream one byte at a
3116 		 * time.
3117 		 */
3118 		bcopy(data, ph->ph_cur_pos, sizeof (int));
3119 
3120 		/*
3121 		 * Move the current location to the start of the next bit of
3122 		 * space where we can store encoded data.
3123 		 */
3124 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3125 		return (DDI_PROP_RESULT_OK);
3126 
3127 	case DDI_PROP_CMD_SKIP:
3128 		/*
3129 		 * Check that there is encoded data
3130 		 */
3131 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3132 		    ph->ph_size < PROP_1275_INT_SIZE)
3133 			return (DDI_PROP_RESULT_ERROR);
3134 
3135 
3136 		if ((caddr_t)ph->ph_cur_pos ==
3137 		    (caddr_t)ph->ph_data + ph->ph_size) {
3138 			return (DDI_PROP_RESULT_EOF);
3139 		} else if ((caddr_t)ph->ph_cur_pos >
3140 		    (caddr_t)ph->ph_data + ph->ph_size) {
3141 			return (DDI_PROP_RESULT_EOF);
3142 		}
3143 
3144 		/*
3145 		 * Move the current location to the start of the next bit of
3146 		 * undecoded data.
3147 		 */
3148 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
3149 		return (DDI_PROP_RESULT_OK);
3150 
3151 	case DDI_PROP_CMD_GET_ESIZE:
3152 		/*
3153 		 * Return the size of an encoded integer on OBP
3154 		 */
3155 		return (PROP_1275_INT_SIZE);
3156 
3157 	case DDI_PROP_CMD_GET_DSIZE:
3158 		/*
3159 		 * Return the size of a decoded integer on the system.
3160 		 */
3161 		return (sizeof (int));
3162 
3163 	default:
3164 #ifdef DEBUG
3165 		panic("ddi_prop_1275_int: %x impossible", cmd);
3166 		/*NOTREACHED*/
3167 #else
3168 		return (DDI_PROP_RESULT_ERROR);
3169 #endif	/* DEBUG */
3170 	}
3171 }
3172 
3173 /*
3174  * 64 bit integer operator.
3175  *
3176  * This is an extension, defined by Sun, to the 1275 integer
3177  * operator.  This routine handles the encoding/decoding of
3178  * 64 bit integer properties.
3179  */
3180 int
3181 ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
3182 {
3183 
3184 	switch (cmd) {
3185 	case DDI_PROP_CMD_DECODE:
3186 		/*
3187 		 * Check that there is encoded data
3188 		 */
3189 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
3190 			return (DDI_PROP_RESULT_ERROR);
3191 		if (ph->ph_flags & PH_FROM_PROM) {
3192 			return (DDI_PROP_RESULT_ERROR);
3193 		} else {
3194 			if (ph->ph_size < sizeof (int64_t) ||
3195 			    ((int64_t *)ph->ph_cur_pos >
3196 			    ((int64_t *)ph->ph_data +
3197 			    ph->ph_size - sizeof (int64_t))))
3198 				return (DDI_PROP_RESULT_ERROR);
3199 		}
3200 		/*
3201 		 * Copy the integer, using the implementation-specific
3202 		 * copy function if the property is coming from the PROM.
3203 		 */
3204 		if (ph->ph_flags & PH_FROM_PROM) {
3205 			return (DDI_PROP_RESULT_ERROR);
3206 		} else {
3207 			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
3208 		}
3209 
3210 		/*
3211 		 * Move the current location to the start of the next
3212 		 * bit of undecoded data.
3213 		 */
3214 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3215 		    sizeof (int64_t);
3216 			return (DDI_PROP_RESULT_OK);
3217 
3218 	case DDI_PROP_CMD_ENCODE:
3219 		/*
3220 		 * Check that there is room to encoded the data
3221 		 */
3222 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3223 		    ph->ph_size < sizeof (int64_t) ||
3224 		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
3225 		    ph->ph_size - sizeof (int64_t))))
3226 			return (DDI_PROP_RESULT_ERROR);
3227 
3228 		/*
3229 		 * Encode the integer into the byte stream one byte at a
3230 		 * time.
3231 		 */
3232 		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));
3233 
3234 		/*
3235 		 * Move the current location to the start of the next bit of
3236 		 * space where we can store encoded data.
3237 		 */
3238 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3239 		    sizeof (int64_t);
3240 		return (DDI_PROP_RESULT_OK);
3241 
3242 	case DDI_PROP_CMD_SKIP:
3243 		/*
3244 		 * Check that there is encoded data
3245 		 */
3246 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3247 		    ph->ph_size < sizeof (int64_t))
3248 			return (DDI_PROP_RESULT_ERROR);
3249 
3250 		if ((caddr_t)ph->ph_cur_pos ==
3251 		    (caddr_t)ph->ph_data + ph->ph_size) {
3252 			return (DDI_PROP_RESULT_EOF);
3253 		} else if ((caddr_t)ph->ph_cur_pos >
3254 		    (caddr_t)ph->ph_data + ph->ph_size) {
3255 			return (DDI_PROP_RESULT_EOF);
3256 		}
3257 
3258 		/*
3259 		 * Move the current location to the start of
3260 		 * the next bit of undecoded data.
3261 		 */
3262 		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
3263 		    sizeof (int64_t);
3264 			return (DDI_PROP_RESULT_OK);
3265 
3266 	case DDI_PROP_CMD_GET_ESIZE:
3267 		/*
3268 		 * Return the size of an encoded integer on OBP
3269 		 */
3270 		return (sizeof (int64_t));
3271 
3272 	case DDI_PROP_CMD_GET_DSIZE:
3273 		/*
3274 		 * Return the size of a decoded integer on the system.
3275 		 */
3276 		return (sizeof (int64_t));
3277 
3278 	default:
3279 #ifdef DEBUG
3280 		panic("ddi_prop_int64_op: %x impossible", cmd);
3281 		/*NOTREACHED*/
3282 #else
3283 		return (DDI_PROP_RESULT_ERROR);
3284 #endif  /* DEBUG */
3285 	}
3286 }
3287 
3288 /*
3289  * OBP 1275 string operator.
3290  *
3291  * OBP strings are NULL terminated.
3292  */
3293 int
3294 ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
3295 {
3296 	int	n;
3297 	char	*p;
3298 	char	*end;
3299 
3300 	switch (cmd) {
3301 	case DDI_PROP_CMD_DECODE:
3302 		/*
3303 		 * Check that there is encoded data
3304 		 */
3305 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3306 			return (DDI_PROP_RESULT_ERROR);
3307 		}
3308 
3309 		/*
3310 		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
3311 		 * how to NULL terminate result.
3312 		 */
3313 		p = (char *)ph->ph_cur_pos;
3314 		end = (char *)ph->ph_data + ph->ph_size;
3315 		if (p >= end)
3316 			return (DDI_PROP_RESULT_EOF);
3317 
3318 		while (p < end) {
3319 			*data++ = *p;
3320 			if (*p++ == 0) {	/* NULL from OBP */
3321 				ph->ph_cur_pos = p;
3322 				return (DDI_PROP_RESULT_OK);
3323 			}
3324 		}
3325 
3326 		/*
3327 		 * If OBP did not NULL terminate string, which happens
3328 		 * (at least) for 'true'/'false' boolean values, account for
3329 		 * the space and store null termination on decode.
3330 		 */
3331 		ph->ph_cur_pos = p;
3332 		*data = 0;
3333 		return (DDI_PROP_RESULT_OK);
3334 
3335 	case DDI_PROP_CMD_ENCODE:
3336 		/*
3337 		 * Check that there is room to encoded the data
3338 		 */
3339 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3340 			return (DDI_PROP_RESULT_ERROR);
3341 		}
3342 
3343 		n = strlen(data) + 1;
3344 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3345 		    ph->ph_size - n)) {
3346 			return (DDI_PROP_RESULT_ERROR);
3347 		}
3348 
3349 		/*
3350 		 * Copy the NULL terminated string
3351 		 */
3352 		bcopy(data, ph->ph_cur_pos, n);
3353 
3354 		/*
3355 		 * Move the current location to the start of the next bit of
3356 		 * space where we can store encoded data.
3357 		 */
3358 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
3359 		return (DDI_PROP_RESULT_OK);
3360 
3361 	case DDI_PROP_CMD_SKIP:
3362 		/*
3363 		 * Check that there is encoded data
3364 		 */
3365 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
3366 			return (DDI_PROP_RESULT_ERROR);
3367 		}
3368 
3369 		/*
3370 		 * Return the string length plus one for the NULL
3371 		 * We know the size of the property, we need to
3372 		 * ensure that the string is properly formatted,
3373 		 * since we may be looking up random OBP data.
3374 		 */
3375 		p = (char *)ph->ph_cur_pos;
3376 		end = (char *)ph->ph_data + ph->ph_size;
3377 		if (p >= end)
3378 			return (DDI_PROP_RESULT_EOF);
3379 
3380 		while (p < end) {
3381 			if (*p++ == 0) {	/* NULL from OBP */
3382 				ph->ph_cur_pos = p;
3383 				return (DDI_PROP_RESULT_OK);
3384 			}
3385 		}
3386 
3387 		/*
3388 		 * Accommodate the fact that OBP does not always NULL
3389 		 * terminate strings.
3390 		 */
3391 		ph->ph_cur_pos = p;
3392 		return (DDI_PROP_RESULT_OK);
3393 
3394 	case DDI_PROP_CMD_GET_ESIZE:
3395 		/*
3396 		 * Return the size of the encoded string on OBP.
3397 		 */
3398 		return (strlen(data) + 1);
3399 
3400 	case DDI_PROP_CMD_GET_DSIZE:
3401 		/*
3402 		 * Return the string length plus one for the NULL.
3403 		 * We know the size of the property, we need to
3404 		 * ensure that the string is properly formatted,
3405 		 * since we may be looking up random OBP data.
3406 		 */
3407 		p = (char *)ph->ph_cur_pos;
3408 		end = (char *)ph->ph_data + ph->ph_size;
3409 		if (p >= end)
3410 			return (DDI_PROP_RESULT_EOF);
3411 
3412 		for (n = 0; p < end; n++) {
3413 			if (*p++ == 0) {	/* NULL from OBP */
3414 				ph->ph_cur_pos = p;
3415 				return (n + 1);
3416 			}
3417 		}
3418 
3419 		/*
3420 		 * If OBP did not NULL terminate string, which happens for
3421 		 * 'true'/'false' boolean values, account for the space
3422 		 * to store null termination here.
3423 		 */
3424 		ph->ph_cur_pos = p;
3425 		return (n + 1);
3426 
3427 	default:
3428 #ifdef DEBUG
3429 		panic("ddi_prop_1275_string: %x impossible", cmd);
3430 		/*NOTREACHED*/
3431 #else
3432 		return (DDI_PROP_RESULT_ERROR);
3433 #endif	/* DEBUG */
3434 	}
3435 }
3436 
3437 /*
3438  * OBP 1275 byte operator
3439  *
3440  * Caller must specify the number of bytes to get.  OBP encodes bytes
3441  * as a byte so there is a 1-to-1 translation.
3442  */
3443 int
3444 ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
3445 	uint_t nelements)
3446 {
3447 	switch (cmd) {
3448 	case DDI_PROP_CMD_DECODE:
3449 		/*
3450 		 * Check that there is encoded data
3451 		 */
3452 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3453 		    ph->ph_size < nelements ||
3454 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3455 		    ph->ph_size - nelements)))
3456 			return (DDI_PROP_RESULT_ERROR);
3457 
3458 		/*
3459 		 * Copy out the bytes
3460 		 */
3461 		bcopy(ph->ph_cur_pos, data, nelements);
3462 
3463 		/*
3464 		 * Move the current location
3465 		 */
3466 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3467 		return (DDI_PROP_RESULT_OK);
3468 
3469 	case DDI_PROP_CMD_ENCODE:
3470 		/*
3471 		 * Check that there is room to encode the data
3472 		 */
3473 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3474 		    ph->ph_size < nelements ||
3475 		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3476 		    ph->ph_size - nelements)))
3477 			return (DDI_PROP_RESULT_ERROR);
3478 
3479 		/*
3480 		 * Copy in the bytes
3481 		 */
3482 		bcopy(data, ph->ph_cur_pos, nelements);
3483 
3484 		/*
3485 		 * Move the current location to the start of the next bit of
3486 		 * space where we can store encoded data.
3487 		 */
3488 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3489 		return (DDI_PROP_RESULT_OK);
3490 
3491 	case DDI_PROP_CMD_SKIP:
3492 		/*
3493 		 * Check that there is encoded data
3494 		 */
3495 		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
3496 		    ph->ph_size < nelements)
3497 			return (DDI_PROP_RESULT_ERROR);
3498 
3499 		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
3500 		    ph->ph_size - nelements))
3501 			return (DDI_PROP_RESULT_EOF);
3502 
3503 		/*
3504 		 * Move the current location
3505 		 */
3506 		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
3507 		return (DDI_PROP_RESULT_OK);
3508 
3509 	case DDI_PROP_CMD_GET_ESIZE:
3510 		/*
3511 		 * The size in bytes of the encoded size is the
3512 		 * same as the decoded size provided by the caller.
3513 		 */
3514 		return (nelements);
3515 
3516 	case DDI_PROP_CMD_GET_DSIZE:
3517 		/*
3518 		 * Just return the number of bytes specified by the caller.
3519 		 */
3520 		return (nelements);
3521 
3522 	default:
3523 #ifdef DEBUG
3524 		panic("ddi_prop_1275_bytes: %x impossible", cmd);
3525 		/*NOTREACHED*/
3526 #else
3527 		return (DDI_PROP_RESULT_ERROR);
3528 #endif	/* DEBUG */
3529 	}
3530 }
3531 
3532 /*
3533  * Used for properties that come from the OBP, hardware configuration files,
3534  * or that are created by calls to ddi_prop_update(9F).
3535  */
3536 static struct prop_handle_ops prop_1275_ops = {
3537 	ddi_prop_1275_int,
3538 	ddi_prop_1275_string,
3539 	ddi_prop_1275_bytes,
3540 	ddi_prop_int64_op
3541 };
3542 
3543 
3544 /*
3545  * Interface to create/modify a managed property on child's behalf...
3546  * Flags interpreted are:
3547  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3548  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3549  *
3550  * Use same dev_t when modifying or undefining a property.
3551  * Search for properties with DDI_DEV_T_ANY to match first named
3552  * property on the list.
3553  *
3554  * Properties are stored LIFO and subsequently will match the first
3555  * `matching' instance.
3556  */
3557 
3558 /*
3559  * ddi_prop_add:	Add a software defined property
3560  */
3561 
3562 /*
3563  * define to get a new ddi_prop_t.
3564  * km_flags are KM_SLEEP or KM_NOSLEEP.
3565  */
3566 
3567 #define	DDI_NEW_PROP_T(km_flags)	\
3568 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3569 
3570 static int
3571 ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
3572     char *name, caddr_t value, int length)
3573 {
3574 	ddi_prop_t	*new_propp, *propp;
3575 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
3576 	int		km_flags = KM_NOSLEEP;
3577 	int		name_buf_len;
3578 
3579 	/*
3580 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
3581 	 */
3582 
3583 	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
3584 		return (DDI_PROP_INVAL_ARG);
3585 
3586 	if (flags & DDI_PROP_CANSLEEP)
3587 		km_flags = KM_SLEEP;
3588 
3589 	if (flags & DDI_PROP_SYSTEM_DEF)
3590 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
3591 	else if (flags & DDI_PROP_HW_DEF)
3592 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
3593 
3594 	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
3595 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3596 		return (DDI_PROP_NO_MEMORY);
3597 	}
3598 
3599 	/*
3600 	 * If dev is major number 0, then we need to do a ddi_name_to_major
3601 	 * to get the real major number for the device.  This needs to be
3602 	 * done because some drivers need to call ddi_prop_create in their
3603 	 * attach routines but they don't have a dev.  By creating the dev
3604 	 * ourself if the major number is 0, drivers will not have to know what
3605 	 * their major number.	They can just create a dev with major number
3606 	 * 0 and pass it in.  For device 0, we will be doing a little extra
3607 	 * work by recreating the same dev that we already have, but its the
3608 	 * price you pay :-).
3609 	 *
3610 	 * This fixes bug #1098060.
3611 	 */
3612 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
3613 		new_propp->prop_dev =
3614 		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
3615 		    getminor(dev));
3616 	} else
3617 		new_propp->prop_dev = dev;
3618 
3619 	/*
3620 	 * Allocate space for property name and copy it in...
3621 	 */
3622 
3623 	name_buf_len = strlen(name) + 1;
3624 	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
3625 	if (new_propp->prop_name == 0)	{
3626 		kmem_free(new_propp, sizeof (ddi_prop_t));
3627 		cmn_err(CE_CONT, prop_no_mem_msg, name);
3628 		return (DDI_PROP_NO_MEMORY);
3629 	}
3630 	bcopy(name, new_propp->prop_name, name_buf_len);
3631 
3632 	/*
3633 	 * Set the property type
3634 	 */
3635 	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;
3636 
3637 	/*
3638 	 * Set length and value ONLY if not an explicit property undefine:
3639 	 * NOTE: value and length are zero for explicit undefines.
3640 	 */
3641 
3642 	if (flags & DDI_PROP_UNDEF_IT) {
3643 		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
3644 	} else {
3645 		if ((new_propp->prop_len = length) != 0) {
3646 			new_propp->prop_val = kmem_alloc(length, km_flags);
3647 			if (new_propp->prop_val == 0)  {
3648 				kmem_free(new_propp->prop_name, name_buf_len);
3649 				kmem_free(new_propp, sizeof (ddi_prop_t));
3650 				cmn_err(CE_CONT, prop_no_mem_msg, name);
3651 				return (DDI_PROP_NO_MEMORY);
3652 			}
3653 			bcopy(value, new_propp->prop_val, length);
3654 		}
3655 	}
3656 
3657 	/*
3658 	 * Link property into beginning of list. (Properties are LIFO order.)
3659 	 */
3660 
3661 	mutex_enter(&(DEVI(dip)->devi_lock));
3662 	propp = *list_head;
3663 	new_propp->prop_next = propp;
3664 	*list_head = new_propp;
3665 	mutex_exit(&(DEVI(dip)->devi_lock));
3666 	return (DDI_PROP_SUCCESS);
3667 }
3668 
3669 
3670 /*
3671  * ddi_prop_change:	Modify a software managed property value
3672  *
3673  *			Set new length and value if found.
3674  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3675  *			input name is the NULL string.
3676  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3677  *
3678  *			Note: an undef can be modified to be a define,
3679  *			(you can't go the other way.)
3680  */
3681 
3682 static int
3683 ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
3684     char *name, caddr_t value, int length)
3685 {
3686 	ddi_prop_t	*propp;
3687 	ddi_prop_t	**ppropp;
3688 	caddr_t		p = NULL;
3689 
3690 	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
3691 		return (DDI_PROP_INVAL_ARG);
3692 
3693 	/*
3694 	 * Preallocate buffer, even if we don't need it...
3695 	 */
3696 	if (length != 0)  {
3697 		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
3698 		    KM_SLEEP : KM_NOSLEEP);
3699 		if (p == NULL)	{
3700 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3701 			return (DDI_PROP_NO_MEMORY);
3702 		}
3703 	}
3704 
3705 	/*
3706 	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
3707 	 * number, a real dev_t value should be created based upon the dip's
3708 	 * binding driver.  See ddi_prop_add...
3709 	 */
3710 	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
3711 		dev = makedevice(
3712 		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
3713 		    getminor(dev));
3714 
3715 	/*
3716 	 * Check to see if the property exists.  If so we modify it.
3717 	 * Else we create it by calling ddi_prop_add().
3718 	 */
3719 	mutex_enter(&(DEVI(dip)->devi_lock));
3720 	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
3721 	if (flags & DDI_PROP_SYSTEM_DEF)
3722 		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
3723 	else if (flags & DDI_PROP_HW_DEF)
3724 		ppropp = &DEVI(dip)->devi_hw_prop_ptr;
3725 
3726 	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
3727 		/*
3728 		 * Need to reallocate buffer?  If so, do it
3729 		 * carefully (reuse same space if new prop
3730 		 * is same size and non-NULL sized).
3731 		 */
3732 		if (length != 0)
3733 			bcopy(value, p, length);
3734 
3735 		if (propp->prop_len != 0)
3736 			kmem_free(propp->prop_val, propp->prop_len);
3737 
3738 		propp->prop_len = length;
3739 		propp->prop_val = p;
3740 		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
3741 		mutex_exit(&(DEVI(dip)->devi_lock));
3742 		return (DDI_PROP_SUCCESS);
3743 	}
3744 
3745 	mutex_exit(&(DEVI(dip)->devi_lock));
3746 	if (length != 0)
3747 		kmem_free(p, length);
3748 
3749 	return (ddi_prop_add(dev, dip, flags, name, value, length));
3750 }
3751 
3752 /*
3753  * Common update routine used to update and encode a property.	Creates
3754  * a property handle, calls the property encode routine, figures out if
3755  * the property already exists and updates if it does.	Otherwise it
3756  * creates if it does not exist.
3757  */
3758 int
3759 ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
3760     char *name, void *data, uint_t nelements,
3761     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
3762 {
3763 	prop_handle_t	ph;
3764 	int		rval;
3765 	uint_t		ourflags;
3766 
3767 	/*
3768 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3769 	 * return error.
3770 	 */
3771 	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3772 		return (DDI_PROP_INVAL_ARG);
3773 
3774 	/*
3775 	 * Create the handle
3776 	 */
3777 	ph.ph_data = NULL;
3778 	ph.ph_cur_pos = NULL;
3779 	ph.ph_save_pos = NULL;
3780 	ph.ph_size = 0;
3781 	ph.ph_ops = &prop_1275_ops;
3782 
3783 	/*
3784 	 * ourflags:
3785 	 * For compatibility with the old interfaces.  The old interfaces
3786 	 * didn't sleep by default and slept when the flag was set.  These
3787 	 * interfaces to the opposite.	So the old interfaces now set the
3788 	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
3789 	 *
3790 	 * ph.ph_flags:
3791 	 * Blocked data or unblocked data allocation
3792 	 * for ph.ph_data in ddi_prop_encode_alloc()
3793 	 */
3794 	if (flags & DDI_PROP_DONTSLEEP) {
3795 		ourflags = flags;
3796 		ph.ph_flags = DDI_PROP_DONTSLEEP;
3797 	} else {
3798 		ourflags = flags | DDI_PROP_CANSLEEP;
3799 		ph.ph_flags = DDI_PROP_CANSLEEP;
3800 	}
3801 
3802 	/*
3803 	 * Encode the data and store it in the property handle by
3804 	 * calling the prop_encode routine.
3805 	 */
3806 	if ((rval = (*prop_create)(&ph, data, nelements)) !=
3807 	    DDI_PROP_SUCCESS) {
3808 		if (rval == DDI_PROP_NO_MEMORY)
3809 			cmn_err(CE_CONT, prop_no_mem_msg, name);
3810 		if (ph.ph_size != 0)
3811 			kmem_free(ph.ph_data, ph.ph_size);
3812 		return (rval);
3813 	}
3814 
3815 	/*
3816 	 * The old interfaces use a stacking approach to creating
3817 	 * properties.	If we are being called from the old interfaces,
3818 	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
3819 	 * create without checking.
3820 	 */
3821 	if (flags & DDI_PROP_STACK_CREATE) {
3822 		rval = ddi_prop_add(match_dev, dip,
3823 		    ourflags, name, ph.ph_data, ph.ph_size);
3824 	} else {
3825 		rval = ddi_prop_change(match_dev, dip,
3826 		    ourflags, name, ph.ph_data, ph.ph_size);
3827 	}
3828 
3829 	/*
3830 	 * Free the encoded data allocated in the prop_encode routine.
3831 	 */
3832 	if (ph.ph_size != 0)
3833 		kmem_free(ph.ph_data, ph.ph_size);
3834 
3835 	return (rval);
3836 }
3837 
3838 
3839 /*
3840  * ddi_prop_create:	Define a managed property:
3841  *			See above for details.
3842  */
3843 
3844 int
3845 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3846     char *name, caddr_t value, int length)
3847 {
3848 	if (!(flag & DDI_PROP_CANSLEEP)) {
3849 		flag |= DDI_PROP_DONTSLEEP;
3850 #ifdef DDI_PROP_DEBUG
3851 		if (length != 0)
3852 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3853 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3854 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3855 #endif /* DDI_PROP_DEBUG */
3856 	}
3857 	flag &= ~DDI_PROP_SYSTEM_DEF;
3858 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3859 	return (ddi_prop_update_common(dev, dip, flag, name,
3860 	    value, length, ddi_prop_fm_encode_bytes));
3861 }
3862 
3863 int
3864 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3865     char *name, caddr_t value, int length)
3866 {
3867 	if (!(flag & DDI_PROP_CANSLEEP))
3868 		flag |= DDI_PROP_DONTSLEEP;
3869 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3870 	return (ddi_prop_update_common(dev, dip, flag,
3871 	    name, value, length, ddi_prop_fm_encode_bytes));
3872 }
3873 
3874 int
3875 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3876     char *name, caddr_t value, int length)
3877 {
3878 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3879 
3880 	/*
3881 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3882 	 * return error.
3883 	 */
3884 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3885 		return (DDI_PROP_INVAL_ARG);
3886 
3887 	if (!(flag & DDI_PROP_CANSLEEP))
3888 		flag |= DDI_PROP_DONTSLEEP;
3889 	flag &= ~DDI_PROP_SYSTEM_DEF;
3890 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3891 		return (DDI_PROP_NOT_FOUND);
3892 
3893 	return (ddi_prop_update_common(dev, dip,
3894 	    (flag | DDI_PROP_TYPE_BYTE), name,
3895 	    value, length, ddi_prop_fm_encode_bytes));
3896 }
3897 
3898 int
3899 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3900     char *name, caddr_t value, int length)
3901 {
3902 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3903 
3904 	/*
3905 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3906 	 * return error.
3907 	 */
3908 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3909 		return (DDI_PROP_INVAL_ARG);
3910 
3911 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3912 		return (DDI_PROP_NOT_FOUND);
3913 
3914 	if (!(flag & DDI_PROP_CANSLEEP))
3915 		flag |= DDI_PROP_DONTSLEEP;
3916 	return (ddi_prop_update_common(dev, dip,
3917 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3918 	    name, value, length, ddi_prop_fm_encode_bytes));
3919 }
3920 
3921 
3922 /*
3923  * Common lookup routine used to lookup and decode a property.
3924  * Creates a property handle, searches for the raw encoded data,
3925  * fills in the handle, and calls the property decode functions
3926  * passed in.
3927  *
3928  * This routine is not static because ddi_bus_prop_op() which lives in
3929  * ddi_impl.c calls it.  No driver should be calling this routine.
3930  */
3931 int
3932 ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
3933     uint_t flags, char *name, void *data, uint_t *nelements,
3934     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
3935 {
3936 	int		rval;
3937 	uint_t		ourflags;
3938 	prop_handle_t	ph;
3939 
3940 	if ((match_dev == DDI_DEV_T_NONE) ||
3941 	    (name == NULL) || (strlen(name) == 0))
3942 		return (DDI_PROP_INVAL_ARG);
3943 
3944 	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
3945 	    flags | DDI_PROP_CANSLEEP;
3946 
3947 	/*
3948 	 * Get the encoded data
3949 	 */
3950 	bzero(&ph, sizeof (prop_handle_t));
3951 
3952 	if (flags & DDI_UNBND_DLPI2) {
3953 		/*
3954 		 * For unbound dlpi style-2 devices, index into
3955 		 * the devnames' array and search the global
3956 		 * property list.
3957 		 */
3958 		ourflags &= ~DDI_UNBND_DLPI2;
3959 		rval = i_ddi_prop_search_global(match_dev,
3960 		    ourflags, name, &ph.ph_data, &ph.ph_size);
3961 	} else {
3962 		rval = ddi_prop_search_common(match_dev, dip,
3963 		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
3964 		    &ph.ph_data, &ph.ph_size);
3965 
3966 	}
3967 
3968 	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
3969 		ASSERT(ph.ph_data == NULL);
3970 		ASSERT(ph.ph_size == 0);
3971 		return (rval);
3972 	}
3973 
3974 	/*
3975 	 * If the encoded data came from a OBP or software
3976 	 * use the 1275 OBP decode/encode routines.
3977 	 */
3978 	ph.ph_cur_pos = ph.ph_data;
3979 	ph.ph_save_pos = ph.ph_data;
3980 	ph.ph_ops = &prop_1275_ops;
3981 	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;
3982 
3983 	rval = (*prop_decoder)(&ph, data, nelements);
3984 
3985 	/*
3986 	 * Free the encoded data
3987 	 */
3988 	if (ph.ph_size != 0)
3989 		kmem_free(ph.ph_data, ph.ph_size);
3990 
3991 	return (rval);
3992 }
3993 
3994 /*
3995  * Lookup and return an array of composite properties.  The driver must
3996  * provide the decode routine.
3997  */
3998 int
3999 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4000     uint_t flags, char *name, void *data, uint_t *nelements,
4001     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4002 {
4003 	return (ddi_prop_lookup_common(match_dev, dip,
4004 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
4005 	    data, nelements, prop_decoder));
4006 }
4007 
4008 /*
4009  * Return 1 if a property exists (no type checking done).
4010  * Return 0 if it does not exist.
4011  */
4012 int
4013 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4014 {
4015 	int	i;
4016 	uint_t	x = 0;
4017 
4018 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4019 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4020 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4021 }
4022 
4023 
4024 /*
4025  * Update an array of composite properties.  The driver must
4026  * provide the encode routine.
4027  */
4028 int
4029 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4030     char *name, void *data, uint_t nelements,
4031     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4032 {
4033 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4034 	    name, data, nelements, prop_create));
4035 }
4036 
4037 /*
4038  * Get a single integer or boolean property and return it.
4039  * If the property does not exists, or cannot be decoded,
4040  * then return the defvalue passed in.
4041  *
4042  * This routine always succeeds.
4043  */
4044 int
4045 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4046     char *name, int defvalue)
4047 {
4048 	int	data;
4049 	uint_t	nelements;
4050 	int	rval;
4051 
4052 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4053 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4054 #ifdef DEBUG
4055 		if (dip != NULL) {
4056 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4057 			    " 0x%x (prop = %s, node = %s%d)", flags,
4058 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4059 		}
4060 #endif /* DEBUG */
4061 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4062 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4063 	}
4064 
4065 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4066 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4067 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4068 		if (rval == DDI_PROP_END_OF_DATA)
4069 			data = 1;
4070 		else
4071 			data = defvalue;
4072 	}
4073 	return (data);
4074 }
4075 
4076 /*
4077  * Get a single 64 bit integer or boolean property and return it.
4078  * If the property does not exists, or cannot be decoded,
4079  * then return the defvalue passed in.
4080  *
4081  * This routine always succeeds.
4082  */
4083 int64_t
4084 ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
4085     char *name, int64_t defvalue)
4086 {
4087 	int64_t	data;
4088 	uint_t	nelements;
4089 	int	rval;
4090 
4091 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4092 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4093 #ifdef DEBUG
4094 		if (dip != NULL) {
4095 			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
4096 			    " 0x%x (prop = %s, node = %s%d)", flags,
4097 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4098 		}
4099 #endif /* DEBUG */
4100 		return (DDI_PROP_INVAL_ARG);
4101 	}
4102 
4103 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4104 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4105 	    name, &data, &nelements, ddi_prop_fm_decode_int64))
4106 	    != DDI_PROP_SUCCESS) {
4107 		if (rval == DDI_PROP_END_OF_DATA)
4108 			data = 1;
4109 		else
4110 			data = defvalue;
4111 	}
4112 	return (data);
4113 }
4114 
4115 /*
4116  * Get an array of integer property
4117  */
4118 int
4119 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4120     char *name, int **data, uint_t *nelements)
4121 {
4122 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4123 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4124 #ifdef DEBUG
4125 		if (dip != NULL) {
4126 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4127 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4128 			    flags, name, ddi_driver_name(dip),
4129 			    ddi_get_instance(dip));
4130 		}
4131 #endif /* DEBUG */
4132 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4133 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4134 	}
4135 
4136 	return (ddi_prop_lookup_common(match_dev, dip,
4137 	    (flags | DDI_PROP_TYPE_INT), name, data,
4138 	    nelements, ddi_prop_fm_decode_ints));
4139 }
4140 
4141 /*
4142  * Get an array of 64 bit integer properties
4143  */
4144 int
4145 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4146     char *name, int64_t **data, uint_t *nelements)
4147 {
4148 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4149 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4150 #ifdef DEBUG
4151 		if (dip != NULL) {
4152 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4153 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4154 			    flags, name, ddi_driver_name(dip),
4155 			    ddi_get_instance(dip));
4156 		}
4157 #endif /* DEBUG */
4158 		return (DDI_PROP_INVAL_ARG);
4159 	}
4160 
4161 	return (ddi_prop_lookup_common(match_dev, dip,
4162 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4163 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4164 }
4165 
4166 /*
4167  * Update a single integer property.  If the property exists on the drivers
4168  * property list it updates, else it creates it.
4169  */
4170 int
4171 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4172     char *name, int data)
4173 {
4174 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4175 	    name, &data, 1, ddi_prop_fm_encode_ints));
4176 }
4177 
4178 /*
4179  * Update a single 64 bit integer property.
4180  * Update the driver property list if it exists, else create it.
4181  */
4182 int
4183 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4184     char *name, int64_t data)
4185 {
4186 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4187 	    name, &data, 1, ddi_prop_fm_encode_int64));
4188 }
4189 
4190 int
4191 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4192     char *name, int data)
4193 {
4194 	return (ddi_prop_update_common(match_dev, dip,
4195 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4196 	    name, &data, 1, ddi_prop_fm_encode_ints));
4197 }
4198 
4199 int
4200 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4201     char *name, int64_t data)
4202 {
4203 	return (ddi_prop_update_common(match_dev, dip,
4204 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4205 	    name, &data, 1, ddi_prop_fm_encode_int64));
4206 }
4207 
4208 /*
4209  * Update an array of integer property.  If the property exists on the drivers
4210  * property list it updates, else it creates it.
4211  */
4212 int
4213 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4214     char *name, int *data, uint_t nelements)
4215 {
4216 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4217 	    name, data, nelements, ddi_prop_fm_encode_ints));
4218 }
4219 
4220 /*
4221  * Update an array of 64 bit integer properties.
4222  * Update the driver property list if it exists, else create it.
4223  */
4224 int
4225 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4226     char *name, int64_t *data, uint_t nelements)
4227 {
4228 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4229 	    name, data, nelements, ddi_prop_fm_encode_int64));
4230 }
4231 
4232 int
4233 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4234     char *name, int64_t *data, uint_t nelements)
4235 {
4236 	return (ddi_prop_update_common(match_dev, dip,
4237 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4238 	    name, data, nelements, ddi_prop_fm_encode_int64));
4239 }
4240 
4241 int
4242 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4243     char *name, int *data, uint_t nelements)
4244 {
4245 	return (ddi_prop_update_common(match_dev, dip,
4246 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4247 	    name, data, nelements, ddi_prop_fm_encode_ints));
4248 }
4249 
4250 /*
4251  * Get a single string property.
4252  */
4253 int
4254 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4255     char *name, char **data)
4256 {
4257 	uint_t x;
4258 
4259 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4260 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4261 #ifdef DEBUG
4262 		if (dip != NULL) {
4263 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4264 			    "(prop = %s, node = %s%d); invalid bits ignored",
4265 			    "ddi_prop_lookup_string", flags, name,
4266 			    ddi_driver_name(dip), ddi_get_instance(dip));
4267 		}
4268 #endif /* DEBUG */
4269 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4270 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4271 	}
4272 
4273 	return (ddi_prop_lookup_common(match_dev, dip,
4274 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4275 	    &x, ddi_prop_fm_decode_string));
4276 }
4277 
4278 /*
4279  * Get an array of strings property.
4280  */
4281 int
4282 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4283     char *name, char ***data, uint_t *nelements)
4284 {
4285 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4286 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4287 #ifdef DEBUG
4288 		if (dip != NULL) {
4289 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4290 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4291 			    flags, name, ddi_driver_name(dip),
4292 			    ddi_get_instance(dip));
4293 		}
4294 #endif /* DEBUG */
4295 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4296 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4297 	}
4298 
4299 	return (ddi_prop_lookup_common(match_dev, dip,
4300 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4301 	    nelements, ddi_prop_fm_decode_strings));
4302 }
4303 
4304 /*
4305  * Update a single string property.
4306  */
4307 int
4308 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4309     char *name, char *data)
4310 {
4311 	return (ddi_prop_update_common(match_dev, dip,
4312 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4313 	    ddi_prop_fm_encode_string));
4314 }
4315 
4316 int
4317 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4318     char *name, char *data)
4319 {
4320 	return (ddi_prop_update_common(match_dev, dip,
4321 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4322 	    name, &data, 1, ddi_prop_fm_encode_string));
4323 }
4324 
4325 
4326 /*
4327  * Update an array of strings property.
4328  */
4329 int
4330 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4331     char *name, char **data, uint_t nelements)
4332 {
4333 	return (ddi_prop_update_common(match_dev, dip,
4334 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4335 	    ddi_prop_fm_encode_strings));
4336 }
4337 
4338 int
4339 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4340     char *name, char **data, uint_t nelements)
4341 {
4342 	return (ddi_prop_update_common(match_dev, dip,
4343 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4344 	    name, data, nelements,
4345 	    ddi_prop_fm_encode_strings));
4346 }
4347 
4348 
4349 /*
4350  * Get an array of bytes property.
4351  */
4352 int
4353 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4354     char *name, uchar_t **data, uint_t *nelements)
4355 {
4356 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4357 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2)) {
4358 #ifdef DEBUG
4359 		if (dip != NULL) {
4360 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4361 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4362 			    flags, name, ddi_driver_name(dip),
4363 			    ddi_get_instance(dip));
4364 		}
4365 #endif /* DEBUG */
4366 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4367 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4368 	}
4369 
4370 	return (ddi_prop_lookup_common(match_dev, dip,
4371 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4372 	    nelements, ddi_prop_fm_decode_bytes));
4373 }
4374 
4375 /*
4376  * Update an array of bytes property.
4377  */
4378 int
4379 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4380     char *name, uchar_t *data, uint_t nelements)
4381 {
4382 	if (nelements == 0)
4383 		return (DDI_PROP_INVAL_ARG);
4384 
4385 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4386 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4387 }
4388 
4389 
4390 int
4391 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4392     char *name, uchar_t *data, uint_t nelements)
4393 {
4394 	if (nelements == 0)
4395 		return (DDI_PROP_INVAL_ARG);
4396 
4397 	return (ddi_prop_update_common(match_dev, dip,
4398 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4399 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4400 }
4401 
4402 
4403 /*
4404  * ddi_prop_remove_common:	Undefine a managed property:
4405  *			Input dev_t must match dev_t when defined.
4406  *			Returns DDI_PROP_NOT_FOUND, possibly.
4407  *			DDI_PROP_INVAL_ARG is also possible if dev is
4408  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4409  */
4410 int
4411 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4412 {
4413 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4414 	ddi_prop_t	*propp;
4415 	ddi_prop_t	*lastpropp = NULL;
4416 
4417 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4418 	    (strlen(name) == 0)) {
4419 		return (DDI_PROP_INVAL_ARG);
4420 	}
4421 
4422 	if (flag & DDI_PROP_SYSTEM_DEF)
4423 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4424 	else if (flag & DDI_PROP_HW_DEF)
4425 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4426 
4427 	mutex_enter(&(DEVI(dip)->devi_lock));
4428 
4429 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4430 		if (DDI_STRSAME(propp->prop_name, name) &&
4431 		    (dev == propp->prop_dev)) {
4432 			/*
4433 			 * Unlink this propp allowing for it to
4434 			 * be first in the list:
4435 			 */
4436 
4437 			if (lastpropp == NULL)
4438 				*list_head = propp->prop_next;
4439 			else
4440 				lastpropp->prop_next = propp->prop_next;
4441 
4442 			mutex_exit(&(DEVI(dip)->devi_lock));
4443 
4444 			/*
4445 			 * Free memory and return...
4446 			 */
4447 			kmem_free(propp->prop_name,
4448 			    strlen(propp->prop_name) + 1);
4449 			if (propp->prop_len != 0)
4450 				kmem_free(propp->prop_val, propp->prop_len);
4451 			kmem_free(propp, sizeof (ddi_prop_t));
4452 			return (DDI_PROP_SUCCESS);
4453 		}
4454 		lastpropp = propp;
4455 	}
4456 	mutex_exit(&(DEVI(dip)->devi_lock));
4457 	return (DDI_PROP_NOT_FOUND);
4458 }
4459 
4460 int
4461 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4462 {
4463 	return (ddi_prop_remove_common(dev, dip, name, 0));
4464 }
4465 
4466 int
4467 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4468 {
4469 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4470 }
4471 
4472 /*
4473  * e_ddi_prop_list_delete: remove a list of properties
4474  *	Note that the caller needs to provide the required protection
4475  *	(eg. devi_lock if these properties are still attached to a devi)
4476  */
4477 void
4478 e_ddi_prop_list_delete(ddi_prop_t *props)
4479 {
4480 	i_ddi_prop_list_delete(props);
4481 }
4482 
4483 /*
4484  * ddi_prop_remove_all_common:
4485  *	Used before unloading a driver to remove
4486  *	all properties. (undefines all dev_t's props.)
4487  *	Also removes `explicitly undefined' props.
4488  *	No errors possible.
4489  */
4490 void
4491 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4492 {
4493 	ddi_prop_t	**list_head;
4494 
4495 	mutex_enter(&(DEVI(dip)->devi_lock));
4496 	if (flag & DDI_PROP_SYSTEM_DEF) {
4497 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4498 	} else if (flag & DDI_PROP_HW_DEF) {
4499 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4500 	} else {
4501 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4502 	}
4503 	i_ddi_prop_list_delete(*list_head);
4504 	*list_head = NULL;
4505 	mutex_exit(&(DEVI(dip)->devi_lock));
4506 }
4507 
4508 
4509 /*
4510  * ddi_prop_remove_all:		Remove all driver prop definitions.
4511  */
4512 
4513 void
4514 ddi_prop_remove_all(dev_info_t *dip)
4515 {
4516 	i_ddi_prop_dyn_driver_set(dip, NULL);
4517 	ddi_prop_remove_all_common(dip, 0);
4518 }
4519 
4520 /*
4521  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4522  */
4523 
4524 void
4525 e_ddi_prop_remove_all(dev_info_t *dip)
4526 {
4527 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4528 }
4529 
4530 
4531 /*
4532  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4533  *			searches which match this property return
4534  *			the error code DDI_PROP_UNDEFINED.
4535  *
4536  *			Use ddi_prop_remove to negate effect of
4537  *			ddi_prop_undefine
4538  *
4539  *			See above for error returns.
4540  */
4541 
4542 int
4543 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4544 {
4545 	if (!(flag & DDI_PROP_CANSLEEP))
4546 		flag |= DDI_PROP_DONTSLEEP;
4547 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4548 	return (ddi_prop_update_common(dev, dip, flag,
4549 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4550 }
4551 
4552 int
4553 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4554 {
4555 	if (!(flag & DDI_PROP_CANSLEEP))
4556 		flag |= DDI_PROP_DONTSLEEP;
4557 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4558 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4559 	return (ddi_prop_update_common(dev, dip, flag,
4560 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4561 }
4562 
4563 /*
4564  * Support for gathering dynamic properties in devinfo snapshot.
4565  */
4566 void
4567 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4568 {
4569 	DEVI(dip)->devi_prop_dyn_driver = dp;
4570 }
4571 
4572 i_ddi_prop_dyn_t *
4573 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4574 {
4575 	return (DEVI(dip)->devi_prop_dyn_driver);
4576 }
4577 
4578 void
4579 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4580 {
4581 	DEVI(dip)->devi_prop_dyn_parent = dp;
4582 }
4583 
4584 i_ddi_prop_dyn_t *
4585 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4586 {
4587 	return (DEVI(dip)->devi_prop_dyn_parent);
4588 }
4589 
4590 void
4591 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4592 {
4593 	/* for now we invalidate the entire cached snapshot */
4594 	if (dip && dp)
4595 		i_ddi_di_cache_invalidate(KM_SLEEP);
4596 }
4597 
4598 /* ARGSUSED */
4599 void
4600 ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
4601 {
4602 	/* for now we invalidate the entire cached snapshot */
4603 	i_ddi_di_cache_invalidate(KM_SLEEP);
4604 }
4605 
4606 
4607 /*
4608  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4609  *
4610  * if input dip != child_dip, then call is on behalf of child
4611  * to search PROM, do it via ddi_prop_search_common() and ascend only
4612  * if allowed.
4613  *
4614  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4615  * to search for PROM defined props only.
4616  *
4617  * Note that the PROM search is done only if the requested dev
4618  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4619  * have no associated dev, thus are automatically associated with
4620  * DDI_DEV_T_NONE.
4621  *
4622  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4623  *
4624  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4625  * that the property resides in the prom.
4626  */
4627 int
4628 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4629     ddi_prop_op_t prop_op, int mod_flags,
4630     char *name, caddr_t valuep, int *lengthp)
4631 {
4632 	int	len;
4633 	caddr_t buffer;
4634 
4635 	/*
4636 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4637 	 * look in caller's PROM if it's a self identifying device...
4638 	 *
4639 	 * Note that this is very similar to ddi_prop_op, but we
4640 	 * search the PROM instead of the s/w defined properties,
4641 	 * and we are called on by the parent driver to do this for
4642 	 * the child.
4643 	 */
4644 
4645 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4646 	    ndi_dev_is_prom_node(ch_dip) &&
4647 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4648 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4649 		if (len == -1) {
4650 			return (DDI_PROP_NOT_FOUND);
4651 		}
4652 
4653 		/*
4654 		 * If exists only request, we're done
4655 		 */
4656 		if (prop_op == PROP_EXISTS) {
4657 			return (DDI_PROP_FOUND_1275);
4658 		}
4659 
4660 		/*
4661 		 * If length only request or prop length == 0, get out
4662 		 */
4663 		if ((prop_op == PROP_LEN) || (len == 0)) {
4664 			*lengthp = len;
4665 			return (DDI_PROP_FOUND_1275);
4666 		}
4667 
4668 		/*
4669 		 * Allocate buffer if required... (either way `buffer'
4670 		 * is receiving address).
4671 		 */
4672 
4673 		switch (prop_op) {
4674 
4675 		case PROP_LEN_AND_VAL_ALLOC:
4676 
4677 			buffer = kmem_alloc((size_t)len,
4678 			    mod_flags & DDI_PROP_CANSLEEP ?
4679 			    KM_SLEEP : KM_NOSLEEP);
4680 			if (buffer == NULL) {
4681 				return (DDI_PROP_NO_MEMORY);
4682 			}
4683 			*(caddr_t *)valuep = buffer;
4684 			break;
4685 
4686 		case PROP_LEN_AND_VAL_BUF:
4687 
4688 			if (len > (*lengthp)) {
4689 				*lengthp = len;
4690 				return (DDI_PROP_BUF_TOO_SMALL);
4691 			}
4692 
4693 			buffer = valuep;
4694 			break;
4695 
4696 		default:
4697 			break;
4698 		}
4699 
4700 		/*
4701 		 * Call the PROM function to do the copy.
4702 		 */
4703 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4704 		    name, buffer);
4705 
4706 		*lengthp = len; /* return the actual length to the caller */
4707 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4708 		return (DDI_PROP_FOUND_1275);
4709 	}
4710 
4711 	return (DDI_PROP_NOT_FOUND);
4712 }
4713 
4714 /*
4715  * The ddi_bus_prop_op default bus nexus prop op function.
4716  *
4717  * Code to search hardware layer (PROM), if it exists,
4718  * on behalf of child, then, if appropriate, ascend and check
4719  * my own software defined properties...
4720  */
4721 int
4722 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4723     ddi_prop_op_t prop_op, int mod_flags,
4724     char *name, caddr_t valuep, int *lengthp)
4725 {
4726 	int	error;
4727 
4728 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4729 	    name, valuep, lengthp);
4730 
4731 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4732 	    error == DDI_PROP_BUF_TOO_SMALL)
4733 		return (error);
4734 
4735 	if (error == DDI_PROP_NO_MEMORY) {
4736 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4737 		return (DDI_PROP_NO_MEMORY);
4738 	}
4739 
4740 	/*
4741 	 * Check the 'options' node as a last resort
4742 	 */
4743 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4744 		return (DDI_PROP_NOT_FOUND);
4745 
4746 	if (ch_dip == ddi_root_node())	{
4747 		/*
4748 		 * As a last resort, when we've reached
4749 		 * the top and still haven't found the
4750 		 * property, see if the desired property
4751 		 * is attached to the options node.
4752 		 *
4753 		 * The options dip is attached right after boot.
4754 		 */
4755 		ASSERT(options_dip != NULL);
4756 		/*
4757 		 * Force the "don't pass" flag to *just* see
4758 		 * what the options node has to offer.
4759 		 */
4760 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4761 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4762 		    (uint_t *)lengthp));
4763 	}
4764 
4765 	/*
4766 	 * Otherwise, continue search with parent's s/w defined properties...
4767 	 * NOTE: Using `dip' in following call increments the level.
4768 	 */
4769 
4770 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4771 	    name, valuep, (uint_t *)lengthp));
4772 }
4773 
4774 /*
4775  * External property functions used by other parts of the kernel...
4776  */
4777 
4778 /*
4779  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4780  */
4781 
4782 int
4783 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4784     caddr_t valuep, int *lengthp)
4785 {
4786 	_NOTE(ARGUNUSED(type))
4787 	dev_info_t *devi;
4788 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4789 	int error;
4790 
4791 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4792 		return (DDI_PROP_NOT_FOUND);
4793 
4794 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4795 	ddi_release_devi(devi);
4796 	return (error);
4797 }
4798 
4799 /*
4800  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4801  */
4802 
4803 int
4804 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4805     caddr_t valuep, int *lengthp)
4806 {
4807 	_NOTE(ARGUNUSED(type))
4808 	dev_info_t *devi;
4809 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4810 	int error;
4811 
4812 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4813 		return (DDI_PROP_NOT_FOUND);
4814 
4815 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4816 	ddi_release_devi(devi);
4817 	return (error);
4818 }
4819 
4820 /*
4821  * e_ddi_getprop:	See comments for ddi_getprop.
4822  */
4823 int
4824 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4825 {
4826 	_NOTE(ARGUNUSED(type))
4827 	dev_info_t *devi;
4828 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4829 	int	propvalue = defvalue;
4830 	int	proplength = sizeof (int);
4831 	int	error;
4832 
4833 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4834 		return (defvalue);
4835 
4836 	error = cdev_prop_op(dev, devi, prop_op,
4837 	    flags, name, (caddr_t)&propvalue, &proplength);
4838 	ddi_release_devi(devi);
4839 
4840 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4841 		propvalue = 1;
4842 
4843 	return (propvalue);
4844 }
4845 
4846 /*
4847  * e_ddi_getprop_int64:
4848  *
4849  * This is a typed interfaces, but predates typed properties. With the
4850  * introduction of typed properties the framework tries to ensure
4851  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4852  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4853  * typed interface invokes legacy (non-typed) interfaces:
4854  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4855  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4856  * this type of lookup as a single operation we invoke the legacy
4857  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4858  * framework ddi_prop_op(9F) implementation is expected to check for
4859  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4860  * (currently TYPE_INT64).
4861  */
4862 int64_t
4863 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4864     int flags, int64_t defvalue)
4865 {
4866 	_NOTE(ARGUNUSED(type))
4867 	dev_info_t	*devi;
4868 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4869 	int64_t		propvalue = defvalue;
4870 	int		proplength = sizeof (propvalue);
4871 	int		error;
4872 
4873 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4874 		return (defvalue);
4875 
4876 	error = cdev_prop_op(dev, devi, prop_op, flags |
4877 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4878 	ddi_release_devi(devi);
4879 
4880 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4881 		propvalue = 1;
4882 
4883 	return (propvalue);
4884 }
4885 
4886 /*
4887  * e_ddi_getproplen:	See comments for ddi_getproplen.
4888  */
4889 int
4890 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4891 {
4892 	_NOTE(ARGUNUSED(type))
4893 	dev_info_t *devi;
4894 	ddi_prop_op_t prop_op = PROP_LEN;
4895 	int error;
4896 
4897 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4898 		return (DDI_PROP_NOT_FOUND);
4899 
4900 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4901 	ddi_release_devi(devi);
4902 	return (error);
4903 }
4904 
4905 /*
4906  * Routines to get at elements of the dev_info structure
4907  */
4908 
4909 /*
4910  * ddi_binding_name: Return the driver binding name of the devinfo node
4911  *		This is the name the OS used to bind the node to a driver.
4912  */
4913 char *
4914 ddi_binding_name(dev_info_t *dip)
4915 {
4916 	return (DEVI(dip)->devi_binding_name);
4917 }
4918 
4919 /*
4920  * ddi_driver_major: Return the major number of the driver that
4921  *		the supplied devinfo is bound to (-1 if none)
4922  */
4923 major_t
4924 ddi_driver_major(dev_info_t *devi)
4925 {
4926 	return (DEVI(devi)->devi_major);
4927 }
4928 
4929 /*
4930  * ddi_driver_name: Return the normalized driver name. this is the
4931  *		actual driver name
4932  */
4933 const char *
4934 ddi_driver_name(dev_info_t *devi)
4935 {
4936 	major_t major;
4937 
4938 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4939 		return (ddi_major_to_name(major));
4940 
4941 	return (ddi_node_name(devi));
4942 }
4943 
4944 /*
4945  * i_ddi_set_binding_name:	Set binding name.
4946  *
4947  *	Set the binding name to the given name.
4948  *	This routine is for use by the ddi implementation, not by drivers.
4949  */
4950 void
4951 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4952 {
4953 	DEVI(dip)->devi_binding_name = name;
4954 
4955 }
4956 
4957 /*
4958  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4959  * the implementation has used to bind the node to a driver.
4960  */
4961 char *
4962 ddi_get_name(dev_info_t *dip)
4963 {
4964 	return (DEVI(dip)->devi_binding_name);
4965 }
4966 
4967 /*
4968  * ddi_node_name: Return the name property of the devinfo node
4969  *		This may differ from ddi_binding_name if the node name
4970  *		does not define a binding to a driver (i.e. generic names).
4971  */
4972 char *
4973 ddi_node_name(dev_info_t *dip)
4974 {
4975 	return (DEVI(dip)->devi_node_name);
4976 }
4977 
4978 
4979 /*
4980  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4981  */
4982 int
4983 ddi_get_nodeid(dev_info_t *dip)
4984 {
4985 	return (DEVI(dip)->devi_nodeid);
4986 }
4987 
4988 int
4989 ddi_get_instance(dev_info_t *dip)
4990 {
4991 	return (DEVI(dip)->devi_instance);
4992 }
4993 
4994 struct dev_ops *
4995 ddi_get_driver(dev_info_t *dip)
4996 {
4997 	return (DEVI(dip)->devi_ops);
4998 }
4999 
5000 void
5001 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
5002 {
5003 	DEVI(dip)->devi_ops = devo;
5004 }
5005 
5006 /*
5007  * ddi_set_driver_private/ddi_get_driver_private:
5008  * Get/set device driver private data in devinfo.
5009  */
5010 void
5011 ddi_set_driver_private(dev_info_t *dip, void *data)
5012 {
5013 	DEVI(dip)->devi_driver_data = data;
5014 }
5015 
5016 void *
5017 ddi_get_driver_private(dev_info_t *dip)
5018 {
5019 	return (DEVI(dip)->devi_driver_data);
5020 }
5021 
5022 /*
5023  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5024  */
5025 
5026 dev_info_t *
5027 ddi_get_parent(dev_info_t *dip)
5028 {
5029 	return ((dev_info_t *)DEVI(dip)->devi_parent);
5030 }
5031 
5032 dev_info_t *
5033 ddi_get_child(dev_info_t *dip)
5034 {
5035 	return ((dev_info_t *)DEVI(dip)->devi_child);
5036 }
5037 
5038 dev_info_t *
5039 ddi_get_next_sibling(dev_info_t *dip)
5040 {
5041 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
5042 }
5043 
5044 dev_info_t *
5045 ddi_get_next(dev_info_t *dip)
5046 {
5047 	return ((dev_info_t *)DEVI(dip)->devi_next);
5048 }
5049 
5050 void
5051 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
5052 {
5053 	DEVI(dip)->devi_next = DEVI(nextdip);
5054 }
5055 
5056 /*
5057  * ddi_root_node:		Return root node of devinfo tree
5058  */
5059 
5060 dev_info_t *
5061 ddi_root_node(void)
5062 {
5063 	extern dev_info_t *top_devinfo;
5064 
5065 	return (top_devinfo);
5066 }
5067 
5068 /*
5069  * Miscellaneous functions:
5070  */
5071 
5072 /*
5073  * Implementation specific hooks
5074  */
5075 
5076 void
5077 ddi_report_dev(dev_info_t *d)
5078 {
5079 	char *b;
5080 
5081 	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);
5082 
5083 	/*
5084 	 * If this devinfo node has cb_ops, it's implicitly accessible from
5085 	 * userland, so we print its full name together with the instance
5086 	 * number 'abbreviation' that the driver may use internally.
5087 	 */
5088 	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
5089 	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
5090 		cmn_err(CE_CONT, "?%s%d is %s\n",
5091 		    ddi_driver_name(d), ddi_get_instance(d),
5092 		    ddi_pathname(d, b));
5093 		kmem_free(b, MAXPATHLEN);
5094 	}
5095 }
5096 
5097 /*
5098  * ddi_ctlops() is described in the assembler not to buy a new register
5099  * window when it's called and can reduce cost in climbing the device tree
5100  * without using the tail call optimization.
5101  */
5102 int
5103 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5104 {
5105 	int ret;
5106 
5107 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5108 	    (void *)&rnumber, (void *)result);
5109 
5110 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5111 }
5112 
5113 int
5114 ddi_dev_nregs(dev_info_t *dev, int *result)
5115 {
5116 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5117 }
5118 
5119 int
5120 ddi_dev_is_sid(dev_info_t *d)
5121 {
5122 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5123 }
5124 
5125 int
5126 ddi_slaveonly(dev_info_t *d)
5127 {
5128 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5129 }
5130 
5131 int
5132 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5133 {
5134 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5135 }
5136 
5137 int
5138 ddi_streams_driver(dev_info_t *dip)
5139 {
5140 	if (i_ddi_devi_attached(dip) &&
5141 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5142 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5143 		return (DDI_SUCCESS);
5144 	return (DDI_FAILURE);
5145 }
5146 
5147 /*
5148  * callback free list
5149  */
5150 
5151 static int ncallbacks;
5152 static int nc_low = 170;
5153 static int nc_med = 512;
5154 static int nc_high = 2048;
5155 static struct ddi_callback *callbackq;
5156 static struct ddi_callback *callbackqfree;
5157 
5158 /*
5159  * set/run callback lists
5160  */
5161 struct	cbstats	{
5162 	kstat_named_t	cb_asked;
5163 	kstat_named_t	cb_new;
5164 	kstat_named_t	cb_run;
5165 	kstat_named_t	cb_delete;
5166 	kstat_named_t	cb_maxreq;
5167 	kstat_named_t	cb_maxlist;
5168 	kstat_named_t	cb_alloc;
5169 	kstat_named_t	cb_runouts;
5170 	kstat_named_t	cb_L2;
5171 	kstat_named_t	cb_grow;
5172 } cbstats = {
5173 	{"asked",	KSTAT_DATA_UINT32},
5174 	{"new",		KSTAT_DATA_UINT32},
5175 	{"run",		KSTAT_DATA_UINT32},
5176 	{"delete",	KSTAT_DATA_UINT32},
5177 	{"maxreq",	KSTAT_DATA_UINT32},
5178 	{"maxlist",	KSTAT_DATA_UINT32},
5179 	{"alloc",	KSTAT_DATA_UINT32},
5180 	{"runouts",	KSTAT_DATA_UINT32},
5181 	{"L2",		KSTAT_DATA_UINT32},
5182 	{"grow",	KSTAT_DATA_UINT32},
5183 };
5184 
5185 #define	nc_asked	cb_asked.value.ui32
5186 #define	nc_new		cb_new.value.ui32
5187 #define	nc_run		cb_run.value.ui32
5188 #define	nc_delete	cb_delete.value.ui32
5189 #define	nc_maxreq	cb_maxreq.value.ui32
5190 #define	nc_maxlist	cb_maxlist.value.ui32
5191 #define	nc_alloc	cb_alloc.value.ui32
5192 #define	nc_runouts	cb_runouts.value.ui32
5193 #define	nc_L2		cb_L2.value.ui32
5194 #define	nc_grow		cb_grow.value.ui32
5195 
5196 static kmutex_t ddi_callback_mutex;
5197 
5198 /*
5199  * callbacks are handled using a L1/L2 cache. The L1 cache
5200  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5201  * we can't get callbacks from the L1 cache [because pageout is doing
5202  * I/O at the time freemem is 0], we allocate callbacks out of the
5203  * L2 cache. The L2 cache is static and depends on the memory size.
5204  * [We might also count the number of devices at probe time and
5205  * allocate one structure per device and adjust for deferred attach]
5206  */
5207 void
5208 impl_ddi_callback_init(void)
5209 {
5210 	int	i;
5211 	uint_t	physmegs;
5212 	kstat_t	*ksp;
5213 
5214 	physmegs = physmem >> (20 - PAGESHIFT);
5215 	if (physmegs < 48) {
5216 		ncallbacks = nc_low;
5217 	} else if (physmegs < 128) {
5218 		ncallbacks = nc_med;
5219 	} else {
5220 		ncallbacks = nc_high;
5221 	}
5222 
5223 	/*
5224 	 * init free list
5225 	 */
5226 	callbackq = kmem_zalloc(
5227 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5228 	for (i = 0; i < ncallbacks-1; i++)
5229 		callbackq[i].c_nfree = &callbackq[i+1];
5230 	callbackqfree = callbackq;
5231 
5232 	/* init kstats */
5233 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5234 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5235 		ksp->ks_data = (void *) &cbstats;
5236 		kstat_install(ksp);
5237 	}
5238 
5239 }
5240 
5241 static void
5242 callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
5243 	int count)
5244 {
5245 	struct ddi_callback *list, *marker, *new;
5246 	size_t size = sizeof (struct ddi_callback);
5247 
5248 	list = marker = (struct ddi_callback *)*listid;
5249 	while (list != NULL) {
5250 		if (list->c_call == funcp && list->c_arg == arg) {
5251 			list->c_count += count;
5252 			return;
5253 		}
5254 		marker = list;
5255 		list = list->c_nlist;
5256 	}
5257 	new = kmem_alloc(size, KM_NOSLEEP);
5258 	if (new == NULL) {
5259 		new = callbackqfree;
5260 		if (new == NULL) {
5261 			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
5262 			    &size, KM_NOSLEEP | KM_PANIC);
5263 			cbstats.nc_grow++;
5264 		} else {
5265 			callbackqfree = new->c_nfree;
5266 			cbstats.nc_L2++;
5267 		}
5268 	}
5269 	if (marker != NULL) {
5270 		marker->c_nlist = new;
5271 	} else {
5272 		*listid = (uintptr_t)new;
5273 	}
5274 	new->c_size = size;
5275 	new->c_nlist = NULL;
5276 	new->c_call = funcp;
5277 	new->c_arg = arg;
5278 	new->c_count = count;
5279 	cbstats.nc_new++;
5280 	cbstats.nc_alloc++;
5281 	if (cbstats.nc_alloc > cbstats.nc_maxlist)
5282 		cbstats.nc_maxlist = cbstats.nc_alloc;
5283 }
5284 
5285 void
5286 ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
5287 {
5288 	mutex_enter(&ddi_callback_mutex);
5289 	cbstats.nc_asked++;
5290 	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
5291 		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
5292 	(void) callback_insert(funcp, arg, listid, 1);
5293 	mutex_exit(&ddi_callback_mutex);
5294 }
5295 
5296 static void
5297 real_callback_run(void *Queue)
5298 {
5299 	int (*funcp)(caddr_t);
5300 	caddr_t arg;
5301 	int count, rval;
5302 	uintptr_t *listid;
5303 	struct ddi_callback *list, *marker;
5304 	int check_pending = 1;
5305 	int pending = 0;
5306 
5307 	do {
5308 		mutex_enter(&ddi_callback_mutex);
5309 		listid = Queue;
5310 		list = (struct ddi_callback *)*listid;
5311 		if (list == NULL) {
5312 			mutex_exit(&ddi_callback_mutex);
5313 			return;
5314 		}
5315 		if (check_pending) {
5316 			marker = list;
5317 			while (marker != NULL) {
5318 				pending += marker->c_count;
5319 				marker = marker->c_nlist;
5320 			}
5321 			check_pending = 0;
5322 		}
5323 		ASSERT(pending > 0);
5324 		ASSERT(list->c_count > 0);
5325 		funcp = list->c_call;
5326 		arg = list->c_arg;
5327 		count = list->c_count;
5328 		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
5329 		if (list >= &callbackq[0] &&
5330 		    list <= &callbackq[ncallbacks-1]) {
5331 			list->c_nfree = callbackqfree;
5332 			callbackqfree = list;
5333 		} else
5334 			kmem_free(list, list->c_size);
5335 
5336 		cbstats.nc_delete++;
5337 		cbstats.nc_alloc--;
5338 		mutex_exit(&ddi_callback_mutex);
5339 
5340 		do {
5341 			if ((rval = (*funcp)(arg)) == 0) {
5342 				pending -= count;
5343 				mutex_enter(&ddi_callback_mutex);
5344 				(void) callback_insert(funcp, arg, listid,
5345 				    count);
5346 				cbstats.nc_runouts++;
5347 			} else {
5348 				pending--;
5349 				mutex_enter(&ddi_callback_mutex);
5350 				cbstats.nc_run++;
5351 			}
5352 			mutex_exit(&ddi_callback_mutex);
5353 		} while (rval != 0 && (--count > 0));
5354 	} while (pending > 0);
5355 }
5356 
5357 void
5358 ddi_run_callback(uintptr_t *listid)
5359 {
5360 	softcall(real_callback_run, listid);
5361 }
5362 
5363 /*
5364  * ddi_periodic_t
5365  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5366  *     int level)
5367  *
5368  * INTERFACE LEVEL
5369  *      Solaris DDI specific (Solaris DDI)
5370  *
5371  * PARAMETERS
5372  *      func: the callback function
5373  *
5374  *            The callback function will be invoked. The function is invoked
5375  *            in kernel context if the argument level passed is the zero.
5376  *            Otherwise it's invoked in interrupt context at the specified
5377  *            level.
5378  *
5379  *       arg: the argument passed to the callback function
5380  *
5381  *  interval: interval time
5382  *
5383  *    level : callback interrupt level
5384  *
5385  *            If the value is the zero, the callback function is invoked
5386  *            in kernel context. If the value is more than the zero, but
5387  *            less than or equal to ten, the callback function is invoked in
5388  *            interrupt context at the specified interrupt level, which may
5389  *            be used for real time applications.
5390  *
5391  *            This value must be in range of 0-10, which can be a numeric
5392  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5393  *
5394  * DESCRIPTION
5395  *      ddi_periodic_add(9F) schedules the specified function to be
5396  *      periodically invoked in the interval time.
5397  *
5398  *      As well as timeout(9F), the exact time interval over which the function
5399  *      takes effect cannot be guaranteed, but the value given is a close
5400  *      approximation.
5401  *
5402  *      Drivers waiting on behalf of processes with real-time constraints must
5403  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5404  *
5405  * RETURN VALUES
5406  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5407  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5408  *
5409  * CONTEXT
5410  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5411  *      it cannot be called in interrupt context, which is different from
5412  *      timeout(9F).
5413  */
5414 ddi_periodic_t
5415 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5416 {
5417 	/*
5418 	 * Sanity check of the argument level.
5419 	 */
5420 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5421 		cmn_err(CE_PANIC,
5422 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5423 
5424 	/*
5425 	 * Sanity check of the context. ddi_periodic_add() cannot be
5426 	 * called in either interrupt context or high interrupt context.
5427 	 */
5428 	if (servicing_interrupt())
5429 		cmn_err(CE_PANIC,
5430 		    "ddi_periodic_add: called in (high) interrupt context.");
5431 
5432 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5433 }
5434 
5435 /*
5436  * void
5437  * ddi_periodic_delete(ddi_periodic_t req)
5438  *
5439  * INTERFACE LEVEL
5440  *     Solaris DDI specific (Solaris DDI)
5441  *
5442  * PARAMETERS
5443  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5444  *     previously.
5445  *
5446  * DESCRIPTION
5447  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5448  *     previously requested.
5449  *
5450  *     ddi_periodic_delete(9F) will not return until the pending request
5451  *     is canceled or executed.
5452  *
5453  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5454  *     timeout which is either running on another CPU, or has already
5455  *     completed causes no problems. However, unlike untimeout(9F), there is
5456  *     no restrictions on the lock which might be held across the call to
5457  *     ddi_periodic_delete(9F).
5458  *
5459  *     Drivers should be structured with the understanding that the arrival of
5460  *     both an interrupt and a timeout for that interrupt can occasionally
5461  *     occur, in either order.
5462  *
5463  * CONTEXT
5464  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5465  *     it cannot be called in interrupt context, which is different from
5466  *     untimeout(9F).
5467  */
5468 void
5469 ddi_periodic_delete(ddi_periodic_t req)
5470 {
5471 	/*
5472 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5473 	 * called in either interrupt context or high interrupt context.
5474 	 */
5475 	if (servicing_interrupt())
5476 		cmn_err(CE_PANIC,
5477 		    "ddi_periodic_delete: called in (high) interrupt context.");
5478 
5479 	i_untimeout((timeout_t)req);
5480 }
5481 
5482 dev_info_t *
5483 nodevinfo(dev_t dev, int otyp)
5484 {
5485 	_NOTE(ARGUNUSED(dev, otyp))
5486 	return ((dev_info_t *)0);
5487 }
5488 
5489 /*
5490  * A driver should support its own getinfo(9E) entry point. This function
5491  * is provided as a convenience for ON drivers that don't expect their
5492  * getinfo(9E) entry point to be called. A driver that uses this must not
5493  * call ddi_create_minor_node.
5494  */
5495 int
5496 ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5497 {
5498 	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
5499 	return (DDI_FAILURE);
5500 }
5501 
5502 /*
5503  * A driver should support its own getinfo(9E) entry point. This function
5504  * is provided as a convenience for ON drivers that where the minor number
5505  * is the instance. Drivers that do not have 1:1 mapping must implement
5506  * their own getinfo(9E) function.
5507  */
5508 int
5509 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5510     void *arg, void **result)
5511 {
5512 	_NOTE(ARGUNUSED(dip))
5513 	int	instance;
5514 
5515 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5516 		return (DDI_FAILURE);
5517 
5518 	instance = getminor((dev_t)(uintptr_t)arg);
5519 	*result = (void *)(uintptr_t)instance;
5520 	return (DDI_SUCCESS);
5521 }
5522 
5523 int
5524 ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
5525 {
5526 	_NOTE(ARGUNUSED(devi, cmd))
5527 	return (DDI_FAILURE);
5528 }
5529 
5530 int
5531 ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
5532     struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
5533 {
5534 	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
5535 	return (DDI_DMA_NOMAPPING);
5536 }
5537 
5538 int
5539 ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
5540     int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
5541 {
5542 	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
5543 	return (DDI_DMA_BADATTR);
5544 }
5545 
5546 int
5547 ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
5548     ddi_dma_handle_t handle)
5549 {
5550 	_NOTE(ARGUNUSED(dip, rdip, handle))
5551 	return (DDI_FAILURE);
5552 }
5553 
5554 int
5555 ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
5556     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
5557     ddi_dma_cookie_t *cp, uint_t *ccountp)
5558 {
5559 	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
5560 	return (DDI_DMA_NOMAPPING);
5561 }
5562 
5563 int
5564 ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
5565     ddi_dma_handle_t handle)
5566 {
5567 	_NOTE(ARGUNUSED(dip, rdip, handle))
5568 	return (DDI_FAILURE);
5569 }
5570 
5571 int
5572 ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
5573     ddi_dma_handle_t handle, off_t off, size_t len,
5574     uint_t cache_flags)
5575 {
5576 	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
5577 	return (DDI_FAILURE);
5578 }
5579 
5580 int
5581 ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
5582     ddi_dma_handle_t handle, uint_t win, off_t *offp,
5583     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
5584 {
5585 	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
5586 	return (DDI_FAILURE);
5587 }
5588 
5589 int
5590 ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
5591     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
5592     off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
5593 {
5594 	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
5595 	return (DDI_FAILURE);
5596 }
5597 
5598 void
5599 ddivoid(void)
5600 {}
5601 
5602 int
5603 nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
5604     struct pollhead **pollhdrp)
5605 {
5606 	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
5607 	return (ENXIO);
5608 }
5609 
5610 cred_t *
5611 ddi_get_cred(void)
5612 {
5613 	return (CRED());
5614 }
5615 
5616 clock_t
5617 ddi_get_lbolt(void)
5618 {
5619 	return (lbolt);
5620 }
5621 
5622 time_t
5623 ddi_get_time(void)
5624 {
5625 	time_t	now;
5626 
5627 	if ((now = gethrestime_sec()) == 0) {
5628 		timestruc_t ts;
5629 		mutex_enter(&tod_lock);
5630 		ts = tod_get();
5631 		mutex_exit(&tod_lock);
5632 		return (ts.tv_sec);
5633 	} else {
5634 		return (now);
5635 	}
5636 }
5637 
5638 pid_t
5639 ddi_get_pid(void)
5640 {
5641 	return (ttoproc(curthread)->p_pid);
5642 }
5643 
5644 kt_did_t
5645 ddi_get_kt_did(void)
5646 {
5647 	return (curthread->t_did);
5648 }
5649 
5650 /*
5651  * This function returns B_TRUE if the caller can reasonably expect that a call
5652  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5653  * by user-level signal.  If it returns B_FALSE, then the caller should use
5654  * other means to make certain that the wait will not hang "forever."
5655  *
5656  * It does not check the signal mask, nor for reception of any particular
5657  * signal.
5658  *
5659  * Currently, a thread can receive a signal if it's not a kernel thread and it
5660  * is not in the middle of exit(2) tear-down.  Threads that are in that
5661  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5662  * cv_timedwait, and qwait_sig to qwait.
5663  */
5664 boolean_t
5665 ddi_can_receive_sig(void)
5666 {
5667 	proc_t *pp;
5668 
5669 	if (curthread->t_proc_flag & TP_LWPEXIT)
5670 		return (B_FALSE);
5671 	if ((pp = ttoproc(curthread)) == NULL)
5672 		return (B_FALSE);
5673 	return (pp->p_as != &kas);
5674 }
5675 
5676 /*
5677  * Swap bytes in 16-bit [half-]words
5678  */
5679 void
5680 swab(void *src, void *dst, size_t nbytes)
5681 {
5682 	uchar_t *pf = (uchar_t *)src;
5683 	uchar_t *pt = (uchar_t *)dst;
5684 	uchar_t tmp;
5685 	int nshorts;
5686 
5687 	nshorts = nbytes >> 1;
5688 
5689 	while (--nshorts >= 0) {
5690 		tmp = *pf++;
5691 		*pt++ = *pf++;
5692 		*pt++ = tmp;
5693 	}
5694 }
5695 
5696 static void
5697 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5698 {
5699 	int			circ;
5700 	struct ddi_minor_data	*dp;
5701 
5702 	ndi_devi_enter(ddip, &circ);
5703 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5704 		DEVI(ddip)->devi_minor = dmdp;
5705 	} else {
5706 		while (dp->next != (struct ddi_minor_data *)NULL)
5707 			dp = dp->next;
5708 		dp->next = dmdp;
5709 	}
5710 	ndi_devi_exit(ddip, circ);
5711 }
5712 
5713 /*
5714  * Part of the obsolete SunCluster DDI Hooks.
5715  * Keep for binary compatibility
5716  */
5717 minor_t
5718 ddi_getiminor(dev_t dev)
5719 {
5720 	return (getminor(dev));
5721 }
5722 
5723 static int
5724 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5725 {
5726 	int se_flag;
5727 	int kmem_flag;
5728 	int se_err;
5729 	char *pathname, *class_name;
5730 	sysevent_t *ev = NULL;
5731 	sysevent_id_t eid;
5732 	sysevent_value_t se_val;
5733 	sysevent_attr_list_t *ev_attr_list = NULL;
5734 
5735 	/* determine interrupt context */
5736 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5737 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5738 
5739 	i_ddi_di_cache_invalidate(kmem_flag);
5740 
5741 #ifdef DEBUG
5742 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5743 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5744 		    "interrupt level by driver %s",
5745 		    ddi_driver_name(dip));
5746 	}
5747 #endif /* DEBUG */
5748 
5749 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5750 	if (ev == NULL) {
5751 		goto fail;
5752 	}
5753 
5754 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5755 	if (pathname == NULL) {
5756 		sysevent_free(ev);
5757 		goto fail;
5758 	}
5759 
5760 	(void) ddi_pathname(dip, pathname);
5761 	ASSERT(strlen(pathname));
5762 	se_val.value_type = SE_DATA_TYPE_STRING;
5763 	se_val.value.sv_string = pathname;
5764 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5765 	    &se_val, se_flag) != 0) {
5766 		kmem_free(pathname, MAXPATHLEN);
5767 		sysevent_free(ev);
5768 		goto fail;
5769 	}
5770 	kmem_free(pathname, MAXPATHLEN);
5771 
5772 	/* add the device class attribute */
5773 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5774 		se_val.value_type = SE_DATA_TYPE_STRING;
5775 		se_val.value.sv_string = class_name;
5776 		if (sysevent_add_attr(&ev_attr_list,
5777 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5778 			sysevent_free_attr(ev_attr_list);
5779 			goto fail;
5780 		}
5781 	}
5782 
5783 	/*
5784 	 * allow for NULL minor names
5785 	 */
5786 	if (minor_name != NULL) {
5787 		se_val.value.sv_string = minor_name;
5788 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5789 		    &se_val, se_flag) != 0) {
5790 			sysevent_free_attr(ev_attr_list);
5791 			sysevent_free(ev);
5792 			goto fail;
5793 		}
5794 	}
5795 
5796 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5797 		sysevent_free_attr(ev_attr_list);
5798 		sysevent_free(ev);
5799 		goto fail;
5800 	}
5801 
5802 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5803 		if (se_err == SE_NO_TRANSPORT) {
5804 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5805 			    "for driver %s (%s). Run devfsadm -i %s",
5806 			    ddi_driver_name(dip), "syseventd not responding",
5807 			    ddi_driver_name(dip));
5808 		} else {
5809 			sysevent_free(ev);
5810 			goto fail;
5811 		}
5812 	}
5813 
5814 	sysevent_free(ev);
5815 	return (DDI_SUCCESS);
5816 fail:
5817 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5818 	    "for driver %s. Run devfsadm -i %s",
5819 	    ddi_driver_name(dip), ddi_driver_name(dip));
5820 	return (DDI_SUCCESS);
5821 }
5822 
5823 /*
5824  * failing to remove a minor node is not of interest
5825  * therefore we do not generate an error message
5826  */
5827 static int
5828 i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
5829 {
5830 	char *pathname, *class_name;
5831 	sysevent_t *ev;
5832 	sysevent_id_t eid;
5833 	sysevent_value_t se_val;
5834 	sysevent_attr_list_t *ev_attr_list = NULL;
5835 
5836 	/*
5837 	 * only log ddi_remove_minor_node() calls outside the scope
5838 	 * of attach/detach reconfigurations and when the dip is
5839 	 * still initialized.
5840 	 */
5841 	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
5842 	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
5843 		return (DDI_SUCCESS);
5844 	}
5845 
5846 	i_ddi_di_cache_invalidate(KM_SLEEP);
5847 
5848 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
5849 	if (ev == NULL) {
5850 		return (DDI_SUCCESS);
5851 	}
5852 
5853 	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
5854 	if (pathname == NULL) {
5855 		sysevent_free(ev);
5856 		return (DDI_SUCCESS);
5857 	}
5858 
5859 	(void) ddi_pathname(dip, pathname);
5860 	ASSERT(strlen(pathname));
5861 	se_val.value_type = SE_DATA_TYPE_STRING;
5862 	se_val.value.sv_string = pathname;
5863 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5864 	    &se_val, SE_SLEEP) != 0) {
5865 		kmem_free(pathname, MAXPATHLEN);
5866 		sysevent_free(ev);
5867 		return (DDI_SUCCESS);
5868 	}
5869 
5870 	kmem_free(pathname, MAXPATHLEN);
5871 
5872 	/*
5873 	 * allow for NULL minor names
5874 	 */
5875 	if (minor_name != NULL) {
5876 		se_val.value.sv_string = minor_name;
5877 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5878 		    &se_val, SE_SLEEP) != 0) {
5879 			sysevent_free_attr(ev_attr_list);
5880 			goto fail;
5881 		}
5882 	}
5883 
5884 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5885 		/* add the device class, driver name and instance attributes */
5886 
5887 		se_val.value_type = SE_DATA_TYPE_STRING;
5888 		se_val.value.sv_string = class_name;
5889 		if (sysevent_add_attr(&ev_attr_list,
5890 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5891 			sysevent_free_attr(ev_attr_list);
5892 			goto fail;
5893 		}
5894 
5895 		se_val.value_type = SE_DATA_TYPE_STRING;
5896 		se_val.value.sv_string = (char *)ddi_driver_name(dip);
5897 		if (sysevent_add_attr(&ev_attr_list,
5898 		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
5899 			sysevent_free_attr(ev_attr_list);
5900 			goto fail;
5901 		}
5902 
5903 		se_val.value_type = SE_DATA_TYPE_INT32;
5904 		se_val.value.sv_int32 = ddi_get_instance(dip);
5905 		if (sysevent_add_attr(&ev_attr_list,
5906 		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
5907 			sysevent_free_attr(ev_attr_list);
5908 			goto fail;
5909 		}
5910 
5911 	}
5912 
5913 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5914 		sysevent_free_attr(ev_attr_list);
5915 	} else {
5916 		(void) log_sysevent(ev, SE_SLEEP, &eid);
5917 	}
5918 fail:
5919 	sysevent_free(ev);
5920 	return (DDI_SUCCESS);
5921 }
5922 
5923 /*
5924  * Derive the device class of the node.
5925  * Device class names aren't defined yet. Until this is done we use
5926  * devfs event subclass names as device class names.
5927  */
5928 static int
5929 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5930 {
5931 	int rv = DDI_SUCCESS;
5932 
5933 	if (i_ddi_devi_class(dip) == NULL) {
5934 		if (strncmp(node_type, DDI_NT_BLOCK,
5935 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5936 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5937 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5938 		    strcmp(node_type, DDI_NT_FD) != 0) {
5939 
5940 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5941 
5942 		} else if (strncmp(node_type, DDI_NT_NET,
5943 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5944 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5945 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5946 
5947 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5948 
5949 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5950 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5951 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5952 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5953 
5954 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5955 
5956 		} else if (strncmp(node_type, DDI_PSEUDO,
5957 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5958 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5959 		    sizeof (ESC_LOFI) -1) == 0)) {
5960 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5961 		}
5962 	}
5963 
5964 	return (rv);
5965 }
5966 
5967 /*
5968  * Check compliance with PSARC 2003/375:
5969  *
5970  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5971  * exceed IFNAMSIZ (16) characters in length.
5972  */
5973 static boolean_t
5974 verify_name(char *name)
5975 {
5976 	size_t	len = strlen(name);
5977 	char	*cp;
5978 
5979 	if (len == 0 || len > IFNAMSIZ)
5980 		return (B_FALSE);
5981 
5982 	for (cp = name; *cp != '\0'; cp++) {
5983 		if (!isalnum(*cp) && *cp != '_')
5984 			return (B_FALSE);
5985 	}
5986 
5987 	return (B_TRUE);
5988 }
5989 
5990 /*
5991  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
5992  *				attach it to the given devinfo node.
5993  */
5994 
5995 int
5996 ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
5997     minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
5998     const char *read_priv, const char *write_priv, mode_t priv_mode)
5999 {
6000 	struct ddi_minor_data *dmdp;
6001 	major_t major;
6002 
6003 	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
6004 		return (DDI_FAILURE);
6005 
6006 	if (name == NULL)
6007 		return (DDI_FAILURE);
6008 
6009 	/*
6010 	 * Log a message if the minor number the driver is creating
6011 	 * is not expressible on the on-disk filesystem (currently
6012 	 * this is limited to 18 bits both by UFS). The device can
6013 	 * be opened via devfs, but not by device special files created
6014 	 * via mknod().
6015 	 */
6016 	if (minor_num > L_MAXMIN32) {
6017 		cmn_err(CE_WARN,
6018 		    "%s%d:%s minor 0x%x too big for 32-bit applications",
6019 		    ddi_driver_name(dip), ddi_get_instance(dip),
6020 		    name, minor_num);
6021 		return (DDI_FAILURE);
6022 	}
6023 
6024 	/* dip must be bound and attached */
6025 	major = ddi_driver_major(dip);
6026 	ASSERT(major != DDI_MAJOR_T_NONE);
6027 
6028 	/*
6029 	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
6030 	 */
6031 	if (node_type == NULL) {
6032 		node_type = DDI_PSEUDO;
6033 		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
6034 		    " minor node %s; default to DDI_PSEUDO",
6035 		    ddi_driver_name(dip), ddi_get_instance(dip), name));
6036 	}
6037 
6038 	/*
6039 	 * If the driver is a network driver, ensure that the name falls within
6040 	 * the interface naming constraints specified by PSARC/2003/375.
6041 	 */
6042 	if (strcmp(node_type, DDI_NT_NET) == 0) {
6043 		if (!verify_name(name))
6044 			return (DDI_FAILURE);
6045 
6046 		if (mtype == DDM_MINOR) {
6047 			struct devnames *dnp = &devnamesp[major];
6048 
6049 			/* Mark driver as a network driver */
6050 			LOCK_DEV_OPS(&dnp->dn_lock);
6051 			dnp->dn_flags |= DN_NETWORK_DRIVER;
6052 			UNLOCK_DEV_OPS(&dnp->dn_lock);
6053 		}
6054 	}
6055 
6056 	if (mtype == DDM_MINOR) {
6057 		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
6058 		    DDI_SUCCESS)
6059 			return (DDI_FAILURE);
6060 	}
6061 
6062 	/*
6063 	 * Take care of minor number information for the node.
6064 	 */
6065 
6066 	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
6067 	    KM_NOSLEEP)) == NULL) {
6068 		return (DDI_FAILURE);
6069 	}
6070 	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
6071 		kmem_free(dmdp, sizeof (struct ddi_minor_data));
6072 		return (DDI_FAILURE);
6073 	}
6074 	dmdp->dip = dip;
6075 	dmdp->ddm_dev = makedevice(major, minor_num);
6076 	dmdp->ddm_spec_type = spec_type;
6077 	dmdp->ddm_node_type = node_type;
6078 	dmdp->type = mtype;
6079 	if (flag & CLONE_DEV) {
6080 		dmdp->type = DDM_ALIAS;
6081 		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
6082 	}
6083 	if (flag & PRIVONLY_DEV) {
6084 		dmdp->ddm_flags |= DM_NO_FSPERM;
6085 	}
6086 	if (read_priv || write_priv) {
6087 		dmdp->ddm_node_priv =
6088 		    devpolicy_priv_by_name(read_priv, write_priv);
6089 	}
6090 	dmdp->ddm_priv_mode = priv_mode;
6091 
6092 	ddi_append_minor_node(dip, dmdp);
6093 
6094 	/*
6095 	 * only log ddi_create_minor_node() calls which occur
6096 	 * outside the scope of attach(9e)/detach(9e) reconfigurations
6097 	 */
6098 	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
6099 	    mtype != DDM_INTERNAL_PATH) {
6100 		(void) i_log_devfs_minor_create(dip, name);
6101 	}
6102 
6103 	/*
6104 	 * Check if any dacf rules match the creation of this minor node
6105 	 */
6106 	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
6107 	return (DDI_SUCCESS);
6108 }
6109 
6110 int
6111 ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
6112     minor_t minor_num, char *node_type, int flag)
6113 {
6114 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6115 	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
6116 }
6117 
6118 int
6119 ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
6120     minor_t minor_num, char *node_type, int flag,
6121     const char *rdpriv, const char *wrpriv, mode_t priv_mode)
6122 {
6123 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6124 	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
6125 }
6126 
6127 int
6128 ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
6129     minor_t minor_num, char *node_type, int flag)
6130 {
6131 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6132 	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
6133 }
6134 
6135 /*
6136  * Internal (non-ddi) routine for drivers to export names known
6137  * to the kernel (especially ddi_pathname_to_dev_t and friends)
6138  * but not exported externally to /dev
6139  */
6140 int
6141 ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
6142     minor_t minor_num)
6143 {
6144 	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
6145 	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
6146 }
6147 
6148 void
6149 ddi_remove_minor_node(dev_info_t *dip, char *name)
6150 {
6151 	int			circ;
6152 	struct ddi_minor_data	*dmdp, *dmdp1;
6153 	struct ddi_minor_data	**dmdp_prev;
6154 
6155 	ndi_devi_enter(dip, &circ);
6156 	dmdp_prev = &DEVI(dip)->devi_minor;
6157 	dmdp = DEVI(dip)->devi_minor;
6158 	while (dmdp != NULL) {
6159 		dmdp1 = dmdp->next;
6160 		if ((name == NULL || (dmdp->ddm_name != NULL &&
6161 		    strcmp(name, dmdp->ddm_name) == 0))) {
6162 			if (dmdp->ddm_name != NULL) {
6163 				if (dmdp->type != DDM_INTERNAL_PATH)
6164 					(void) i_log_devfs_minor_remove(dip,
6165 					    dmdp->ddm_name);
6166 				kmem_free(dmdp->ddm_name,
6167 				    strlen(dmdp->ddm_name) + 1);
6168 			}
6169 			/*
6170 			 * Release device privilege, if any.
6171 			 * Release dacf client data associated with this minor
6172 			 * node by storing NULL.
6173 			 */
6174 			if (dmdp->ddm_node_priv)
6175 				dpfree(dmdp->ddm_node_priv);
6176 			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
6177 			kmem_free(dmdp, sizeof (struct ddi_minor_data));
6178 			*dmdp_prev = dmdp1;
6179 			/*
6180 			 * OK, we found it, so get out now -- if we drive on,
6181 			 * we will strcmp against garbage.  See 1139209.
6182 			 */
6183 			if (name != NULL)
6184 				break;
6185 		} else {
6186 			dmdp_prev = &dmdp->next;
6187 		}
6188 		dmdp = dmdp1;
6189 	}
6190 	ndi_devi_exit(dip, circ);
6191 }
6192 
6193 
6194 int
6195 ddi_in_panic()
6196 {
6197 	return (panicstr != NULL);
6198 }
6199 
6200 
6201 /*
6202  * Find first bit set in a mask (returned counting from 1 up)
6203  */
6204 
6205 int
6206 ddi_ffs(long mask)
6207 {
6208 	return (ffs(mask));
6209 }
6210 
6211 /*
6212  * Find last bit set. Take mask and clear
6213  * all but the most significant bit, and
6214  * then let ffs do the rest of the work.
6215  *
6216  * Algorithm courtesy of Steve Chessin.
6217  */
6218 
6219 int
6220 ddi_fls(long mask)
6221 {
6222 	while (mask) {
6223 		long nx;
6224 
6225 		if ((nx = (mask & (mask - 1))) == 0)
6226 			break;
6227 		mask = nx;
6228 	}
6229 	return (ffs(mask));
6230 }
6231 
6232 /*
6233  * The next five routines comprise generic storage management utilities
6234  * for driver soft state structures (in "the old days," this was done
6235  * with a statically sized array - big systems and dynamic loading
6236  * and unloading make heap allocation more attractive)
6237  */
6238 
6239 /*
6240  * Allocate a set of pointers to 'n_items' objects of size 'size'
6241  * bytes.  Each pointer is initialized to nil.
6242  *
6243  * The 'size' and 'n_items' values are stashed in the opaque
6244  * handle returned to the caller.
6245  *
6246  * This implementation interprets 'set of pointers' to mean 'array
6247  * of pointers' but note that nothing in the interface definition
6248  * precludes an implementation that uses, for example, a linked list.
6249  * However there should be a small efficiency gain from using an array
6250  * at lookup time.
6251  *
6252  * NOTE	As an optimization, we make our growable array allocations in
6253  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6254  *	gives us anyway.  It should save us some free/realloc's ..
6255  *
6256  *	As a further optimization, we make the growable array start out
6257  *	with MIN_N_ITEMS in it.
6258  */
6259 
6260 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6261 
6262 int
6263 ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
6264 {
6265 	struct i_ddi_soft_state *ss;
6266 
6267 	if (state_p == NULL || *state_p != NULL || size == 0)
6268 		return (EINVAL);
6269 
6270 	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
6271 	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
6272 	ss->size = size;
6273 
6274 	if (n_items < MIN_N_ITEMS)
6275 		ss->n_items = MIN_N_ITEMS;
6276 	else {
6277 		int bitlog;
6278 
6279 		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
6280 			bitlog--;
6281 		ss->n_items = 1 << bitlog;
6282 	}
6283 
6284 	ASSERT(ss->n_items >= n_items);
6285 
6286 	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);
6287 
6288 	*state_p = ss;
6289 
6290 	return (0);
6291 }
6292 
6293 
6294 /*
6295  * Allocate a state structure of size 'size' to be associated
6296  * with item 'item'.
6297  *
6298  * In this implementation, the array is extended to
6299  * allow the requested offset, if needed.
6300  */
6301 int
6302 ddi_soft_state_zalloc(void *state, int item)
6303 {
6304 	struct i_ddi_soft_state *ss;
6305 	void **array;
6306 	void *new_element;
6307 
6308 	if ((ss = state) == NULL || item < 0)
6309 		return (DDI_FAILURE);
6310 
6311 	mutex_enter(&ss->lock);
6312 	if (ss->size == 0) {
6313 		mutex_exit(&ss->lock);
6314 		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
6315 		    mod_containing_pc(caller()));
6316 		return (DDI_FAILURE);
6317 	}
6318 
6319 	array = ss->array;	/* NULL if ss->n_items == 0 */
6320 	ASSERT(ss->n_items != 0 && array != NULL);
6321 
6322 	/*
6323 	 * refuse to tread on an existing element
6324 	 */
6325 	if (item < ss->n_items && array[item] != NULL) {
6326 		mutex_exit(&ss->lock);
6327 		return (DDI_FAILURE);
6328 	}
6329 
6330 	/*
6331 	 * Allocate a new element to plug in
6332 	 */
6333 	new_element = kmem_zalloc(ss->size, KM_SLEEP);
6334 
6335 	/*
6336 	 * Check if the array is big enough, if not, grow it.
6337 	 */
6338 	if (item >= ss->n_items) {
6339 		void	**new_array;
6340 		size_t	new_n_items;
6341 		struct i_ddi_soft_state *dirty;
6342 
6343 		/*
6344 		 * Allocate a new array of the right length, copy
6345 		 * all the old pointers to the new array, then
6346 		 * if it exists at all, put the old array on the
6347 		 * dirty list.
6348 		 *
6349 		 * Note that we can't kmem_free() the old array.
6350 		 *
6351 		 * Why -- well the 'get' operation is 'mutex-free', so we
6352 		 * can't easily catch a suspended thread that is just about
6353 		 * to dereference the array we just grew out of.  So we
6354 		 * cons up a header and put it on a list of 'dirty'
6355 		 * pointer arrays.  (Dirty in the sense that there may
6356 		 * be suspended threads somewhere that are in the middle
6357 		 * of referencing them).  Fortunately, we -can- garbage
6358 		 * collect it all at ddi_soft_state_fini time.
6359 		 */
6360 		new_n_items = ss->n_items;
6361 		while (new_n_items < (1 + item))
6362 			new_n_items <<= 1;	/* double array size .. */
6363 
6364 		ASSERT(new_n_items >= (1 + item));	/* sanity check! */
6365 
6366 		new_array = kmem_zalloc(new_n_items * sizeof (void *),
6367 		    KM_SLEEP);
6368 		/*
6369 		 * Copy the pointers into the new array
6370 		 */
6371 		bcopy(array, new_array, ss->n_items * sizeof (void *));
6372 
6373 		/*
6374 		 * Save the old array on the dirty list
6375 		 */
6376 		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
6377 		dirty->array = ss->array;
6378 		dirty->n_items = ss->n_items;
6379 		dirty->next = ss->next;
6380 		ss->next = dirty;
6381 
6382 		ss->array = (array = new_array);
6383 		ss->n_items = new_n_items;
6384 	}
6385 
6386 	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);
6387 
6388 	array[item] = new_element;
6389 
6390 	mutex_exit(&ss->lock);
6391 	return (DDI_SUCCESS);
6392 }
6393 
6394 
6395 /*
6396  * Fetch a pointer to the allocated soft state structure.
6397  *
6398  * This is designed to be cheap.
6399  *
6400  * There's an argument that there should be more checking for
6401  * nil pointers and out of bounds on the array.. but we do a lot
6402  * of that in the alloc/free routines.
6403  *
6404  * An array has the convenience that we don't need to lock read-access
6405  * to it c.f. a linked list.  However our "expanding array" strategy
6406  * means that we should hold a readers lock on the i_ddi_soft_state
6407  * structure.
6408  *
6409  * However, from a performance viewpoint, we need to do it without
6410  * any locks at all -- this also makes it a leaf routine.  The algorithm
6411  * is 'lock-free' because we only discard the pointer arrays at
6412  * ddi_soft_state_fini() time.
6413  */
6414 void *
6415 ddi_get_soft_state(void *state, int item)
6416 {
6417 	struct i_ddi_soft_state *ss = state;
6418 
6419 	ASSERT(ss != NULL && item >= 0);
6420 
6421 	if (item < ss->n_items && ss->array != NULL)
6422 		return (ss->array[item]);
6423 	return (NULL);
6424 }
6425 
6426 /*
6427  * Free the state structure corresponding to 'item.'   Freeing an
6428  * element that has either gone or was never allocated is not
6429  * considered an error.  Note that we free the state structure, but
6430  * we don't shrink our pointer array, or discard 'dirty' arrays,
6431  * since even a few pointers don't really waste too much memory.
6432  *
6433  * Passing an item number that is out of bounds, or a null pointer will
6434  * provoke an error message.
6435  */
6436 void
6437 ddi_soft_state_free(void *state, int item)
6438 {
6439 	struct i_ddi_soft_state *ss;
6440 	void **array;
6441 	void *element;
6442 	static char msg[] = "ddi_soft_state_free:";
6443 
6444 	if ((ss = state) == NULL) {
6445 		cmn_err(CE_WARN, "%s null handle: %s",
6446 		    msg, mod_containing_pc(caller()));
6447 		return;
6448 	}
6449 
6450 	element = NULL;
6451 
6452 	mutex_enter(&ss->lock);
6453 
6454 	if ((array = ss->array) == NULL || ss->size == 0) {
6455 		cmn_err(CE_WARN, "%s bad handle: %s",
6456 		    msg, mod_containing_pc(caller()));
6457 	} else if (item < 0 || item >= ss->n_items) {
6458 		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
6459 		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
6460 	} else if (array[item] != NULL) {
6461 		element = array[item];
6462 		array[item] = NULL;
6463 	}
6464 
6465 	mutex_exit(&ss->lock);
6466 
6467 	if (element)
6468 		kmem_free(element, ss->size);
6469 }
6470 
6471 
6472 /*
6473  * Free the entire set of pointers, and any
6474  * soft state structures contained therein.
6475  *
6476  * Note that we don't grab the ss->lock mutex, even though
6477  * we're inspecting the various fields of the data structure.
6478  *
6479  * There is an implicit assumption that this routine will
6480  * never run concurrently with any of the above on this
6481  * particular state structure i.e. by the time the driver
6482  * calls this routine, there should be no other threads
6483  * running in the driver.
6484  */
6485 void
6486 ddi_soft_state_fini(void **state_p)
6487 {
6488 	struct i_ddi_soft_state *ss, *dirty;
6489 	int item;
6490 	static char msg[] = "ddi_soft_state_fini:";
6491 
6492 	if (state_p == NULL || (ss = *state_p) == NULL) {
6493 		cmn_err(CE_WARN, "%s null handle: %s",
6494 		    msg, mod_containing_pc(caller()));
6495 		return;
6496 	}
6497 
6498 	if (ss->size == 0) {
6499 		cmn_err(CE_WARN, "%s bad handle: %s",
6500 		    msg, mod_containing_pc(caller()));
6501 		return;
6502 	}
6503 
6504 	if (ss->n_items > 0) {
6505 		for (item = 0; item < ss->n_items; item++)
6506 			ddi_soft_state_free(ss, item);
6507 		kmem_free(ss->array, ss->n_items * sizeof (void *));
6508 	}
6509 
6510 	/*
6511 	 * Now delete any dirty arrays from previous 'grow' operations
6512 	 */
6513 	for (dirty = ss->next; dirty; dirty = ss->next) {
6514 		ss->next = dirty->next;
6515 		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
6516 		kmem_free(dirty, sizeof (*dirty));
6517 	}
6518 
6519 	mutex_destroy(&ss->lock);
6520 	kmem_free(ss, sizeof (*ss));
6521 
6522 	*state_p = NULL;
6523 }
6524 
6525 /*
6526  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6527  * Storage is double buffered to prevent updates during devi_addr use -
6528  * double buffering is adaquate for reliable ddi_deviname() consumption.
6529  * The double buffer is not freed until dev_info structure destruction
6530  * (by i_ddi_free_node).
6531  */
6532 void
6533 ddi_set_name_addr(dev_info_t *dip, char *name)
6534 {
6535 	char	*buf = DEVI(dip)->devi_addr_buf;
6536 	char	*newaddr;
6537 
6538 	if (buf == NULL) {
6539 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6540 		DEVI(dip)->devi_addr_buf = buf;
6541 	}
6542 
6543 	if (name) {
6544 		ASSERT(strlen(name) < MAXNAMELEN);
6545 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6546 		    (buf + MAXNAMELEN) : buf;
6547 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6548 	} else
6549 		newaddr = NULL;
6550 
6551 	DEVI(dip)->devi_addr = newaddr;
6552 }
6553 
6554 char *
6555 ddi_get_name_addr(dev_info_t *dip)
6556 {
6557 	return (DEVI(dip)->devi_addr);
6558 }
6559 
6560 void
6561 ddi_set_parent_data(dev_info_t *dip, void *pd)
6562 {
6563 	DEVI(dip)->devi_parent_data = pd;
6564 }
6565 
6566 void *
6567 ddi_get_parent_data(dev_info_t *dip)
6568 {
6569 	return (DEVI(dip)->devi_parent_data);
6570 }
6571 
6572 /*
6573  * ddi_name_to_major: Returns the major number of a module given its name.
6574  */
6575 major_t
6576 ddi_name_to_major(char *name)
6577 {
6578 	return (mod_name_to_major(name));
6579 }
6580 
6581 /*
6582  * ddi_major_to_name: Returns the module name bound to a major number.
6583  */
6584 char *
6585 ddi_major_to_name(major_t major)
6586 {
6587 	return (mod_major_to_name(major));
6588 }
6589 
6590 /*
6591  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6592  * pointed at by 'name.'  A devinfo node is named as a result of calling
6593  * ddi_initchild().
6594  *
6595  * Note: the driver must be held before calling this function!
6596  */
6597 char *
6598 ddi_deviname(dev_info_t *dip, char *name)
6599 {
6600 	char *addrname;
6601 	char none = '\0';
6602 
6603 	if (dip == ddi_root_node()) {
6604 		*name = '\0';
6605 		return (name);
6606 	}
6607 
6608 	if (i_ddi_node_state(dip) < DS_BOUND) {
6609 		addrname = &none;
6610 	} else {
6611 		/*
6612 		 * Use ddi_get_name_addr() without checking state so we get
6613 		 * a unit-address if we are called after ddi_set_name_addr()
6614 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6615 		 * node promotion to DS_INITIALIZED.  We currently have
6616 		 * two situations where we are called in this state:
6617 		 *   o  For framework processing of a path-oriented alias.
6618 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6619 		 *	from it's tran_tgt_init(9E) implementation.
6620 		 */
6621 		addrname = ddi_get_name_addr(dip);
6622 		if (addrname == NULL)
6623 			addrname = &none;
6624 	}
6625 
6626 	if (*addrname == '\0') {
6627 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6628 	} else {
6629 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6630 	}
6631 
6632 	return (name);
6633 }
6634 
6635 /*
6636  * Spits out the name of device node, typically name@addr, for a given node,
6637  * using the driver name, not the nodename.
6638  *
6639  * Used by match_parent. Not to be used elsewhere.
6640  */
6641 char *
6642 i_ddi_parname(dev_info_t *dip, char *name)
6643 {
6644 	char *addrname;
6645 
6646 	if (dip == ddi_root_node()) {
6647 		*name = '\0';
6648 		return (name);
6649 	}
6650 
6651 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6652 
6653 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6654 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6655 	else
6656 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6657 	return (name);
6658 }
6659 
6660 static char *
6661 pathname_work(dev_info_t *dip, char *path)
6662 {
6663 	char *bp;
6664 
6665 	if (dip == ddi_root_node()) {
6666 		*path = '\0';
6667 		return (path);
6668 	}
6669 	(void) pathname_work(ddi_get_parent(dip), path);
6670 	bp = path + strlen(path);
6671 	(void) ddi_deviname(dip, bp);
6672 	return (path);
6673 }
6674 
6675 char *
6676 ddi_pathname(dev_info_t *dip, char *path)
6677 {
6678 	return (pathname_work(dip, path));
6679 }
6680 
6681 static char *
6682 pathname_work_obp(dev_info_t *dip, char *path)
6683 {
6684 	char *bp;
6685 	char *obp_path;
6686 
6687 	/*
6688 	 * look up the "obp-path" property, return the path if it exists
6689 	 */
6690 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6691 	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
6692 		(void) strcpy(path, obp_path);
6693 		ddi_prop_free(obp_path);
6694 		return (path);
6695 	}
6696 
6697 	/*
6698 	 * stop at root, no obp path
6699 	 */
6700 	if (dip == ddi_root_node()) {
6701 		return (NULL);
6702 	}
6703 
6704 	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
6705 	if (obp_path == NULL)
6706 		return (NULL);
6707 
6708 	/*
6709 	 * append our component to parent's obp path
6710 	 */
6711 	bp = path + strlen(path);
6712 	if (*(bp - 1) != '/')
6713 		(void) strcat(bp++, "/");
6714 	(void) ddi_deviname(dip, bp);
6715 	return (path);
6716 }
6717 
6718 /*
6719  * return the 'obp-path' based path for the given node, or NULL if the node
6720  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6721  * function can't be called from interrupt context (since we need to
6722  * lookup a string property).
6723  */
6724 char *
6725 ddi_pathname_obp(dev_info_t *dip, char *path)
6726 {
6727 	ASSERT(!servicing_interrupt());
6728 	if (dip == NULL || path == NULL)
6729 		return (NULL);
6730 
6731 	/* split work into a separate function to aid debugging */
6732 	return (pathname_work_obp(dip, path));
6733 }
6734 
6735 int
6736 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6737 {
6738 	dev_info_t *pdip;
6739 	char obp_path[MAXPATHLEN];
6740 
6741 	bzero(obp_path, sizeof (obp_path));
6742 
6743 	if (dip == NULL)
6744 		return (DDI_FAILURE);
6745 	pdip = ddi_get_parent(dip);
6746 
6747 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6748 		(void) ddi_pathname(pdip, obp_path);
6749 	}
6750 
6751 	if (component) {
6752 		(void) strncat(obp_path, "/", sizeof (obp_path));
6753 		(void) strncat(obp_path, component, sizeof (obp_path));
6754 	}
6755 	return (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6756 	    obp_path));
6757 }
6758 
6759 /*
6760  * Given a dev_t, return the pathname of the corresponding device in the
6761  * buffer pointed at by "path."  The buffer is assumed to be large enough
6762  * to hold the pathname of the device (MAXPATHLEN).
6763  *
6764  * The pathname of a device is the pathname of the devinfo node to which
6765  * the device "belongs," concatenated with the character ':' and the name
6766  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6767  * just the pathname of the devinfo node is returned without driving attach
6768  * of that node.  For a non-zero spec_type, an attach is performed and a
6769  * search of the minor list occurs.
6770  *
6771  * It is possible that the path associated with the dev_t is not
6772  * currently available in the devinfo tree.  In order to have a
6773  * dev_t, a device must have been discovered before, which means
6774  * that the path is always in the instance tree.  The one exception
6775  * to this is if the dev_t is associated with a pseudo driver, in
6776  * which case the device must exist on the pseudo branch of the
6777  * devinfo tree as a result of parsing .conf files.
6778  */
6779 int
6780 ddi_dev_pathname(dev_t devt, int spec_type, char *path)
6781 {
6782 	int		circ;
6783 	major_t		major = getmajor(devt);
6784 	int		instance;
6785 	dev_info_t	*dip;
6786 	char		*minorname;
6787 	char		*drvname;
6788 
6789 	if (major >= devcnt)
6790 		goto fail;
6791 	if (major == clone_major) {
6792 		/* clone has no minor nodes, manufacture the path here */
6793 		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
6794 			goto fail;
6795 
6796 		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
6797 		return (DDI_SUCCESS);
6798 	}
6799 
6800 	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
6801 	if ((instance = dev_to_instance(devt)) == -1)
6802 		goto fail;
6803 
6804 	/* reconstruct the path given the major/instance */
6805 	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
6806 		goto fail;
6807 
6808 	/* if spec_type given we must drive attach and search minor nodes */
6809 	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
6810 		/* attach the path so we can search minors */
6811 		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
6812 			goto fail;
6813 
6814 		/* Add minorname to path. */
6815 		ndi_devi_enter(dip, &circ);
6816 		minorname = i_ddi_devtspectype_to_minorname(dip,
6817 		    devt, spec_type);
6818 		if (minorname) {
6819 			(void) strcat(path, ":");
6820 			(void) strcat(path, minorname);
6821 		}
6822 		ndi_devi_exit(dip, circ);
6823 		ddi_release_devi(dip);
6824 		if (minorname == NULL)
6825 			goto fail;
6826 	}
6827 	ASSERT(strlen(path) < MAXPATHLEN);
6828 	return (DDI_SUCCESS);
6829 
6830 fail:	*path = 0;
6831 	return (DDI_FAILURE);
6832 }
6833 
6834 /*
6835  * Given a major number and an instance, return the path.
6836  * This interface does NOT drive attach.
6837  */
6838 int
6839 e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
6840 {
6841 	struct devnames *dnp;
6842 	dev_info_t	*dip;
6843 
6844 	if ((major >= devcnt) || (instance == -1)) {
6845 		*path = 0;
6846 		return (DDI_FAILURE);
6847 	}
6848 
6849 	/* look for the major/instance in the instance tree */
6850 	if (e_ddi_instance_majorinstance_to_path(major, instance,
6851 	    path) == DDI_SUCCESS) {
6852 		ASSERT(strlen(path) < MAXPATHLEN);
6853 		return (DDI_SUCCESS);
6854 	}
6855 
6856 	/*
6857 	 * Not in instance tree, find the instance on the per driver list and
6858 	 * construct path to instance via ddi_pathname(). This is how paths
6859 	 * down the 'pseudo' branch are constructed.
6860 	 */
6861 	dnp = &(devnamesp[major]);
6862 	LOCK_DEV_OPS(&(dnp->dn_lock));
6863 	for (dip = dnp->dn_head; dip;
6864 	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
6865 		/* Skip if instance does not match. */
6866 		if (DEVI(dip)->devi_instance != instance)
6867 			continue;
6868 
6869 		/*
6870 		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
6871 		 * node demotion, so it is not an effective way of ensuring
6872 		 * that the ddi_pathname result has a unit-address.  Instead,
6873 		 * we reverify the node state after calling ddi_pathname().
6874 		 */
6875 		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
6876 			(void) ddi_pathname(dip, path);
6877 			if (i_ddi_node_state(dip) < DS_INITIALIZED)
6878 				continue;
6879 			UNLOCK_DEV_OPS(&(dnp->dn_lock));
6880 			ASSERT(strlen(path) < MAXPATHLEN);
6881 			return (DDI_SUCCESS);
6882 		}
6883 	}
6884 	UNLOCK_DEV_OPS(&(dnp->dn_lock));
6885 
6886 	/* can't reconstruct the path */
6887 	*path = 0;
6888 	return (DDI_FAILURE);
6889 }
6890 
6891 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6892 
6893 /*
6894  * Given the dip for a network interface return the ppa for that interface.
6895  *
6896  * In all cases except GLD v0 drivers, the ppa == instance.
6897  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6898  * So for these drivers when the attach routine calls gld_register(),
6899  * the GLD framework creates an integer property called "gld_driver_ppa"
6900  * that can be queried here.
6901  *
6902  * The only time this function is used is when a system is booting over nfs.
6903  * In this case the system has to resolve the pathname of the boot device
6904  * to it's ppa.
6905  */
6906 int
6907 i_ddi_devi_get_ppa(dev_info_t *dip)
6908 {
6909 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
6910 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
6911 	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
6912 }
6913 
6914 /*
6915  * i_ddi_devi_set_ppa() should only be called from gld_register()
6916  * and only for GLD v0 drivers
6917  */
6918 void
6919 i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
6920 {
6921 	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
6922 }
6923 
6924 
6925 /*
6926  * Private DDI Console bell functions.
6927  */
6928 void
6929 ddi_ring_console_bell(clock_t duration)
6930 {
6931 	if (ddi_console_bell_func != NULL)
6932 		(*ddi_console_bell_func)(duration);
6933 }
6934 
6935 void
6936 ddi_set_console_bell(void (*bellfunc)(clock_t duration))
6937 {
6938 	ddi_console_bell_func = bellfunc;
6939 }
6940 
6941 int
6942 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6943 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6944 {
6945 	int (*funcp)() = ddi_dma_allochdl;
6946 	ddi_dma_attr_t dma_attr;
6947 	struct bus_ops *bop;
6948 
6949 	if (attr == (ddi_dma_attr_t *)0)
6950 		return (DDI_DMA_BADATTR);
6951 
6952 	dma_attr = *attr;
6953 
6954 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
6955 	if (bop && bop->bus_dma_allochdl)
6956 		funcp = bop->bus_dma_allochdl;
6957 
6958 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
6959 }
6960 
6961 void
6962 ddi_dma_free_handle(ddi_dma_handle_t *handlep)
6963 {
6964 	ddi_dma_handle_t h = *handlep;
6965 	(void) ddi_dma_freehdl(HD, HD, h);
6966 }
6967 
6968 static uintptr_t dma_mem_list_id = 0;
6969 
6970 
6971 int
6972 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
6973 	ddi_device_acc_attr_t *accattrp, uint_t flags,
6974 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
6975 	size_t *real_length, ddi_acc_handle_t *handlep)
6976 {
6977 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
6978 	dev_info_t *dip = hp->dmai_rdip;
6979 	ddi_acc_hdl_t *ap;
6980 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
6981 	uint_t sleepflag, xfermodes;
6982 	int (*fp)(caddr_t);
6983 	int rval;
6984 
6985 	if (waitfp == DDI_DMA_SLEEP)
6986 		fp = (int (*)())KM_SLEEP;
6987 	else if (waitfp == DDI_DMA_DONTWAIT)
6988 		fp = (int (*)())KM_NOSLEEP;
6989 	else
6990 		fp = waitfp;
6991 	*handlep = impl_acc_hdl_alloc(fp, arg);
6992 	if (*handlep == NULL)
6993 		return (DDI_FAILURE);
6994 
6995 	/* check if the cache attributes are supported */
6996 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
6997 		return (DDI_FAILURE);
6998 
6999 	/*
7000 	 * Transfer the meaningful bits to xfermodes.
7001 	 * Double-check if the 3rd party driver correctly sets the bits.
7002 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7003 	 */
7004 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7005 	if (xfermodes == 0) {
7006 		xfermodes = DDI_DMA_STREAMING;
7007 	}
7008 
7009 	/*
7010 	 * initialize the common elements of data access handle
7011 	 */
7012 	ap = impl_acc_hdl_get(*handlep);
7013 	ap->ah_vers = VERS_ACCHDL;
7014 	ap->ah_dip = dip;
7015 	ap->ah_offset = 0;
7016 	ap->ah_len = 0;
7017 	ap->ah_xfermodes = flags;
7018 	ap->ah_acc = *accattrp;
7019 
7020 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7021 	if (xfermodes == DDI_DMA_CONSISTENT) {
7022 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7023 		    flags, accattrp, kaddrp, NULL, ap);
7024 		*real_length = length;
7025 	} else {
7026 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7027 		    flags, accattrp, kaddrp, real_length, ap);
7028 	}
7029 	if (rval == DDI_SUCCESS) {
7030 		ap->ah_len = (off_t)(*real_length);
7031 		ap->ah_addr = *kaddrp;
7032 	} else {
7033 		impl_acc_hdl_free(*handlep);
7034 		*handlep = (ddi_acc_handle_t)NULL;
7035 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7036 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7037 		}
7038 		rval = DDI_FAILURE;
7039 	}
7040 	return (rval);
7041 }
7042 
7043 void
7044 ddi_dma_mem_free(ddi_acc_handle_t *handlep)
7045 {
7046 	ddi_acc_hdl_t *ap;
7047 
7048 	ap = impl_acc_hdl_get(*handlep);
7049 	ASSERT(ap);
7050 
7051 	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);
7052 
7053 	/*
7054 	 * free the handle
7055 	 */
7056 	impl_acc_hdl_free(*handlep);
7057 	*handlep = (ddi_acc_handle_t)NULL;
7058 
7059 	if (dma_mem_list_id != 0) {
7060 		ddi_run_callback(&dma_mem_list_id);
7061 	}
7062 }
7063 
7064 int
7065 ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
7066 	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
7067 	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7068 {
7069 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7070 	dev_info_t *hdip, *dip;
7071 	struct ddi_dma_req dmareq;
7072 	int (*funcp)();
7073 
7074 	dmareq.dmar_flags = flags;
7075 	dmareq.dmar_fp = waitfp;
7076 	dmareq.dmar_arg = arg;
7077 	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;
7078 
7079 	if (bp->b_flags & B_PAGEIO) {
7080 		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
7081 		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
7082 		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
7083 		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
7084 	} else {
7085 		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
7086 		if (bp->b_flags & B_SHADOW) {
7087 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
7088 			    bp->b_shadow;
7089 			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
7090 		} else {
7091 			dmareq.dmar_object.dmao_type =
7092 			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
7093 			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
7094 			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7095 		}
7096 
7097 		/*
7098 		 * If the buffer has no proc pointer, or the proc
7099 		 * struct has the kernel address space, or the buffer has
7100 		 * been marked B_REMAPPED (meaning that it is now
7101 		 * mapped into the kernel's address space), then
7102 		 * the address space is kas (kernel address space).
7103 		 */
7104 		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
7105 		    (bp->b_flags & B_REMAPPED)) {
7106 			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
7107 		} else {
7108 			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
7109 			    bp->b_proc->p_as;
7110 		}
7111 	}
7112 
7113 	dip = hp->dmai_rdip;
7114 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7115 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
7116 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
7117 }
7118 
7119 int
7120 ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
7121 	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
7122 	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7123 {
7124 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7125 	dev_info_t *hdip, *dip;
7126 	struct ddi_dma_req dmareq;
7127 	int (*funcp)();
7128 
7129 	if (len == (uint_t)0) {
7130 		return (DDI_DMA_NOMAPPING);
7131 	}
7132 	dmareq.dmar_flags = flags;
7133 	dmareq.dmar_fp = waitfp;
7134 	dmareq.dmar_arg = arg;
7135 	dmareq.dmar_object.dmao_size = len;
7136 	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
7137 	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
7138 	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
7139 	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
7140 
7141 	dip = hp->dmai_rdip;
7142 	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
7143 	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
7144 	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
7145 }
7146 
7147 void
7148 ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
7149 {
7150 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7151 	ddi_dma_cookie_t *cp;
7152 
7153 	cp = hp->dmai_cookie;
7154 	ASSERT(cp);
7155 
7156 	cookiep->dmac_notused = cp->dmac_notused;
7157 	cookiep->dmac_type = cp->dmac_type;
7158 	cookiep->dmac_address = cp->dmac_address;
7159 	cookiep->dmac_size = cp->dmac_size;
7160 	hp->dmai_cookie++;
7161 }
7162 
7163 int
7164 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7165 {
7166 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7167 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7168 		return (DDI_FAILURE);
7169 	} else {
7170 		*nwinp = hp->dmai_nwin;
7171 		return (DDI_SUCCESS);
7172 	}
7173 }
7174 
7175 int
7176 ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
7177 	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
7178 {
7179 	int (*funcp)() = ddi_dma_win;
7180 	struct bus_ops *bop;
7181 
7182 	bop = DEVI(HD)->devi_ops->devo_bus_ops;
7183 	if (bop && bop->bus_dma_win)
7184 		funcp = bop->bus_dma_win;
7185 
7186 	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
7187 }
7188 
7189 int
7190 ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
7191 {
7192 	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
7193 	    &burstsizes, 0, 0));
7194 }
7195 
7196 int
7197 i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
7198 {
7199 	return (hp->dmai_fault);
7200 }
7201 
7202 int
7203 ddi_check_dma_handle(ddi_dma_handle_t handle)
7204 {
7205 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7206 	int (*check)(ddi_dma_impl_t *);
7207 
7208 	if ((check = hp->dmai_fault_check) == NULL)
7209 		check = i_ddi_dma_fault_check;
7210 
7211 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7212 }
7213 
7214 void
7215 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7216 {
7217 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7218 	void (*notify)(ddi_dma_impl_t *);
7219 
7220 	if (!hp->dmai_fault) {
7221 		hp->dmai_fault = 1;
7222 		if ((notify = hp->dmai_fault_notify) != NULL)
7223 			(*notify)(hp);
7224 	}
7225 }
7226 
7227 void
7228 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7229 {
7230 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7231 	void (*notify)(ddi_dma_impl_t *);
7232 
7233 	if (hp->dmai_fault) {
7234 		hp->dmai_fault = 0;
7235 		if ((notify = hp->dmai_fault_notify) != NULL)
7236 			(*notify)(hp);
7237 	}
7238 }
7239 
7240 /*
7241  * register mapping routines.
7242  */
7243 int
7244 ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
7245 	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
7246 	ddi_acc_handle_t *handle)
7247 {
7248 	ddi_map_req_t mr;
7249 	ddi_acc_hdl_t *hp;
7250 	int result;
7251 
7252 	/*
7253 	 * Allocate and initialize the common elements of data access handle.
7254 	 */
7255 	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
7256 	hp = impl_acc_hdl_get(*handle);
7257 	hp->ah_vers = VERS_ACCHDL;
7258 	hp->ah_dip = dip;
7259 	hp->ah_rnumber = rnumber;
7260 	hp->ah_offset = offset;
7261 	hp->ah_len = len;
7262 	hp->ah_acc = *accattrp;
7263 
7264 	/*
7265 	 * Set up the mapping request and call to parent.
7266 	 */
7267 	mr.map_op = DDI_MO_MAP_LOCKED;
7268 	mr.map_type = DDI_MT_RNUMBER;
7269 	mr.map_obj.rnumber = rnumber;
7270 	mr.map_prot = PROT_READ | PROT_WRITE;
7271 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7272 	mr.map_handlep = hp;
7273 	mr.map_vers = DDI_MAP_VERSION;
7274 	result = ddi_map(dip, &mr, offset, len, addrp);
7275 
7276 	/*
7277 	 * check for end result
7278 	 */
7279 	if (result != DDI_SUCCESS) {
7280 		impl_acc_hdl_free(*handle);
7281 		*handle = (ddi_acc_handle_t)NULL;
7282 	} else {
7283 		hp->ah_addr = *addrp;
7284 	}
7285 
7286 	return (result);
7287 }
7288 
7289 void
7290 ddi_regs_map_free(ddi_acc_handle_t *handlep)
7291 {
7292 	ddi_map_req_t mr;
7293 	ddi_acc_hdl_t *hp;
7294 
7295 	hp = impl_acc_hdl_get(*handlep);
7296 	ASSERT(hp);
7297 
7298 	mr.map_op = DDI_MO_UNMAP;
7299 	mr.map_type = DDI_MT_RNUMBER;
7300 	mr.map_obj.rnumber = hp->ah_rnumber;
7301 	mr.map_prot = PROT_READ | PROT_WRITE;
7302 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
7303 	mr.map_handlep = hp;
7304 	mr.map_vers = DDI_MAP_VERSION;
7305 
7306 	/*
7307 	 * Call my parent to unmap my regs.
7308 	 */
7309 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
7310 	    hp->ah_len, &hp->ah_addr);
7311 	/*
7312 	 * free the handle
7313 	 */
7314 	impl_acc_hdl_free(*handlep);
7315 	*handlep = (ddi_acc_handle_t)NULL;
7316 }
7317 
7318 int
7319 ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
7320 	ssize_t dev_advcnt, uint_t dev_datasz)
7321 {
7322 	uint8_t *b;
7323 	uint16_t *w;
7324 	uint32_t *l;
7325 	uint64_t *ll;
7326 
7327 	/* check for total byte count is multiple of data transfer size */
7328 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7329 		return (DDI_FAILURE);
7330 
7331 	switch (dev_datasz) {
7332 	case DDI_DATA_SZ01_ACC:
7333 		for (b = (uint8_t *)dev_addr;
7334 		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
7335 			ddi_put8(handle, b, 0);
7336 		break;
7337 	case DDI_DATA_SZ02_ACC:
7338 		for (w = (uint16_t *)dev_addr;
7339 		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
7340 			ddi_put16(handle, w, 0);
7341 		break;
7342 	case DDI_DATA_SZ04_ACC:
7343 		for (l = (uint32_t *)dev_addr;
7344 		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
7345 			ddi_put32(handle, l, 0);
7346 		break;
7347 	case DDI_DATA_SZ08_ACC:
7348 		for (ll = (uint64_t *)dev_addr;
7349 		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
7350 			ddi_put64(handle, ll, 0x0ll);
7351 		break;
7352 	default:
7353 		return (DDI_FAILURE);
7354 	}
7355 	return (DDI_SUCCESS);
7356 }
7357 
7358 int
7359 ddi_device_copy(
7360 	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
7361 	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
7362 	size_t bytecount, uint_t dev_datasz)
7363 {
7364 	uint8_t *b_src, *b_dst;
7365 	uint16_t *w_src, *w_dst;
7366 	uint32_t *l_src, *l_dst;
7367 	uint64_t *ll_src, *ll_dst;
7368 
7369 	/* check for total byte count is multiple of data transfer size */
7370 	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
7371 		return (DDI_FAILURE);
7372 
7373 	switch (dev_datasz) {
7374 	case DDI_DATA_SZ01_ACC:
7375 		b_src = (uint8_t *)src_addr;
7376 		b_dst = (uint8_t *)dest_addr;
7377 
7378 		for (; bytecount != 0; bytecount -= 1) {
7379 			ddi_put8(dest_handle, b_dst,
7380 			    ddi_get8(src_handle, b_src));
7381 			b_dst += dest_advcnt;
7382 			b_src += src_advcnt;
7383 		}
7384 		break;
7385 	case DDI_DATA_SZ02_ACC:
7386 		w_src = (uint16_t *)src_addr;
7387 		w_dst = (uint16_t *)dest_addr;
7388 
7389 		for (; bytecount != 0; bytecount -= 2) {
7390 			ddi_put16(dest_handle, w_dst,
7391 			    ddi_get16(src_handle, w_src));
7392 			w_dst += dest_advcnt;
7393 			w_src += src_advcnt;
7394 		}
7395 		break;
7396 	case DDI_DATA_SZ04_ACC:
7397 		l_src = (uint32_t *)src_addr;
7398 		l_dst = (uint32_t *)dest_addr;
7399 
7400 		for (; bytecount != 0; bytecount -= 4) {
7401 			ddi_put32(dest_handle, l_dst,
7402 			    ddi_get32(src_handle, l_src));
7403 			l_dst += dest_advcnt;
7404 			l_src += src_advcnt;
7405 		}
7406 		break;
7407 	case DDI_DATA_SZ08_ACC:
7408 		ll_src = (uint64_t *)src_addr;
7409 		ll_dst = (uint64_t *)dest_addr;
7410 
7411 		for (; bytecount != 0; bytecount -= 8) {
7412 			ddi_put64(dest_handle, ll_dst,
7413 			    ddi_get64(src_handle, ll_src));
7414 			ll_dst += dest_advcnt;
7415 			ll_src += src_advcnt;
7416 		}
7417 		break;
7418 	default:
7419 		return (DDI_FAILURE);
7420 	}
7421 	return (DDI_SUCCESS);
7422 }
7423 
7424 #define	swap16(value)  \
7425 	((((value) & 0xff) << 8) | ((value) >> 8))
7426 
7427 #define	swap32(value)	\
7428 	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
7429 	(uint32_t)swap16((uint16_t)((value) >> 16)))
7430 
7431 #define	swap64(value)	\
7432 	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
7433 	    << 32) | \
7434 	(uint64_t)swap32((uint32_t)((value) >> 32)))
7435 
7436 uint16_t
7437 ddi_swap16(uint16_t value)
7438 {
7439 	return (swap16(value));
7440 }
7441 
7442 uint32_t
7443 ddi_swap32(uint32_t value)
7444 {
7445 	return (swap32(value));
7446 }
7447 
7448 uint64_t
7449 ddi_swap64(uint64_t value)
7450 {
7451 	return (swap64(value));
7452 }
7453 
7454 /*
7455  * Convert a binding name to a driver name.
7456  * A binding name is the name used to determine the driver for a
7457  * device - it may be either an alias for the driver or the name
7458  * of the driver itself.
7459  */
7460 char *
7461 i_binding_to_drv_name(char *bname)
7462 {
7463 	major_t major_no;
7464 
7465 	ASSERT(bname != NULL);
7466 
7467 	if ((major_no = ddi_name_to_major(bname)) == -1)
7468 		return (NULL);
7469 	return (ddi_major_to_name(major_no));
7470 }
7471 
7472 /*
7473  * Search for minor name that has specified dev_t and spec_type.
7474  * If spec_type is zero then any dev_t match works.  Since we
7475  * are returning a pointer to the minor name string, we require the
7476  * caller to do the locking.
7477  */
7478 char *
7479 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7480 {
7481 	struct ddi_minor_data	*dmdp;
7482 
7483 	/*
7484 	 * The did layered driver currently intentionally returns a
7485 	 * devinfo ptr for an underlying sd instance based on a did
7486 	 * dev_t. In this case it is not an error.
7487 	 *
7488 	 * The did layered driver is associated with Sun Cluster.
7489 	 */
7490 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7491 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7492 
7493 	ASSERT(DEVI_BUSY_OWNED(dip));
7494 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7495 		if (((dmdp->type == DDM_MINOR) ||
7496 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7497 		    (dmdp->type == DDM_DEFAULT)) &&
7498 		    (dmdp->ddm_dev == dev) &&
7499 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7500 		    (dmdp->ddm_spec_type == spec_type)))
7501 			return (dmdp->ddm_name);
7502 	}
7503 
7504 	return (NULL);
7505 }
7506 
7507 /*
7508  * Find the devt and spectype of the specified minor_name.
7509  * Return DDI_FAILURE if minor_name not found. Since we are
7510  * returning everything via arguments we can do the locking.
7511  */
7512 int
7513 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7514 	dev_t *devtp, int *spectypep)
7515 {
7516 	int			circ;
7517 	struct ddi_minor_data	*dmdp;
7518 
7519 	/* deal with clone minor nodes */
7520 	if (dip == clone_dip) {
7521 		major_t	major;
7522 		/*
7523 		 * Make sure minor_name is a STREAMS driver.
7524 		 * We load the driver but don't attach to any instances.
7525 		 */
7526 
7527 		major = ddi_name_to_major(minor_name);
7528 		if (major == DDI_MAJOR_T_NONE)
7529 			return (DDI_FAILURE);
7530 
7531 		if (ddi_hold_driver(major) == NULL)
7532 			return (DDI_FAILURE);
7533 
7534 		if (STREAMSTAB(major) == NULL) {
7535 			ddi_rele_driver(major);
7536 			return (DDI_FAILURE);
7537 		}
7538 		ddi_rele_driver(major);
7539 
7540 		if (devtp)
7541 			*devtp = makedevice(clone_major, (minor_t)major);
7542 
7543 		if (spectypep)
7544 			*spectypep = S_IFCHR;
7545 
7546 		return (DDI_SUCCESS);
7547 	}
7548 
7549 	ndi_devi_enter(dip, &circ);
7550 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7551 		if (((dmdp->type != DDM_MINOR) &&
7552 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7553 		    (dmdp->type != DDM_DEFAULT)) ||
7554 		    strcmp(minor_name, dmdp->ddm_name))
7555 			continue;
7556 
7557 		if (devtp)
7558 			*devtp = dmdp->ddm_dev;
7559 
7560 		if (spectypep)
7561 			*spectypep = dmdp->ddm_spec_type;
7562 
7563 		ndi_devi_exit(dip, circ);
7564 		return (DDI_SUCCESS);
7565 	}
7566 	ndi_devi_exit(dip, circ);
7567 
7568 	return (DDI_FAILURE);
7569 }
7570 
7571 extern char	hw_serial[];
7572 static kmutex_t devid_gen_mutex;
7573 static short	devid_gen_number;
7574 
7575 #ifdef DEBUG
7576 
7577 static int	devid_register_corrupt = 0;
7578 static int	devid_register_corrupt_major = 0;
7579 static int	devid_register_corrupt_hint = 0;
7580 static int	devid_register_corrupt_hint_major = 0;
7581 
7582 static int devid_lyr_debug = 0;
7583 
7584 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
7585 	if (devid_lyr_debug)					\
7586 		ddi_debug_devid_devts(msg, ndevs, devs)
7587 
7588 #else
7589 
7590 #define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)
7591 
7592 #endif /* DEBUG */
7593 
7594 
7595 #ifdef	DEBUG
7596 
7597 static void
7598 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7599 {
7600 	int i;
7601 
7602 	cmn_err(CE_CONT, "%s:\n", msg);
7603 	for (i = 0; i < ndevs; i++) {
7604 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7605 	}
7606 }
7607 
7608 static void
7609 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7610 {
7611 	int i;
7612 
7613 	cmn_err(CE_CONT, "%s:\n", msg);
7614 	for (i = 0; i < npaths; i++) {
7615 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7616 	}
7617 }
7618 
7619 static void
7620 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7621 {
7622 	int i;
7623 
7624 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7625 	for (i = 0; i < ndevs; i++) {
7626 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7627 	}
7628 }
7629 
7630 #endif	/* DEBUG */
7631 
7632 /*
7633  * Register device id into DDI framework.
7634  * Must be called when device is attached.
7635  */
7636 static int
7637 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7638 {
7639 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7640 	size_t		driver_len;
7641 	const char	*driver_name;
7642 	char		*devid_str;
7643 	major_t		major;
7644 
7645 	if ((dip == NULL) ||
7646 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7647 		return (DDI_FAILURE);
7648 
7649 	/* verify that the devid is valid */
7650 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7651 		return (DDI_FAILURE);
7652 
7653 	/* Updating driver name hint in devid */
7654 	driver_name = ddi_driver_name(dip);
7655 	driver_len = strlen(driver_name);
7656 	if (driver_len > DEVID_HINT_SIZE) {
7657 		/* Pick up last four characters of driver name */
7658 		driver_name += driver_len - DEVID_HINT_SIZE;
7659 		driver_len = DEVID_HINT_SIZE;
7660 	}
7661 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7662 	bcopy(driver_name, i_devid->did_driver, driver_len);
7663 
7664 #ifdef DEBUG
7665 	/* Corrupt the devid for testing. */
7666 	if (devid_register_corrupt)
7667 		i_devid->did_id[0] += devid_register_corrupt;
7668 	if (devid_register_corrupt_major &&
7669 	    (major == devid_register_corrupt_major))
7670 		i_devid->did_id[0] += 1;
7671 	if (devid_register_corrupt_hint)
7672 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7673 	if (devid_register_corrupt_hint_major &&
7674 	    (major == devid_register_corrupt_hint_major))
7675 		i_devid->did_driver[0] += 1;
7676 #endif /* DEBUG */
7677 
7678 	/* encode the devid as a string */
7679 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7680 		return (DDI_FAILURE);
7681 
7682 	/* add string as a string property */
7683 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7684 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7685 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7686 		    ddi_driver_name(dip), ddi_get_instance(dip));
7687 		ddi_devid_str_free(devid_str);
7688 		return (DDI_FAILURE);
7689 	}
7690 
7691 	/* keep pointer to devid string for interrupt context fma code */
7692 	if (DEVI(dip)->devi_devid_str)
7693 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7694 	DEVI(dip)->devi_devid_str = devid_str;
7695 	return (DDI_SUCCESS);
7696 }
7697 
7698 int
7699 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7700 {
7701 	int rval;
7702 
7703 	rval = i_ddi_devid_register(dip, devid);
7704 	if (rval == DDI_SUCCESS) {
7705 		/*
7706 		 * Register devid in devid-to-path cache
7707 		 */
7708 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7709 			mutex_enter(&DEVI(dip)->devi_lock);
7710 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7711 			mutex_exit(&DEVI(dip)->devi_lock);
7712 		} else {
7713 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7714 			    ddi_driver_name(dip), ddi_get_instance(dip));
7715 		}
7716 	} else {
7717 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7718 		    ddi_driver_name(dip), ddi_get_instance(dip));
7719 	}
7720 	return (rval);
7721 }
7722 
7723 /*
7724  * Remove (unregister) device id from DDI framework.
7725  * Must be called when device is detached.
7726  */
7727 static void
7728 i_ddi_devid_unregister(dev_info_t *dip)
7729 {
7730 	if (DEVI(dip)->devi_devid_str) {
7731 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7732 		DEVI(dip)->devi_devid_str = NULL;
7733 	}
7734 
7735 	/* remove the devid property */
7736 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7737 }
7738 
7739 void
7740 ddi_devid_unregister(dev_info_t *dip)
7741 {
7742 	mutex_enter(&DEVI(dip)->devi_lock);
7743 	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
7744 	mutex_exit(&DEVI(dip)->devi_lock);
7745 	e_devid_cache_unregister(dip);
7746 	i_ddi_devid_unregister(dip);
7747 }
7748 
7749 /*
7750  * Allocate and initialize a device id.
7751  */
7752 int
7753 ddi_devid_init(
7754 	dev_info_t	*dip,
7755 	ushort_t	devid_type,
7756 	ushort_t	nbytes,
7757 	void		*id,
7758 	ddi_devid_t	*ret_devid)
7759 {
7760 	impl_devid_t	*i_devid;
7761 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7762 	int		driver_len;
7763 	const char	*driver_name;
7764 
7765 	switch (devid_type) {
7766 	case DEVID_SCSI3_WWN:
7767 		/*FALLTHRU*/
7768 	case DEVID_SCSI_SERIAL:
7769 		/*FALLTHRU*/
7770 	case DEVID_ATA_SERIAL:
7771 		/*FALLTHRU*/
7772 	case DEVID_ENCAP:
7773 		if (nbytes == 0)
7774 			return (DDI_FAILURE);
7775 		if (id == NULL)
7776 			return (DDI_FAILURE);
7777 		break;
7778 	case DEVID_FAB:
7779 		if (nbytes != 0)
7780 			return (DDI_FAILURE);
7781 		if (id != NULL)
7782 			return (DDI_FAILURE);
7783 		nbytes = sizeof (int) +
7784 		    sizeof (struct timeval32) + sizeof (short);
7785 		sz += nbytes;
7786 		break;
7787 	default:
7788 		return (DDI_FAILURE);
7789 	}
7790 
7791 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7792 		return (DDI_FAILURE);
7793 
7794 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7795 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7796 	i_devid->did_rev_hi = DEVID_REV_MSB;
7797 	i_devid->did_rev_lo = DEVID_REV_LSB;
7798 	DEVID_FORMTYPE(i_devid, devid_type);
7799 	DEVID_FORMLEN(i_devid, nbytes);
7800 
7801 	/* Fill in driver name hint */
7802 	driver_name = ddi_driver_name(dip);
7803 	driver_len = strlen(driver_name);
7804 	if (driver_len > DEVID_HINT_SIZE) {
7805 		/* Pick up last four characters of driver name */
7806 		driver_name += driver_len - DEVID_HINT_SIZE;
7807 		driver_len = DEVID_HINT_SIZE;
7808 	}
7809 
7810 	bcopy(driver_name, i_devid->did_driver, driver_len);
7811 
7812 	/* Fill in id field */
7813 	if (devid_type == DEVID_FAB) {
7814 		char		*cp;
7815 		int		hostid;
7816 		char		*hostid_cp = &hw_serial[0];
7817 		struct timeval32 timestamp32;
7818 		int		i;
7819 		int		*ip;
7820 		short		gen;
7821 
7822 		/* increase the generation number */
7823 		mutex_enter(&devid_gen_mutex);
7824 		gen = devid_gen_number++;
7825 		mutex_exit(&devid_gen_mutex);
7826 
7827 		cp = i_devid->did_id;
7828 
7829 		/* Fill in host id (big-endian byte ordering) */
7830 		hostid = stoi(&hostid_cp);
7831 		*cp++ = hibyte(hiword(hostid));
7832 		*cp++ = lobyte(hiword(hostid));
7833 		*cp++ = hibyte(loword(hostid));
7834 		*cp++ = lobyte(loword(hostid));
7835 
7836 		/*
7837 		 * Fill in timestamp (big-endian byte ordering)
7838 		 *
7839 		 * (Note that the format may have to be changed
7840 		 * before 2038 comes around, though it's arguably
7841 		 * unique enough as it is..)
7842 		 */
7843 		uniqtime32(&timestamp32);
7844 		ip = (int *)&timestamp32;
7845 		for (i = 0;
7846 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7847 			int	val;
7848 			val = *ip;
7849 			*cp++ = hibyte(hiword(val));
7850 			*cp++ = lobyte(hiword(val));
7851 			*cp++ = hibyte(loword(val));
7852 			*cp++ = lobyte(loword(val));
7853 		}
7854 
7855 		/* fill in the generation number */
7856 		*cp++ = hibyte(gen);
7857 		*cp++ = lobyte(gen);
7858 	} else
7859 		bcopy(id, i_devid->did_id, nbytes);
7860 
7861 	/* return device id */
7862 	*ret_devid = (ddi_devid_t)i_devid;
7863 	return (DDI_SUCCESS);
7864 }
7865 
7866 int
7867 ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
7868 {
7869 	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
7870 }
7871 
7872 int
7873 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7874 {
7875 	char		*devidstr;
7876 
7877 	ASSERT(dev != DDI_DEV_T_NONE);
7878 
7879 	/* look up the property, devt specific first */
7880 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7881 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7882 		if ((dev == DDI_DEV_T_ANY) ||
7883 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7884 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7885 		    DDI_PROP_SUCCESS)) {
7886 			return (DDI_FAILURE);
7887 		}
7888 	}
7889 
7890 	/* convert to binary form */
7891 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7892 		ddi_prop_free(devidstr);
7893 		return (DDI_FAILURE);
7894 	}
7895 	ddi_prop_free(devidstr);
7896 	return (DDI_SUCCESS);
7897 }
7898 
7899 /*
7900  * Return a copy of the device id for dev_t
7901  */
7902 int
7903 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7904 {
7905 	dev_info_t	*dip;
7906 	int		rval;
7907 
7908 	/* get the dip */
7909 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7910 		return (DDI_FAILURE);
7911 
7912 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7913 
7914 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7915 	return (rval);
7916 }
7917 
7918 /*
7919  * Return a copy of the minor name for dev_t and spec_type
7920  */
7921 int
7922 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7923 {
7924 	char		*buf;
7925 	int		circ;
7926 	dev_info_t	*dip;
7927 	char		*nm;
7928 	int		rval;
7929 
7930 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7931 		*minor_name = NULL;
7932 		return (DDI_FAILURE);
7933 	}
7934 
7935 	/* Find the minor name and copy into max size buf */
7936 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7937 	ndi_devi_enter(dip, &circ);
7938 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7939 	if (nm)
7940 		(void) strcpy(buf, nm);
7941 	ndi_devi_exit(dip, circ);
7942 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7943 
7944 	if (nm) {
7945 		/* duplicate into min size buf for return result */
7946 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7947 		rval = DDI_SUCCESS;
7948 	} else {
7949 		*minor_name = NULL;
7950 		rval = DDI_FAILURE;
7951 	}
7952 
7953 	/* free max size buf and return */
7954 	kmem_free(buf, MAXNAMELEN);
7955 	return (rval);
7956 }
7957 
7958 int
7959 ddi_lyr_devid_to_devlist(
7960 	ddi_devid_t	devid,
7961 	char		*minor_name,
7962 	int		*retndevs,
7963 	dev_t		**retdevs)
7964 {
7965 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
7966 
7967 	if (e_devid_cache_to_devt_list(devid, minor_name,
7968 	    retndevs, retdevs) == DDI_SUCCESS) {
7969 		ASSERT(*retndevs > 0);
7970 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7971 		    *retndevs, *retdevs);
7972 		return (DDI_SUCCESS);
7973 	}
7974 
7975 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
7976 		return (DDI_FAILURE);
7977 	}
7978 
7979 	if (e_devid_cache_to_devt_list(devid, minor_name,
7980 	    retndevs, retdevs) == DDI_SUCCESS) {
7981 		ASSERT(*retndevs > 0);
7982 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
7983 		    *retndevs, *retdevs);
7984 		return (DDI_SUCCESS);
7985 	}
7986 
7987 	return (DDI_FAILURE);
7988 }
7989 
7990 void
7991 ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
7992 {
7993 	kmem_free(devlist, sizeof (dev_t) * ndevs);
7994 }
7995 
7996 /*
7997  * Note: This will need to be fixed if we ever allow processes to
7998  * have more than one data model per exec.
7999  */
8000 model_t
8001 ddi_mmap_get_model(void)
8002 {
8003 	return (get_udatamodel());
8004 }
8005 
8006 model_t
8007 ddi_model_convert_from(model_t model)
8008 {
8009 	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
8010 }
8011 
8012 /*
8013  * ddi interfaces managing storage and retrieval of eventcookies.
8014  */
8015 
8016 /*
8017  * Invoke bus nexus driver's implementation of the
8018  * (*bus_remove_eventcall)() interface to remove a registered
8019  * callback handler for "event".
8020  */
8021 int
8022 ddi_remove_event_handler(ddi_callback_id_t id)
8023 {
8024 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8025 	dev_info_t *ddip;
8026 
8027 	ASSERT(cb);
8028 	if (!cb) {
8029 		return (DDI_FAILURE);
8030 	}
8031 
8032 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8033 	return (ndi_busop_remove_eventcall(ddip, id));
8034 }
8035 
8036 /*
8037  * Invoke bus nexus driver's implementation of the
8038  * (*bus_add_eventcall)() interface to register a callback handler
8039  * for "event".
8040  */
8041 int
8042 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8043     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8044     void *arg, ddi_callback_id_t *id)
8045 {
8046 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8047 }
8048 
8049 
8050 /*
8051  * Return a handle for event "name" by calling up the device tree
8052  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8053  * by a bus nexus or top of dev_info tree is reached.
8054  */
8055 int
8056 ddi_get_eventcookie(dev_info_t *dip, char *name,
8057     ddi_eventcookie_t *event_cookiep)
8058 {
8059 	return (ndi_busop_get_eventcookie(dip, dip,
8060 	    name, event_cookiep));
8061 }
8062 
8063 /*
8064  * This procedure is provided as the general callback function when
8065  * umem_lockmemory calls as_add_callback for long term memory locking.
8066  * When as_unmap, as_setprot, or as_free encounter segments which have
8067  * locked memory, this callback will be invoked.
8068  */
8069 void
8070 umem_lock_undo(struct as *as, void *arg, uint_t event)
8071 {
8072 	_NOTE(ARGUNUSED(as, event))
8073 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8074 
8075 	/*
8076 	 * Call the cleanup function.  Decrement the cookie reference
8077 	 * count, if it goes to zero, return the memory for the cookie.
8078 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8079 	 * called already.  It is the responsibility of the caller of
8080 	 * umem_lockmemory to handle the case of the cleanup routine
8081 	 * being called after a ddi_umem_unlock for the cookie
8082 	 * was called.
8083 	 */
8084 
8085 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8086 
8087 	/* remove the cookie if reference goes to zero */
8088 	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
8089 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8090 	}
8091 }
8092 
8093 /*
8094  * The following two Consolidation Private routines provide generic
8095  * interfaces to increase/decrease the amount of device-locked memory.
8096  *
8097  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8098  * must be called every time i_ddi_incr_locked_memory() is called.
8099  */
8100 int
8101 /* ARGSUSED */
8102 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8103 {
8104 	ASSERT(procp != NULL);
8105 	mutex_enter(&procp->p_lock);
8106 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8107 		mutex_exit(&procp->p_lock);
8108 		return (ENOMEM);
8109 	}
8110 	mutex_exit(&procp->p_lock);
8111 	return (0);
8112 }
8113 
8114 /*
8115  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8116  * must be called every time i_ddi_decr_locked_memory() is called.
8117  */
8118 /* ARGSUSED */
8119 void
8120 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8121 {
8122 	ASSERT(procp != NULL);
8123 	mutex_enter(&procp->p_lock);
8124 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8125 	mutex_exit(&procp->p_lock);
8126 }
8127 
8128 /*
8129  * This routine checks if the max-locked-memory resource ctl is
8130  * exceeded, if not increments it, grabs a hold on the project.
8131  * Returns 0 if successful otherwise returns error code
8132  */
8133 static int
8134 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8135 {
8136 	proc_t		*procp;
8137 	int		ret;
8138 
8139 	ASSERT(cookie);
8140 	procp = cookie->procp;
8141 	ASSERT(procp);
8142 
8143 	if ((ret = i_ddi_incr_locked_memory(procp,
8144 	    cookie->size)) != 0) {
8145 		return (ret);
8146 	}
8147 	return (0);
8148 }
8149 
8150 /*
8151  * Decrements the max-locked-memory resource ctl and releases
8152  * the hold on the project that was acquired during umem_incr_devlockmem
8153  */
8154 static void
8155 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8156 {
8157 	proc_t		*proc;
8158 
8159 	proc = (proc_t *)cookie->procp;
8160 	if (!proc)
8161 		return;
8162 
8163 	i_ddi_decr_locked_memory(proc, cookie->size);
8164 }
8165 
8166 /*
8167  * A consolidation private function which is essentially equivalent to
8168  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8169  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8170  * the ops_vector is valid.
8171  *
8172  * Lock the virtual address range in the current process and create a
8173  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8174  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8175  * to user space.
8176  *
8177  * Note: The resource control accounting currently uses a full charge model
8178  * in other words attempts to lock the same/overlapping areas of memory
8179  * will deduct the full size of the buffer from the projects running
8180  * counter for the device locked memory.
8181  *
8182  * addr, size should be PAGESIZE aligned
8183  *
8184  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8185  *	identifies whether the locked memory will be read or written or both
8186  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8187  * be maintained for an indefinitely long period (essentially permanent),
8188  * rather than for what would be required for a typical I/O completion.
8189  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8190  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8191  * This is to prevent a deadlock if a file truncation is attempted after
8192  * after the locking is done.
8193  *
8194  * Returns 0 on success
8195  *	EINVAL - for invalid parameters
8196  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8197  *	ENOMEM - is returned if the current request to lock memory exceeds
8198  *		*.max-locked-memory resource control value.
8199  *      EFAULT - memory pertains to a regular file mapped shared and
8200  *		and DDI_UMEMLOCK_LONGTERM flag is set
8201  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8202  */
8203 int
8204 umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
8205 		struct umem_callback_ops *ops_vector,
8206 		proc_t *procp)
8207 {
8208 	int	error;
8209 	struct ddi_umem_cookie *p;
8210 	void	(*driver_callback)() = NULL;
8211 	struct as *as = procp->p_as;
8212 	struct seg		*seg;
8213 	vnode_t			*vp;
8214 
8215 	*cookie = NULL;		/* in case of any error return */
8216 
8217 	/* These are the only three valid flags */
8218 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
8219 	    DDI_UMEMLOCK_LONGTERM)) != 0)
8220 		return (EINVAL);
8221 
8222 	/* At least one (can be both) of the two access flags must be set */
8223 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
8224 		return (EINVAL);
8225 
8226 	/* addr and len must be page-aligned */
8227 	if (((uintptr_t)addr & PAGEOFFSET) != 0)
8228 		return (EINVAL);
8229 
8230 	if ((len & PAGEOFFSET) != 0)
8231 		return (EINVAL);
8232 
8233 	/*
8234 	 * For longterm locking a driver callback must be specified; if
8235 	 * not longterm then a callback is optional.
8236 	 */
8237 	if (ops_vector != NULL) {
8238 		if (ops_vector->cbo_umem_callback_version !=
8239 		    UMEM_CALLBACK_VERSION)
8240 			return (EINVAL);
8241 		else
8242 			driver_callback = ops_vector->cbo_umem_lock_cleanup;
8243 	}
8244 	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
8245 		return (EINVAL);
8246 
8247 	/*
8248 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8249 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8250 	 */
8251 	if (ddi_umem_unlock_thread == NULL)
8252 		i_ddi_umem_unlock_thread_start();
8253 
8254 	/* Allocate memory for the cookie */
8255 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8256 
8257 	/* Convert the flags to seg_rw type */
8258 	if (flags & DDI_UMEMLOCK_WRITE) {
8259 		p->s_flags = S_WRITE;
8260 	} else {
8261 		p->s_flags = S_READ;
8262 	}
8263 
8264 	/* Store procp in cookie for later iosetup/unlock */
8265 	p->procp = (void *)procp;
8266 
8267 	/*
8268 	 * Store the struct as pointer in cookie for later use by
8269 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8270 	 * is called after relvm is called.
8271 	 */
8272 	p->asp = as;
8273 
8274 	/*
8275 	 * The size field is needed for lockmem accounting.
8276 	 */
8277 	p->size = len;
8278 
8279 	if (umem_incr_devlockmem(p) != 0) {
8280 		/*
8281 		 * The requested memory cannot be locked
8282 		 */
8283 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8284 		*cookie = (ddi_umem_cookie_t)NULL;
8285 		return (ENOMEM);
8286 	}
8287 
8288 	/* Lock the pages corresponding to addr, len in memory */
8289 	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
8290 	if (error != 0) {
8291 		umem_decr_devlockmem(p);
8292 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8293 		*cookie = (ddi_umem_cookie_t)NULL;
8294 		return (error);
8295 	}
8296 
8297 	/*
8298 	 * For longterm locking the addr must pertain to a seg_vn segment or
8299 	 * or a seg_spt segment.
8300 	 * If the segment pertains to a regular file, it cannot be
8301 	 * mapped MAP_SHARED.
8302 	 * This is to prevent a deadlock if a file truncation is attempted
8303 	 * after the locking is done.
8304 	 * Doing this after as_pagelock guarantees persistence of the as; if
8305 	 * an unacceptable segment is found, the cleanup includes calling
8306 	 * as_pageunlock before returning EFAULT.
8307 	 */
8308 	if (flags & DDI_UMEMLOCK_LONGTERM) {
8309 		extern  struct seg_ops segspt_shmops;
8310 		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
8311 		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
8312 			if (seg == NULL || seg->s_base > addr + len)
8313 				break;
8314 			if (((seg->s_ops != &segvn_ops) &&
8315 			    (seg->s_ops != &segspt_shmops)) ||
8316 			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
8317 			    vp != NULL && vp->v_type == VREG) &&
8318 			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
8319 				as_pageunlock(as, p->pparray,
8320 				    addr, len, p->s_flags);
8321 				AS_LOCK_EXIT(as, &as->a_lock);
8322 				umem_decr_devlockmem(p);
8323 				kmem_free(p, sizeof (struct ddi_umem_cookie));
8324 				*cookie = (ddi_umem_cookie_t)NULL;
8325 				return (EFAULT);
8326 			}
8327 		}
8328 		AS_LOCK_EXIT(as, &as->a_lock);
8329 	}
8330 
8331 
8332 	/* Initialize the fields in the ddi_umem_cookie */
8333 	p->cvaddr = addr;
8334 	p->type = UMEM_LOCKED;
8335 	if (driver_callback != NULL) {
8336 		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
8337 		p->cook_refcnt = 2;
8338 		p->callbacks = *ops_vector;
8339 	} else {
8340 		/* only i_ddi_umme_unlock needs the cookie */
8341 		p->cook_refcnt = 1;
8342 	}
8343 
8344 	*cookie = (ddi_umem_cookie_t)p;
8345 
8346 	/*
8347 	 * If a driver callback was specified, add an entry to the
8348 	 * as struct callback list. The as_pagelock above guarantees
8349 	 * the persistence of as.
8350 	 */
8351 	if (driver_callback) {
8352 		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
8353 		    addr, len, KM_SLEEP);
8354 		if (error != 0) {
8355 			as_pageunlock(as, p->pparray,
8356 			    addr, len, p->s_flags);
8357 			umem_decr_devlockmem(p);
8358 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8359 			*cookie = (ddi_umem_cookie_t)NULL;
8360 		}
8361 	}
8362 	return (error);
8363 }
8364 
8365 /*
8366  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8367  * the cookie.  Called from i_ddi_umem_unlock_thread.
8368  */
8369 
8370 static void
8371 i_ddi_umem_unlock(struct ddi_umem_cookie *p)
8372 {
8373 	uint_t	rc;
8374 
8375 	/*
8376 	 * There is no way to determine whether a callback to
8377 	 * umem_lock_undo was registered via as_add_callback.
8378 	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
8379 	 * a valid callback function structure.)  as_delete_callback
8380 	 * is called to delete a possible registered callback.  If the
8381 	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
8382 	 * indicates that there was a callback registered, and that is was
8383 	 * successfully deleted.  Thus, the cookie reference count
8384 	 * will never be decremented by umem_lock_undo.  Just return the
8385 	 * memory for the cookie, since both users of the cookie are done.
8386 	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
8387 	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
8388 	 * indicates that callback processing is taking place and, and
8389 	 * umem_lock_undo is, or will be, executing, and thus decrementing
8390 	 * the cookie reference count when it is complete.
8391 	 *
8392 	 * This needs to be done before as_pageunlock so that the
8393 	 * persistence of as is guaranteed because of the locked pages.
8394 	 *
8395 	 */
8396 	rc = as_delete_callback(p->asp, p);
8397 
8398 
8399 	/*
8400 	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
8401 	 * after relvm is called so use p->asp.
8402 	 */
8403 	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);
8404 
8405 	/*
8406 	 * Now that we have unlocked the memory decrement the
8407 	 * *.max-locked-memory rctl
8408 	 */
8409 	umem_decr_devlockmem(p);
8410 
8411 	if (rc == AS_CALLBACK_DELETED) {
8412 		/* umem_lock_undo will not happen, return the cookie memory */
8413 		ASSERT(p->cook_refcnt == 2);
8414 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8415 	} else {
8416 		/*
8417 		 * umem_undo_lock may happen if as_delete_callback returned
8418 		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
8419 		 * reference count, atomically, and return the cookie
8420 		 * memory if the reference count goes to zero.  The only
8421 		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
8422 		 * case, just return the cookie memory.
8423 		 */
8424 		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
8425 		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
8426 		    == 0)) {
8427 			kmem_free(p, sizeof (struct ddi_umem_cookie));
8428 		}
8429 	}
8430 }
8431 
8432 /*
8433  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8434  *
8435  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8436  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8437  * via calls to ddi_umem_unlock.
8438  */
8439 
8440 static void
8441 i_ddi_umem_unlock_thread(void)
8442 {
8443 	struct ddi_umem_cookie	*ret_cookie;
8444 	callb_cpr_t	cprinfo;
8445 
8446 	/* process the ddi_umem_unlock list */
8447 	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
8448 	    callb_generic_cpr, "unlock_thread");
8449 	for (;;) {
8450 		mutex_enter(&ddi_umem_unlock_mutex);
8451 		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
8452 			ret_cookie = ddi_umem_unlock_head;
8453 			/* take if off the list */
8454 			if ((ddi_umem_unlock_head =
8455 			    ddi_umem_unlock_head->unl_forw) == NULL) {
8456 				ddi_umem_unlock_tail = NULL;
8457 			}
8458 			mutex_exit(&ddi_umem_unlock_mutex);
8459 			/* unlock the pages in this cookie */
8460 			(void) i_ddi_umem_unlock(ret_cookie);
8461 		} else {   /* list is empty, wait for next ddi_umem_unlock */
8462 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
8463 			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
8464 			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
8465 			mutex_exit(&ddi_umem_unlock_mutex);
8466 		}
8467 	}
8468 	/* ddi_umem_unlock_thread does not exit */
8469 	/* NOTREACHED */
8470 }
8471 
8472 /*
8473  * Start the thread that will process the ddi_umem_unlock list if it is
8474  * not already started (i_ddi_umem_unlock_thread).
8475  */
8476 static void
8477 i_ddi_umem_unlock_thread_start(void)
8478 {
8479 	mutex_enter(&ddi_umem_unlock_mutex);
8480 	if (ddi_umem_unlock_thread == NULL) {
8481 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8482 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8483 		    TS_RUN, minclsyspri);
8484 	}
8485 	mutex_exit(&ddi_umem_unlock_mutex);
8486 }
8487 
8488 /*
8489  * Lock the virtual address range in the current process and create a
8490  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8491  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8492  * to user space.
8493  *
8494  * Note: The resource control accounting currently uses a full charge model
8495  * in other words attempts to lock the same/overlapping areas of memory
8496  * will deduct the full size of the buffer from the projects running
8497  * counter for the device locked memory. This applies to umem_lockmemory too.
8498  *
8499  * addr, size should be PAGESIZE aligned
8500  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8501  *	identifies whether the locked memory will be read or written or both
8502  *
8503  * Returns 0 on success
8504  *	EINVAL - for invalid parameters
8505  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8506  *	ENOMEM - is returned if the current request to lock memory exceeds
8507  *		*.max-locked-memory resource control value.
8508  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8509  */
8510 int
8511 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8512 {
8513 	int	error;
8514 	struct ddi_umem_cookie *p;
8515 
8516 	*cookie = NULL;		/* in case of any error return */
8517 
8518 	/* These are the only two valid flags */
8519 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8520 		return (EINVAL);
8521 	}
8522 
8523 	/* At least one of the two flags (or both) must be set */
8524 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8525 		return (EINVAL);
8526 	}
8527 
8528 	/* addr and len must be page-aligned */
8529 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8530 		return (EINVAL);
8531 	}
8532 
8533 	if ((len & PAGEOFFSET) != 0) {
8534 		return (EINVAL);
8535 	}
8536 
8537 	/*
8538 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8539 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8540 	 */
8541 	if (ddi_umem_unlock_thread == NULL)
8542 		i_ddi_umem_unlock_thread_start();
8543 
8544 	/* Allocate memory for the cookie */
8545 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8546 
8547 	/* Convert the flags to seg_rw type */
8548 	if (flags & DDI_UMEMLOCK_WRITE) {
8549 		p->s_flags = S_WRITE;
8550 	} else {
8551 		p->s_flags = S_READ;
8552 	}
8553 
8554 	/* Store curproc in cookie for later iosetup/unlock */
8555 	p->procp = (void *)curproc;
8556 
8557 	/*
8558 	 * Store the struct as pointer in cookie for later use by
8559 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8560 	 * is called after relvm is called.
8561 	 */
8562 	p->asp = curproc->p_as;
8563 	/*
8564 	 * The size field is needed for lockmem accounting.
8565 	 */
8566 	p->size = len;
8567 
8568 	if (umem_incr_devlockmem(p) != 0) {
8569 		/*
8570 		 * The requested memory cannot be locked
8571 		 */
8572 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8573 		*cookie = (ddi_umem_cookie_t)NULL;
8574 		return (ENOMEM);
8575 	}
8576 
8577 	/* Lock the pages corresponding to addr, len in memory */
8578 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8579 	    addr, len, p->s_flags);
8580 	if (error != 0) {
8581 		umem_decr_devlockmem(p);
8582 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8583 		*cookie = (ddi_umem_cookie_t)NULL;
8584 		return (error);
8585 	}
8586 
8587 	/* Initialize the fields in the ddi_umem_cookie */
8588 	p->cvaddr = addr;
8589 	p->type = UMEM_LOCKED;
8590 	p->cook_refcnt = 1;
8591 
8592 	*cookie = (ddi_umem_cookie_t)p;
8593 	return (error);
8594 }
8595 
8596 /*
8597  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8598  * unlocked by i_ddi_umem_unlock_thread.
8599  */
8600 
8601 void
8602 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8603 {
8604 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8605 
8606 	ASSERT(p->type == UMEM_LOCKED);
8607 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8608 	ASSERT(ddi_umem_unlock_thread != NULL);
8609 
8610 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8611 	/*
8612 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8613 	 * if it's called in the interrupt context. Otherwise, unlock pages
8614 	 * immediately.
8615 	 */
8616 	if (servicing_interrupt()) {
8617 		/* queue the unlock request and notify the thread */
8618 		mutex_enter(&ddi_umem_unlock_mutex);
8619 		if (ddi_umem_unlock_head == NULL) {
8620 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8621 			cv_broadcast(&ddi_umem_unlock_cv);
8622 		} else {
8623 			ddi_umem_unlock_tail->unl_forw = p;
8624 			ddi_umem_unlock_tail = p;
8625 		}
8626 		mutex_exit(&ddi_umem_unlock_mutex);
8627 	} else {
8628 		/* unlock the pages right away */
8629 		(void) i_ddi_umem_unlock(p);
8630 	}
8631 }
8632 
8633 /*
8634  * Create a buf structure from a ddi_umem_cookie
8635  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8636  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8637  * off, len - identifies the portion of the memory represented by the cookie
8638  *		that the buf points to.
8639  *	NOTE: off, len need to follow the alignment/size restrictions of the
8640  *		device (dev) that this buf will be passed to. Some devices
8641  *		will accept unrestricted alignment/size, whereas others (such as
8642  *		st) require some block-size alignment/size. It is the caller's
8643  *		responsibility to ensure that the alignment/size restrictions
8644  *		are met (we cannot assert as we do not know the restrictions)
8645  *
8646  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8647  *		the flags used in ddi_umem_lock
8648  *
8649  * The following three arguments are used to initialize fields in the
8650  * buf structure and are uninterpreted by this routine.
8651  *
8652  * dev
8653  * blkno
8654  * iodone
8655  *
8656  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8657  *
8658  * Returns a buf structure pointer on success (to be freed by freerbuf)
8659  *	NULL on any parameter error or memory alloc failure
8660  *
8661  */
8662 struct buf *
8663 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8664 	int direction, dev_t dev, daddr_t blkno,
8665 	int (*iodone)(struct buf *), int sleepflag)
8666 {
8667 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8668 	struct buf *bp;
8669 
8670 	/*
8671 	 * check for valid cookie offset, len
8672 	 */
8673 	if ((off + len) > p->size) {
8674 		return (NULL);
8675 	}
8676 
8677 	if (len > p->size) {
8678 		return (NULL);
8679 	}
8680 
8681 	/* direction has to be one of B_READ or B_WRITE */
8682 	if ((direction != B_READ) && (direction != B_WRITE)) {
8683 		return (NULL);
8684 	}
8685 
8686 	/* These are the only two valid sleepflags */
8687 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8688 		return (NULL);
8689 	}
8690 
8691 	/*
8692 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8693 	 */
8694 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8695 		return (NULL);
8696 	}
8697 
8698 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8699 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8700 	    (p->procp == NULL) : (p->procp != NULL));
8701 
8702 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8703 	if (bp == NULL) {
8704 		return (NULL);
8705 	}
8706 	bioinit(bp);
8707 
8708 	bp->b_flags = B_BUSY | B_PHYS | direction;
8709 	bp->b_edev = dev;
8710 	bp->b_lblkno = blkno;
8711 	bp->b_iodone = iodone;
8712 	bp->b_bcount = len;
8713 	bp->b_proc = (proc_t *)p->procp;
8714 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8715 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8716 	if (p->pparray != NULL) {
8717 		bp->b_flags |= B_SHADOW;
8718 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8719 		bp->b_shadow = p->pparray + btop(off);
8720 	}
8721 	return (bp);
8722 }
8723 
8724 /*
8725  * Fault-handling and related routines
8726  */
8727 
8728 ddi_devstate_t
8729 ddi_get_devstate(dev_info_t *dip)
8730 {
8731 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8732 		return (DDI_DEVSTATE_OFFLINE);
8733 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8734 		return (DDI_DEVSTATE_DOWN);
8735 	else if (DEVI_IS_BUS_QUIESCED(dip))
8736 		return (DDI_DEVSTATE_QUIESCED);
8737 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8738 		return (DDI_DEVSTATE_DEGRADED);
8739 	else
8740 		return (DDI_DEVSTATE_UP);
8741 }
8742 
8743 void
8744 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8745 	ddi_fault_location_t location, const char *message)
8746 {
8747 	struct ddi_fault_event_data fd;
8748 	ddi_eventcookie_t ec;
8749 
8750 	/*
8751 	 * Assemble all the information into a fault-event-data structure
8752 	 */
8753 	fd.f_dip = dip;
8754 	fd.f_impact = impact;
8755 	fd.f_location = location;
8756 	fd.f_message = message;
8757 	fd.f_oldstate = ddi_get_devstate(dip);
8758 
8759 	/*
8760 	 * Get eventcookie from defining parent.
8761 	 */
8762 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8763 	    DDI_SUCCESS)
8764 		return;
8765 
8766 	(void) ndi_post_event(dip, dip, ec, &fd);
8767 }
8768 
8769 char *
8770 i_ddi_devi_class(dev_info_t *dip)
8771 {
8772 	return (DEVI(dip)->devi_device_class);
8773 }
8774 
8775 int
8776 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8777 {
8778 	struct dev_info *devi = DEVI(dip);
8779 
8780 	mutex_enter(&devi->devi_lock);
8781 
8782 	if (devi->devi_device_class)
8783 		kmem_free(devi->devi_device_class,
8784 		    strlen(devi->devi_device_class) + 1);
8785 
8786 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8787 	    != NULL) {
8788 		mutex_exit(&devi->devi_lock);
8789 		return (DDI_SUCCESS);
8790 	}
8791 
8792 	mutex_exit(&devi->devi_lock);
8793 
8794 	return (DDI_FAILURE);
8795 }
8796 
8797 
8798 /*
8799  * Task Queues DDI interfaces.
8800  */
8801 
8802 /* ARGSUSED */
8803 ddi_taskq_t *
8804 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8805     pri_t pri, uint_t cflags)
8806 {
8807 	char full_name[TASKQ_NAMELEN];
8808 	const char *tq_name;
8809 	int nodeid = 0;
8810 
8811 	if (dip == NULL)
8812 		tq_name = name;
8813 	else {
8814 		nodeid = ddi_get_instance(dip);
8815 
8816 		if (name == NULL)
8817 			name = "tq";
8818 
8819 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8820 		    ddi_driver_name(dip), name);
8821 
8822 		tq_name = full_name;
8823 	}
8824 
8825 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8826 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8827 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8828 }
8829 
8830 void
8831 ddi_taskq_destroy(ddi_taskq_t *tq)
8832 {
8833 	taskq_destroy((taskq_t *)tq);
8834 }
8835 
8836 int
8837 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8838     void *arg, uint_t dflags)
8839 {
8840 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8841 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8842 
8843 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8844 }
8845 
8846 void
8847 ddi_taskq_wait(ddi_taskq_t *tq)
8848 {
8849 	taskq_wait((taskq_t *)tq);
8850 }
8851 
8852 void
8853 ddi_taskq_suspend(ddi_taskq_t *tq)
8854 {
8855 	taskq_suspend((taskq_t *)tq);
8856 }
8857 
8858 boolean_t
8859 ddi_taskq_suspended(ddi_taskq_t *tq)
8860 {
8861 	return (taskq_suspended((taskq_t *)tq));
8862 }
8863 
8864 void
8865 ddi_taskq_resume(ddi_taskq_t *tq)
8866 {
8867 	taskq_resume((taskq_t *)tq);
8868 }
8869 
8870 int
8871 ddi_parse(
8872 	const char	*ifname,
8873 	char		*alnum,
8874 	uint_t		*nump)
8875 {
8876 	const char	*p;
8877 	int		l;
8878 	ulong_t		num;
8879 	boolean_t	nonum = B_TRUE;
8880 	char		c;
8881 
8882 	l = strlen(ifname);
8883 	for (p = ifname + l; p != ifname; l--) {
8884 		c = *--p;
8885 		if (!isdigit(c)) {
8886 			(void) strlcpy(alnum, ifname, l + 1);
8887 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8888 				return (DDI_FAILURE);
8889 			break;
8890 		}
8891 		nonum = B_FALSE;
8892 	}
8893 	if (l == 0 || nonum)
8894 		return (DDI_FAILURE);
8895 
8896 	*nump = num;
8897 	return (DDI_SUCCESS);
8898 }
8899