xref: /titanic_50/usr/src/uts/common/os/sunddi.c (revision 408aef6a222bf32dc7e66db1ff562316a425ee72)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/note.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/buf.h>
32 #include <sys/uio.h>
33 #include <sys/cred.h>
34 #include <sys/poll.h>
35 #include <sys/mman.h>
36 #include <sys/kmem.h>
37 #include <sys/model.h>
38 #include <sys/file.h>
39 #include <sys/proc.h>
40 #include <sys/open.h>
41 #include <sys/user.h>
42 #include <sys/t_lock.h>
43 #include <sys/vm.h>
44 #include <sys/stat.h>
45 #include <vm/hat.h>
46 #include <vm/seg.h>
47 #include <vm/seg_vn.h>
48 #include <vm/seg_dev.h>
49 #include <vm/as.h>
50 #include <sys/cmn_err.h>
51 #include <sys/cpuvar.h>
52 #include <sys/debug.h>
53 #include <sys/autoconf.h>
54 #include <sys/sunddi.h>
55 #include <sys/esunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/kstat.h>
58 #include <sys/conf.h>
59 #include <sys/ddi_impldefs.h>	/* include implementation structure defs */
60 #include <sys/ndi_impldefs.h>	/* include prototypes */
61 #include <sys/ddi_timer.h>
62 #include <sys/hwconf.h>
63 #include <sys/pathname.h>
64 #include <sys/modctl.h>
65 #include <sys/epm.h>
66 #include <sys/devctl.h>
67 #include <sys/callb.h>
68 #include <sys/cladm.h>
69 #include <sys/sysevent.h>
70 #include <sys/dacf_impl.h>
71 #include <sys/ddidevmap.h>
72 #include <sys/bootconf.h>
73 #include <sys/disp.h>
74 #include <sys/atomic.h>
75 #include <sys/promif.h>
76 #include <sys/instance.h>
77 #include <sys/sysevent/eventdefs.h>
78 #include <sys/task.h>
79 #include <sys/project.h>
80 #include <sys/taskq.h>
81 #include <sys/devpolicy.h>
82 #include <sys/ctype.h>
83 #include <net/if.h>
84 #include <sys/rctl.h>
85 
86 extern	pri_t	minclsyspri;
87 
88 extern	rctl_hndl_t rc_project_locked_mem;
89 extern	rctl_hndl_t rc_zone_locked_mem;
90 
91 #ifdef DEBUG
92 static int sunddi_debug = 0;
93 #endif /* DEBUG */
94 
95 /* ddi_umem_unlock miscellaneous */
96 
97 static	void	i_ddi_umem_unlock_thread_start(void);
98 
99 static	kmutex_t	ddi_umem_unlock_mutex; /* unlock list mutex */
100 static	kcondvar_t	ddi_umem_unlock_cv; /* unlock list block/unblock */
101 static	kthread_t	*ddi_umem_unlock_thread;
102 /*
103  * The ddi_umem_unlock FIFO list.  NULL head pointer indicates empty list.
104  */
105 static	struct	ddi_umem_cookie *ddi_umem_unlock_head = NULL;
106 static	struct	ddi_umem_cookie *ddi_umem_unlock_tail = NULL;
107 
108 
109 /*
110  * DDI(Sun) Function and flag definitions:
111  */
112 
113 #if defined(__x86)
114 /*
115  * Used to indicate which entries were chosen from a range.
116  */
117 char	*chosen_reg = "chosen-reg";
118 #endif
119 
120 /*
121  * Function used to ring system console bell
122  */
123 void (*ddi_console_bell_func)(clock_t duration);
124 
125 /*
126  * Creating register mappings and handling interrupts:
127  */
128 
129 /*
130  * Generic ddi_map: Call parent to fulfill request...
131  */
132 
int
ddi_map(dev_info_t *dp, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *addrp)
{
	dev_info_t *pdip;

	/* A nil dip is not permitted here; callers must pass a real node. */
	ASSERT(dp);
	pdip = (dev_info_t *)DEVI(dp)->devi_parent;
	/* Forward the mapping request to the parent nexus's bus_map(9E). */
	return ((DEVI(pdip)->devi_ops->devo_bus_ops->bus_map)(pdip,
	    dp, mp, offset, len, addrp));
}
144 
145 /*
146  * ddi_apply_range: (Called by nexi only.)
147  * Apply ranges in parent node dp, to child regspec rp...
148  */
149 
/* Thin DDI wrapper over the implementation routine i_ddi_apply_range(). */
int
ddi_apply_range(dev_info_t *dp, dev_info_t *rdip, struct regspec *rp)
{
	return (i_ddi_apply_range(dp, rdip, rp));
}
155 
/*
 * ddi_map_regs: map register set 'rnumber' of 'dip' into kernel virtual
 * space, returning the address through kaddrp.  On x86 this also records
 * the selected (bus, addr, size) tuple in a 'chosen-reg' property so it
 * can be found later; the mapping itself is always done by the parent
 * via ddi_map().
 */
int
ddi_map_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;
#if defined(__x86)
	struct {
		int	bus;
		int	addr;
		int	size;
	} reg, *reglist;
	uint_t	length;
	int	rc;

	/*
	 * get the 'registers' or the 'reg' property.
	 * We look up the reg property as an array of
	 * int's.
	 */
	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "registers", (int **)&reglist, &length);
	if (rc != DDI_PROP_SUCCESS)
		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "reg", (int **)&reglist, &length);
	if (rc == DDI_PROP_SUCCESS) {
		/*
		 * point to the required entry.
		 */
		reg = reglist[rnumber];
		reg.addr += offset;
		if (len != 0)
			reg.size = len;
		/*
		 * make a new property containing ONLY the required tuple.
		 */
		if (ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
		    chosen_reg, (int *)&reg, (sizeof (reg)/sizeof (int)))
		    != DDI_PROP_SUCCESS) {
			/* non-fatal: the mapping below is still attempted */
			cmn_err(CE_WARN, "%s%d: cannot create '%s' "
			    "property", DEVI(dip)->devi_name,
			    DEVI(dip)->devi_instance, chosen_reg);
		}
		/*
		 * free the memory allocated by
		 * ddi_prop_lookup_int_array ().
		 */
		ddi_prop_free((void *)reglist);
	}
#endif
	/* Build a map-by-rnumber request for a locked kernel mapping. */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to map in my regs.
	 */

	return (ddi_map(dip, &mr, offset, len, kaddrp));
}
219 
/*
 * ddi_unmap_regs: undo a ddi_map_regs() mapping.  Clears *kaddrp on
 * return and, on x86, removes the 'chosen-reg' property created at map
 * time.  The unmap itself is delegated to the parent via ddi_map().
 */
void
ddi_unmap_regs(dev_info_t *dip, uint_t rnumber, caddr_t *kaddrp, off_t offset,
    off_t len)
{
	ddi_map_req_t mr;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_prot = PROT_READ | PROT_WRITE;	/* who cares? */
	mr.map_obj.rnumber = rnumber;
	mr.map_handlep = NULL;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */

	(void) ddi_map(dip, &mr, offset, len, kaddrp);
	*kaddrp = (caddr_t)0;
#if defined(__x86)
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, chosen_reg);
#endif
}
244 
/* Thin DDI wrapper over the implementation routine i_ddi_bus_map(). */
int
ddi_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	return (i_ddi_bus_map(dip, rdip, mp, offset, len, vaddrp));
}
251 
252 /*
253  * nullbusmap:	The/DDI default bus_map entry point for nexi
254  *		not conforming to the reg/range paradigm (i.e. scsi, etc.)
255  *		with no HAT/MMU layer to be programmed at this level.
256  *
257  *		If the call is to map by rnumber, return an error,
258  *		otherwise pass anything else up the tree to my parent.
259  */
int
nullbusmap(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t offset, off_t len, caddr_t *vaddrp)
{
	_NOTE(ARGUNUSED(rdip))
	/* Map-by-rnumber cannot be satisfied by a non-reg/range nexus. */
	if (mp->map_type == DDI_MT_RNUMBER)
		return (DDI_ME_UNSUPPORTED);

	/* Anything else is punted up the tree to the parent. */
	return (ddi_map(dip, mp, offset, len, vaddrp));
}
270 
271 /*
272  * ddi_rnumber_to_regspec: Not for use by leaf drivers.
273  *			   Only for use by nexi using the reg/range paradigm.
274  */
/* Thin wrapper over i_ddi_rnumber_to_regspec(); nexus-only interface. */
struct regspec *
ddi_rnumber_to_regspec(dev_info_t *dip, int rnumber)
{
	return (i_ddi_rnumber_to_regspec(dip, rnumber));
}
280 
281 
282 /*
283  * Note that we allow the dip to be nil because we may be called
284  * prior even to the instantiation of the devinfo tree itself - all
285  * regular leaf and nexus drivers should always use a non-nil dip!
286  *
287  * We treat peek in a somewhat cavalier fashion .. assuming that we'll
288  * simply get a synchronous fault as soon as we touch a missing address.
289  *
290  * Poke is rather more carefully handled because we might poke to a write
291  * buffer, "succeed", then only find some time later that we got an
292  * asynchronous fault that indicated that the address we were writing to
293  * was not really backed by hardware.
294  */
295 
296 static int
297 i_ddi_peekpoke(dev_info_t *devi, ddi_ctl_enum_t cmd, size_t size,
298     void *addr, void *value_p)
299 {
300 	union {
301 		uint64_t	u64;
302 		uint32_t	u32;
303 		uint16_t	u16;
304 		uint8_t		u8;
305 	} peekpoke_value;
306 
307 	peekpoke_ctlops_t peekpoke_args;
308 	uint64_t dummy_result;
309 	int rval;
310 
311 	/* Note: size is assumed to be correct;  it is not checked. */
312 	peekpoke_args.size = size;
313 	peekpoke_args.dev_addr = (uintptr_t)addr;
314 	peekpoke_args.handle = NULL;
315 	peekpoke_args.repcount = 1;
316 	peekpoke_args.flags = 0;
317 
318 	if (cmd == DDI_CTLOPS_POKE) {
319 		switch (size) {
320 		case sizeof (uint8_t):
321 			peekpoke_value.u8 = *(uint8_t *)value_p;
322 			break;
323 		case sizeof (uint16_t):
324 			peekpoke_value.u16 = *(uint16_t *)value_p;
325 			break;
326 		case sizeof (uint32_t):
327 			peekpoke_value.u32 = *(uint32_t *)value_p;
328 			break;
329 		case sizeof (uint64_t):
330 			peekpoke_value.u64 = *(uint64_t *)value_p;
331 			break;
332 		}
333 	}
334 
335 	peekpoke_args.host_addr = (uintptr_t)&peekpoke_value.u64;
336 
337 	if (devi != NULL)
338 		rval = ddi_ctlops(devi, devi, cmd, &peekpoke_args,
339 		    &dummy_result);
340 	else
341 		rval = peekpoke_mem(cmd, &peekpoke_args);
342 
343 	/*
344 	 * A NULL value_p is permitted by ddi_peek(9F); discard the result.
345 	 */
346 	if ((cmd == DDI_CTLOPS_PEEK) & (value_p != NULL)) {
347 		switch (size) {
348 		case sizeof (uint8_t):
349 			*(uint8_t *)value_p = peekpoke_value.u8;
350 			break;
351 		case sizeof (uint16_t):
352 			*(uint16_t *)value_p = peekpoke_value.u16;
353 			break;
354 		case sizeof (uint32_t):
355 			*(uint32_t *)value_p = peekpoke_value.u32;
356 			break;
357 		case sizeof (uint64_t):
358 			*(uint64_t *)value_p = peekpoke_value.u64;
359 			break;
360 		}
361 	}
362 
363 	return (rval);
364 }
365 
366 /*
367  * Keep ddi_peek() and ddi_poke() in case 3rd parties are calling this.
368  * they shouldn't be, but the 9f manpage kind of pseudo exposes it.
369  */
int
ddi_peek(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	/* Only 1/2/4/8-byte transfers are legal; reject anything else. */
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, size, addr, value_p));
}
385 
int
ddi_poke(dev_info_t *devi, size_t size, void *addr, void *value_p)
{
	/* Only 1/2/4/8-byte transfers are legal; reject anything else. */
	switch (size) {
	case sizeof (uint8_t):
	case sizeof (uint16_t):
	case sizeof (uint32_t):
	case sizeof (uint64_t):
		break;
	default:
		return (DDI_FAILURE);
	}

	return (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, size, addr, value_p));
}
401 
/* Fixed-width peek wrappers; size is implied by the value type. */
int
ddi_peek8(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek16(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek32(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peek64(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
429 
430 
431 /*
432  * We need to separate the old interfaces from the new ones and leave them
433  * in here for a while. Previous versions of the OS defined the new interfaces
434  * to the old interfaces. This way we can fix things up so that we can
435  * eventually remove these interfaces.
436  * e.g. A 3rd party module/driver using ddi_peek8 and built against S10
437  * or earlier will actually have a reference to ddi_peekc in the binary.
438  */
439 #ifdef _ILP32
/*
 * Obsolete ILP32-only compatibility names (pre-S10 binaries reference
 * ddi_peekc/s/l/d); each is identical to the corresponding ddi_peekN.
 */
int
ddi_peekc(dev_info_t *dip, int8_t *addr, int8_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peeks(dev_info_t *dip, int16_t *addr, int16_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekl(dev_info_t *dip, int32_t *addr, int32_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}

int
ddi_peekd(dev_info_t *dip, int64_t *addr, int64_t *val_p)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_PEEK, sizeof (*val_p), addr,
	    val_p));
}
467 #endif /* _ILP32 */
468 
/* Fixed-width poke wrappers; the value is passed by value and staged here. */
int
ddi_poke8(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke16(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke32(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poke64(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
492 
493 /*
494  * We need to separate the old interfaces from the new ones and leave them
495  * in here for a while. Previous versions of the OS defined the new interfaces
496  * to the old interfaces. This way we can fix things up so that we can
497  * eventually remove these interfaces.
498  * e.g. A 3rd party module/driver using ddi_poke8 and built against S10
499  * or earlier will actually have a reference to ddi_pokec in the binary.
500  */
501 #ifdef _ILP32
/*
 * Obsolete ILP32-only compatibility names (pre-S10 binaries reference
 * ddi_pokec/s/l/d); each is identical to the corresponding ddi_pokeN.
 */
int
ddi_pokec(dev_info_t *dip, int8_t *addr, int8_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokes(dev_info_t *dip, int16_t *addr, int16_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_pokel(dev_info_t *dip, int32_t *addr, int32_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}

int
ddi_poked(dev_info_t *dip, int64_t *addr, int64_t val)
{
	return (i_ddi_peekpoke(dip, DDI_CTLOPS_POKE, sizeof (val), addr, &val));
}
525 #endif /* _ILP32 */
526 
527 /*
528  * ddi_peekpokeio() is used primarily by the mem drivers for moving
529  * data to and from uio structures via peek and poke.  Note that we
530  * use "internal" routines ddi_peek and ddi_poke to make this go
531  * slightly faster, avoiding the call overhead ..
532  */
/*
 * ddi_peekpokeio: move up to 'len' bytes between device address 'addr'
 * and the uio via peek (UIO_READ) or poke (UIO_WRITE), choosing the
 * largest transfer size that both the residual length and the current
 * address alignment permit (capped at xfersize and sizeof (long)).
 */
int
ddi_peekpokeio(dev_info_t *devi, struct uio *uio, enum uio_rw rw,
    caddr_t addr, size_t len, uint_t xfersize)
{
	int64_t	ibuffer;
	int8_t w8;
	size_t sz;
	int o;

	if (xfersize > sizeof (long))
		xfersize = sizeof (long);

	while (len != 0) {
		/* Odd length or odd address forces a byte transfer. */
		if ((len | (uintptr_t)addr) & 1) {
			sz = sizeof (int8_t);
			if (rw == UIO_WRITE) {
				if ((o = uwritec(uio)) == -1)
					return (DDI_FAILURE);
				if (ddi_poke8(devi, (int8_t *)addr,
				    (int8_t)o) != DDI_SUCCESS)
					return (DDI_FAILURE);
			} else {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    (int8_t *)addr, &w8) != DDI_SUCCESS)
					return (DDI_FAILURE);
				if (ureadc(w8, uio))
					return (DDI_FAILURE);
			}
		} else {
			/* Fall through to the widest size that fits alignment. */
			switch (xfersize) {
			case sizeof (int64_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int64_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			case sizeof (int32_t):
				if (((len | (uintptr_t)addr) &
				    (sizeof (int32_t) - 1)) == 0) {
					sz = xfersize;
					break;
				}
				/*FALLTHROUGH*/
			default:
				/*
				 * This still assumes that we might have an
				 * I/O bus out there that permits 16-bit
				 * transfers (and that it would be upset by
				 * 32-bit transfers from such locations).
				 */
				sz = sizeof (int16_t);
				break;
			}

			if (rw == UIO_READ) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_PEEK, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}

			if (uiomove(&ibuffer, sz, rw, uio))
				return (DDI_FAILURE);

			if (rw == UIO_WRITE) {
				if (i_ddi_peekpoke(devi, DDI_CTLOPS_POKE, sz,
				    addr, &ibuffer) != DDI_SUCCESS)
					return (DDI_FAILURE);
			}
		}
		addr += sz;
		len -= sz;
	}
	return (DDI_SUCCESS);
}
608 
609 /*
610  * These routines are used by drivers that do layered ioctls
611  * On sparc, they're implemented in assembler to avoid spilling
612  * register windows in the common (copyin) case ..
613  */
614 #if !defined(__sparc)
/*
 * ddi_copyin/ddi_copyout: layered-ioctl copy helpers.  FKIOCTL marks a
 * kernel-originated ioctl, so the "user" address is really a kernel
 * address and kcopy() is used; otherwise the normal copyin/copyout path
 * applies.  Both return 0 on success, -1 (kcopy) or copyin/copyout's
 * failure value otherwise.
 */
int
ddi_copyin(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyin(buf, kernbuf, size));
}

int
ddi_copyout(const void *buf, void *kernbuf, size_t size, int flags)
{
	if (flags & FKIOCTL)
		return (kcopy(buf, kernbuf, size) ? -1 : 0);
	return (copyout(buf, kernbuf, size));
}
630 #endif	/* !__sparc */
631 
632 /*
633  * Conversions in nexus pagesize units.  We don't duplicate the
634  * 'nil dip' semantics of peek/poke because btopr/btop/ptob are DDI/DKI
635  * routines anyway.
636  */
/* Bytes-to-pages (truncating) in the parent nexus's pagesize units. */
unsigned long
ddi_btop(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOP, &bytes, &pages);
	return (pages);
}

/* Bytes-to-pages, rounding up, in the parent nexus's pagesize units. */
unsigned long
ddi_btopr(dev_info_t *dip, unsigned long bytes)
{
	unsigned long pages;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_BTOPR, &bytes, &pages);
	return (pages);
}

/* Pages-to-bytes in the parent nexus's pagesize units. */
unsigned long
ddi_ptob(dev_info_t *dip, unsigned long pages)
{
	unsigned long bytes;

	(void) ddi_ctlops(dip, dip, DDI_CTLOPS_PTOB, &pages, &bytes);
	return (bytes);
}
663 
/* Raise to spl7 (block most interrupts); returns the previous level. */
unsigned int
ddi_enter_critical(void)
{
	return ((uint_t)spl7());
}

/* Restore the interrupt level saved by ddi_enter_critical(). */
void
ddi_exit_critical(unsigned int spl)
{
	splx((int)spl);
}
675 
676 /*
677  * Nexus ctlops punter
678  */
679 
680 #if !defined(__sparc)
681 /*
682  * Request bus_ctl parent to handle a bus_ctl request
683  *
684  * (The sparc version is in sparc_ddi.s)
685  */
int
ddi_ctlops(dev_info_t *d, dev_info_t *r, ddi_ctl_enum_t op, void *a, void *v)
{
	int (*fp)();

	if (!d || !r)
		return (DDI_FAILURE);

	/* Walk up to the cached bus_ctl-implementing ancestor. */
	if ((d = (dev_info_t *)DEVI(d)->devi_bus_ctl) == NULL)
		return (DDI_FAILURE);

	fp = DEVI(d)->devi_ops->devo_bus_ops->bus_ctl;
	return ((*fp)(d, r, op, a, v));
}
700 
701 #endif
702 
703 /*
704  * DMA/DVMA setup
705  */
706 
/*
 * Default DMA limits, used when a caller of the old-style
 * ddi_dma_*_setup() interfaces passes a nil limits pointer.
 */
#if defined(__sparc)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,	/* addr_t dlim_addr_lo */
	(uint_t)-1,	/* addr_t dlim_addr_hi */
	(uint_t)-1,	/* uint_t dlim_cntr_max */
	(uint_t)1,	/* uint_t dlim_burstsizes */
	(uint_t)1,	/* uint_t dlim_minxfer */
	0		/* uint_t dlim_dmaspeed */
};
#elif defined(__x86)
static ddi_dma_lim_t standard_limits = {
	(uint_t)0,		/* addr_t dlim_addr_lo */
	(uint_t)0xffffff,	/* addr_t dlim_addr_hi */
	(uint_t)0,		/* uint_t dlim_cntr_max */
	(uint_t)0x00000001,	/* uint_t dlim_burstsizes */
	(uint_t)DMA_UNIT_8,	/* uint_t dlim_minxfer */
	(uint_t)0,		/* uint_t dlim_dmaspeed */
	((uint_t)0x86 << 24),	/* uint_t dlim_version (0x86000000) */
	(uint_t)0xffff,		/* uint_t dlim_adreg_max */
	(uint_t)0xffff,		/* uint_t dlim_ctreg_max */
	(uint_t)512,		/* uint_t dlim_granular */
	(int)1,			/* int dlim_sgllen */
	(uint_t)0xffffffff	/* uint_t dlim_reqsizes */
};

#endif
733 
/*
 * ddi_dma_setup: old-style DMA mapping setup.  On sparc a nil limits
 * pointer is replaced by standard_limits (copied to the stack so the
 * caller's request is not modified persistently); on x86 nil limits are
 * an error.  The request is then passed to bus_dma_map.
 */
int
ddi_dma_setup(dev_info_t *dip, struct ddi_dma_req *dmareqp,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	struct bus_ops *bop;
#if defined(__sparc)
	auto ddi_dma_lim_t dma_lim;

	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *dmareqp->dmar_limits;
	}
	dmareqp->dmar_limits = &dma_lim;
#endif
#if defined(__x86)
	if (dmareqp->dmar_limits == (ddi_dma_lim_t *)0)
		return (DDI_FAILURE);
#endif

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;
	return ((*funcp)(dip, dip, dmareqp, handlep));
}
766 
/*
 * ddi_dma_addr_setup: build a DMA request for a virtual address range
 * [addr, addr+len) in address space 'as' and hand it to bus_dma_map.
 * A zero length is rejected; nil limits default to standard_limits.
 */
int
ddi_dma_addr_setup(dev_info_t *dip, struct as *as, caddr_t addr, size_t len,
    uint_t flags, int (*waitfp)(), caddr_t arg,
    ddi_dma_lim_t *limits, ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (len == 0) {
		return (DDI_DMA_NOMAPPING);
	}
	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
807 
/*
 * ddi_dma_buf_setup: build a DMA request from a buf(9S) structure and
 * hand it to bus_dma_map.  The DMA object type depends on the buffer:
 * B_PAGEIO buffers are described by their page list, everything else as
 * a (possibly shadow-paged) virtual address range in the appropriate
 * address space.
 */
int
ddi_dma_buf_setup(dev_info_t *dip, struct buf *bp, uint_t flags,
    int (*waitfp)(), caddr_t arg, ddi_dma_lim_t *limits,
    ddi_dma_handle_t *handlep)
{
	int (*funcp)() = ddi_dma_map;
	ddi_dma_lim_t dma_lim;
	struct ddi_dma_req dmareq;
	struct bus_ops *bop;

	if (limits == (ddi_dma_lim_t *)0) {
		dma_lim = standard_limits;
	} else {
		dma_lim = *limits;
	}
	dmareq.dmar_limits = &dma_lim;
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* Paged I/O: describe the buffer by its page list. */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/*
	 * Handle the case that the requester is both a leaf
	 * and a nexus driver simultaneously by calling the
	 * requester's bus_dma_map function directly instead
	 * of ddi_dma_map.
	 */
	bop = DEVI(dip)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_map)
		funcp = bop->bus_dma_map;

	return ((*funcp)(dip, dip, &dmareq, handlep));
}
872 
873 #if !defined(__sparc)
874 /*
875  * Request bus_dma_ctl parent to fiddle with a dma request.
876  *
877  * (The sparc version is in sparc_subr.s)
878  */
int
ddi_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	int (*fp)();

	/* Route the control op to the cached bus_dma_ctl ancestor. */
	dip = (dev_info_t *)DEVI(dip)->devi_bus_dma_ctl;
	fp = DEVI(dip)->devi_ops->devo_bus_ops->bus_dma_ctl;
	return ((*fp) (dip, rdip, handle, request, offp, lenp, objp, flags));
}
890 #endif
891 
892 /*
893  * For all DMA control functions, call the DMA control
894  * routine and return status.
895  *
896  * Just plain assume that the parent is to be called.
897  * If a nexus driver or a thread outside the framework
898  * of a nexus driver or a leaf driver calls these functions,
899  * it is up to them to deal with the fact that the parent's
900  * bus_dma_ctl function will be the first one called.
901  */
902 
903 #define	HD	((ddi_dma_impl_t *)h)->dmai_rdip
904 
/* Kernel-virtual address of a mapped range: DDI_DMA_KVADDR control op. */
int
ddi_dma_kvaddrp(ddi_dma_handle_t h, off_t off, size_t len, caddr_t *kp)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_KVADDR, &off, &len, kp, 0));
}

/* Handle-to-cookie at offset o: DDI_DMA_HTOC control op. */
int
ddi_dma_htoc(ddi_dma_handle_t h, off_t o, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_HTOC, &o, 0, (caddr_t *)c, 0));
}

/* Cookie-to-offset: DDI_DMA_COFF control op. */
int
ddi_dma_coff(ddi_dma_handle_t h, ddi_dma_cookie_t *c, off_t *o)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_COFF,
	    (off_t *)c, 0, (caddr_t *)o, 0));
}

/* Move the active DMA window: DDI_DMA_MOVWIN control op. */
int
ddi_dma_movwin(ddi_dma_handle_t h, off_t *o, size_t *l, ddi_dma_cookie_t *c)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_MOVWIN, o,
	    l, (caddr_t *)c, 0));
}

/* Report the current window; only valid for DDI_DMA_PARTIAL mappings. */
int
ddi_dma_curwin(ddi_dma_handle_t h, off_t *o, size_t *l)
{
	if ((((ddi_dma_impl_t *)h)->dmai_rflags & DDI_DMA_PARTIAL) == 0)
		return (DDI_FAILURE);
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_REPWIN, o, l, 0, 0));
}

/* Advance to the next DMA window: DDI_DMA_NEXTWIN control op. */
int
ddi_dma_nextwin(ddi_dma_handle_t h, ddi_dma_win_t win,
    ddi_dma_win_t *nwin)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTWIN, (off_t *)&win, 0,
	    (caddr_t *)nwin, 0));
}

/* Advance to the next segment of a window: DDI_DMA_NEXTSEG control op. */
int
ddi_dma_nextseg(ddi_dma_win_t win, ddi_dma_seg_t seg, ddi_dma_seg_t *nseg)
{
	/* A window doubles as a handle for the purposes of the mctl call. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)win;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_NEXTSEG, (off_t *)&win,
	    (size_t *)&seg, (caddr_t *)nseg, 0));
}
955 
956 #if (defined(__i386) && !defined(__amd64)) || defined(__sparc)
957 /*
958  * This routine is Obsolete and should be removed from ALL architectures
959  * in a future release of Solaris.
960  *
961  * It is deliberately NOT ported to amd64; please fix the code that
962  * depends on this routine to use ddi_dma_nextcookie(9F).
963  *
964  * NOTE: even though we fixed the pointer through a 32-bit param issue (the fix
965  * is a side effect to some other cleanup), we're still not going to support
966  * this interface on x64.
967  */
int
ddi_dma_segtocookie(ddi_dma_seg_t seg, off_t *o, off_t *l,
    ddi_dma_cookie_t *cookiep)
{
	/* A segment doubles as a handle for the purposes of the mctl call. */
	ddi_dma_handle_t h = (ddi_dma_handle_t)seg;

	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SEGTOC, o, (size_t *)l,
	    (caddr_t *)cookiep, 0));
}
977 #endif	/* (__i386 && !__amd64) || __sparc */
978 
979 #if !defined(__sparc)
980 
981 /*
982  * The SPARC versions of these routines are done in assembler to
983  * save register windows, so they're in sparc_subr.s.
984  */
985 
/* Forward a DMA map request to the cached bus_dma_map ancestor of dip. */
int
ddi_dma_map(dev_info_t *dip, dev_info_t *rdip,
	struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, struct ddi_dma_req *,
	    ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_map;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_map;
	return ((*funcp)(hdip, rdip, dmareqp, handlep));
}
998 }
999 
/* Forward handle allocation to the cached bus_dma_allochdl ancestor. */
int
ddi_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_attr_t *,
	    int (*)(caddr_t), caddr_t, ddi_dma_handle_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_allochdl;
	return ((*funcp)(hdip, rdip, attr, waitfp, arg, handlep));
}
1013 
int
ddi_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handlep)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	/*
	 * The freehdl op is routed through the same ancestor that handled
	 * allocation (devi_bus_dma_allochdl), so alloc/free stay paired.
	 */
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_allochdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_freehdl;
	return ((*funcp)(hdip, rdip, handlep));
}
1025 
/* Forward a bind request to the cached bus_dma_bindhdl ancestor. */
int
ddi_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    struct ddi_dma_req *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_bindhdl;
	return ((*funcp)(hdip, rdip, handle, dmareq, cp, ccountp));
}
1040 
/* Forward an unbind request to the cached bus_dma_unbindhdl ancestor. */
int
ddi_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_unbindhdl;
	return ((*funcp)(hdip, rdip, handle));
}
1053 
1054 
/* Forward a flush/sync request to the cached bus_dma_flush ancestor. */
int
ddi_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    off_t, size_t, uint_t);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, rdip, handle, off, len, cache_flags));
}
1069 
/* Forward a window-activation request to the cached bus_dma_win ancestor. */
int
ddi_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	dev_info_t	*hdip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t,
	    uint_t, off_t *, size_t *, ddi_dma_cookie_t *, uint_t *);

	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_win;

	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_win;
	return ((*funcp)(hdip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
1085 
/*
 * ddi_dma_sync: synchronize a range [o, o+l) of a bound DMA object for
 * the view named by `whom'.  Fast-path exits when the nexus marked the
 * handle as not needing synchronization.
 */
int
ddi_dma_sync(ddi_dma_handle_t h, off_t o, size_t l, uint_t whom)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t, off_t,
	    size_t, uint_t);

	/*
	 * the DMA nexus driver will set DMP_NOSYNC if the
	 * platform does not require any sync operation. For
	 * example if the memory is uncached or consistent
	 * and without any I/O write buffers involved.
	 */
	if ((hp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * Sync is implemented in terms of the nexus bus_dma_flush
	 * operation; there is no separate bus-level sync entry point.
	 * The requester dip is recovered from the handle itself.
	 */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_flush;
	funcp = DEVI(hdip)->devi_ops->devo_bus_ops->bus_dma_flush;
	return ((*funcp)(hdip, dip, h, o, l, whom));
}
1108 
/*
 * ddi_dma_unbind_handle: unbind a DMA handle given only the handle;
 * the requester dip is recovered from the handle implementation.
 */
int
ddi_dma_unbind_handle(ddi_dma_handle_t h)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)h;
	dev_info_t *hdip, *dip;
	int (*funcp)(dev_info_t *, dev_info_t *, ddi_dma_handle_t);

	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_unbindhdl;
	/*
	 * NOTE(review): unlike ddi_dma_unbindhdl() above, this path uses
	 * the function pointer cached in the requester's devinfo
	 * (devi_bus_dma_unbindfunc) rather than re-deriving it from
	 * DEVI(hdip)->devi_ops; presumably the cache is populated at
	 * bind time -- confirm where devi_bus_dma_unbindfunc is set.
	 */
	funcp = DEVI(dip)->devi_bus_dma_unbindfunc;
	return ((*funcp)(hdip, dip, h));
}
1121 
1122 #endif	/* !__sparc */
1123 
/*
 * ddi_dma_free: release a DMA handle via the legacy ddi_dma_mctl()
 * control interface.  HD is a macro defined earlier in this file (not
 * visible in this chunk); presumably it supplies the dip arguments
 * derived from the handle -- confirm against its definition.
 */
int
ddi_dma_free(ddi_dma_handle_t h)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_FREE, 0, 0, 0, 0));
}
1129 
1130 int
1131 ddi_iopb_alloc(dev_info_t *dip, ddi_dma_lim_t *limp, uint_t len, caddr_t *iopbp)
1132 {
1133 	ddi_dma_lim_t defalt;
1134 	size_t size = len;
1135 
1136 	if (!limp) {
1137 		defalt = standard_limits;
1138 		limp = &defalt;
1139 	}
1140 	return (i_ddi_mem_alloc_lim(dip, limp, size, 0, 0, 0,
1141 	    iopbp, NULL, NULL));
1142 }
1143 
/*
 * ddi_iopb_free: release an I/O parameter block previously allocated
 * with ddi_iopb_alloc().
 */
void
ddi_iopb_free(caddr_t iopb)
{
	i_ddi_mem_free(iopb, NULL);
}
1149 
1150 int
1151 ddi_mem_alloc(dev_info_t *dip, ddi_dma_lim_t *limits, uint_t length,
1152 	uint_t flags, caddr_t *kaddrp, uint_t *real_length)
1153 {
1154 	ddi_dma_lim_t defalt;
1155 	size_t size = length;
1156 
1157 	if (!limits) {
1158 		defalt = standard_limits;
1159 		limits = &defalt;
1160 	}
1161 	return (i_ddi_mem_alloc_lim(dip, limits, size, flags & 0x1,
1162 	    1, 0, kaddrp, real_length, NULL));
1163 }
1164 
/*
 * ddi_mem_free: release memory previously allocated with
 * ddi_mem_alloc().
 */
void
ddi_mem_free(caddr_t kaddr)
{
	i_ddi_mem_free(kaddr, NULL);
}
1170 
1171 /*
1172  * DMA attributes, alignment, burst sizes, and transfer minimums
1173  */
1174 int
1175 ddi_dma_get_attr(ddi_dma_handle_t handle, ddi_dma_attr_t *attrp)
1176 {
1177 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1178 
1179 	if (attrp == NULL)
1180 		return (DDI_FAILURE);
1181 	*attrp = dimp->dmai_attr;
1182 	return (DDI_SUCCESS);
1183 }
1184 
1185 int
1186 ddi_dma_burstsizes(ddi_dma_handle_t handle)
1187 {
1188 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1189 
1190 	if (!dimp)
1191 		return (0);
1192 	else
1193 		return (dimp->dmai_burstsizes);
1194 }
1195 
1196 int
1197 ddi_dma_devalign(ddi_dma_handle_t handle, uint_t *alignment, uint_t *mineffect)
1198 {
1199 	ddi_dma_impl_t *dimp = (ddi_dma_impl_t *)handle;
1200 
1201 	if (!dimp || !alignment || !mineffect)
1202 		return (DDI_FAILURE);
1203 	if (!(dimp->dmai_rflags & DDI_DMA_SBUS_64BIT)) {
1204 		*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1205 	} else {
1206 		if (dimp->dmai_burstsizes & 0xff0000) {
1207 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes >> 16);
1208 		} else {
1209 			*alignment = 1 << ddi_ffs(dimp->dmai_burstsizes);
1210 		}
1211 	}
1212 	*mineffect = dimp->dmai_minxfer;
1213 	return (DDI_SUCCESS);
1214 }
1215 
1216 int
1217 ddi_iomin(dev_info_t *a, int i, int stream)
1218 {
1219 	int r;
1220 
1221 	/*
1222 	 * Make sure that the initial value is sane
1223 	 */
1224 	if (i & (i - 1))
1225 		return (0);
1226 	if (i == 0)
1227 		i = (stream) ? 4 : 1;
1228 
1229 	r = ddi_ctlops(a, a,
1230 	    DDI_CTLOPS_IOMIN, (void *)(uintptr_t)stream, (void *)&i);
1231 	if (r != DDI_SUCCESS || (i & (i - 1)))
1232 		return (0);
1233 	return (i);
1234 }
1235 
1236 /*
1237  * Given two DMA attribute structures, apply the attributes
1238  * of one to the other, following the rules of attributes
1239  * and the wishes of the caller.
1240  *
1241  * The rules of DMA attribute structures are that you cannot
1242  * make things *less* restrictive as you apply one set
1243  * of attributes to another.
1244  *
1245  */
void
ddi_dma_attr_merge(ddi_dma_attr_t *attr, ddi_dma_attr_t *mod)
{
	/* Address window can only shrink: raise the floor, lower the cap. */
	attr->dma_attr_addr_lo =
	    MAX(attr->dma_attr_addr_lo, mod->dma_attr_addr_lo);
	attr->dma_attr_addr_hi =
	    MIN(attr->dma_attr_addr_hi, mod->dma_attr_addr_hi);
	attr->dma_attr_count_max =
	    MIN(attr->dma_attr_count_max, mod->dma_attr_count_max);
	/* Larger alignment is more restrictive. */
	attr->dma_attr_align =
	    MAX(attr->dma_attr_align,  mod->dma_attr_align);
	/* Only burst sizes both sides support survive. */
	attr->dma_attr_burstsizes =
	    (uint_t)(attr->dma_attr_burstsizes & mod->dma_attr_burstsizes);
	/*
	 * maxbit() is a helper defined elsewhere in this file; presumably
	 * it keeps the larger (more restrictive) minimum transfer --
	 * confirm against its definition.
	 */
	attr->dma_attr_minxfer =
	    maxbit(attr->dma_attr_minxfer, mod->dma_attr_minxfer);
	attr->dma_attr_maxxfer =
	    MIN(attr->dma_attr_maxxfer, mod->dma_attr_maxxfer);
	attr->dma_attr_seg = MIN(attr->dma_attr_seg, mod->dma_attr_seg);
	/* Fewer scatter/gather entries is more restrictive. */
	attr->dma_attr_sgllen = MIN((uint_t)attr->dma_attr_sgllen,
	    (uint_t)mod->dma_attr_sgllen);
	attr->dma_attr_granular =
	    MAX(attr->dma_attr_granular, mod->dma_attr_granular);
}
1269 
1270 /*
1271  * mmap/segmap interface:
1272  */
1273 
1274 /*
1275  * ddi_segmap:		setup the default segment driver. Calls the drivers
1276  *			XXmmap routine to validate the range to be mapped.
1277  *			Return ENXIO of the range is not valid.  Create
1278  *			a seg_dev segment that contains all of the
1279  *			necessary information and will reference the
1280  *			default segment driver routines. It returns zero
1281  *			on success or non-zero on failure.
1282  */
int
ddi_segmap(dev_t dev, off_t offset, struct as *asp, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* Forward declaration; presumably implemented in specfs. */
	extern int spec_segmap(dev_t, off_t, struct as *, caddr_t *,
	    off_t, uint_t, uint_t, uint_t, struct cred *);

	/* Delegate the whole segmap operation unchanged. */
	return (spec_segmap(dev, offset, asp, addrp, len,
	    prot, maxprot, flags, credp));
}
1293 
1294 /*
1295  * ddi_map_fault:	Resolve mappings at fault time.  Used by segment
1296  *			drivers. Allows each successive parent to resolve
1297  *			address translations and add its mappings to the
1298  *			mapping list supplied in the page structure. It
1299  *			returns zero on success	or non-zero on failure.
1300  */
1301 
int
ddi_map_fault(dev_info_t *dip, struct hat *hat, struct seg *seg,
    caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock)
{
	/* Start the iterative fault resolution with dip as its own parent. */
	return (i_ddi_map_fault(dip, dip, hat, seg, addr, dp, pfn, prot, lock));
}
1308 
1309 /*
1310  * ddi_device_mapping_check:	Called from ddi_segmap_setup.
1311  *	Invokes platform specific DDI to determine whether attributes specified
1312  *	in attr(9s) are	valid for the region of memory that will be made
1313  *	available for direct access to user process via the mmap(2) system call.
1314  */
/*
 * Returns 0 on success with *hat_flags filled in, -1 on any failure
 * (device not found, handle allocation failure, or unmappable region).
 */
int
ddi_device_mapping_check(dev_t dev, ddi_device_acc_attr_t *accattrp,
    uint_t rnumber, uint_t *hat_flags)
{
	ddi_acc_handle_t handle;
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	dev_info_t *dip;

	/*
	 * we use e_ddi_hold_devi_by_dev to search for the devi.  We
	 * release it immediately since it should already be held by
	 * a devfs vnode.
	 */
	if ((dip =
	    e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH)) == NULL)
		return (-1);
	ddi_release_devi(dip);		/* for e_ddi_hold_devi_by_dev() */

	/*
	 * Allocate and initialize the common elements of data
	 * access handle.
	 */
	handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	/*
	 * NOTE(review): with KM_SLEEP this presumably cannot return NULL;
	 * the check is kept as defensive coding -- confirm against
	 * impl_acc_hdl_alloc's contract.
	 */
	if (handle == NULL)
		return (-1);

	hp = impl_acc_hdl_get(handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_HANDLE;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, 0, 0, NULL);

	/*
	 * Region must be mappable, pick up flags from the framework.
	 */
	*hat_flags = hp->ah_hat_flags;

	impl_acc_hdl_free(handle);

	/*
	 * check for end result.
	 */
	if (result != DDI_SUCCESS)
		return (-1);
	return (0);
}
1377 
1378 
1379 /*
1380  * Property functions:	 See also, ddipropdefs.h.
1381  *
1382  * These functions are the framework for the property functions,
1383  * i.e. they support software defined properties.  All implementation
1384  * specific property handling (i.e.: self-identifying devices and
1385  * PROM defined properties are handled in the implementation specific
1386  * functions (defined in ddi_implfuncs.h).
1387  */
1388 
1389 /*
1390  * nopropop:	Shouldn't be called, right?
1391  */
/*
 * Stub prop_op entry point: always reports the property as not found.
 * The _NOTE annotation suppresses unused-argument lint warnings.
 */
int
nopropop(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	_NOTE(ARGUNUSED(dev, dip, prop_op, mod_flags, name, valuep, lengthp))
	return (DDI_PROP_NOT_FOUND);
}
1399 
1400 #ifdef	DDI_PROP_DEBUG
1401 int ddi_prop_debug_flag = 0;
1402 
1403 int
1404 ddi_prop_debug(int enable)
1405 {
1406 	int prev = ddi_prop_debug_flag;
1407 
1408 	if ((enable != 0) || (prev != 0))
1409 		printf("ddi_prop_debug: debugging %s\n",
1410 		    enable ? "enabled" : "disabled");
1411 	ddi_prop_debug_flag = enable;
1412 	return (prev);
1413 }
1414 
1415 #endif	/* DDI_PROP_DEBUG */
1416 
1417 /*
1418  * Search a property list for a match, if found return pointer
1419  * to matching prop struct, else return NULL.
1420  */
1421 
1422 ddi_prop_t *
1423 i_ddi_prop_search(dev_t dev, char *name, uint_t flags, ddi_prop_t **list_head)
1424 {
1425 	ddi_prop_t	*propp;
1426 
1427 	/*
1428 	 * find the property in child's devinfo:
1429 	 * Search order defined by this search function is first matching
1430 	 * property with input dev == DDI_DEV_T_ANY matching any dev or
1431 	 * dev == propp->prop_dev, name == propp->name, and the correct
1432 	 * data type as specified in the flags.  If a DDI_DEV_T_NONE dev
1433 	 * value made it this far then it implies a DDI_DEV_T_ANY search.
1434 	 */
1435 	if (dev == DDI_DEV_T_NONE)
1436 		dev = DDI_DEV_T_ANY;
1437 
1438 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
1439 
1440 		if (!DDI_STRSAME(propp->prop_name, name))
1441 			continue;
1442 
1443 		if ((dev != DDI_DEV_T_ANY) && (propp->prop_dev != dev))
1444 			continue;
1445 
1446 		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
1447 			continue;
1448 
1449 		return (propp);
1450 	}
1451 
1452 	return ((ddi_prop_t *)0);
1453 }
1454 
1455 /*
1456  * Search for property within devnames structures
1457  */
/*
 * Returns the matching global property or NULL.  The per-driver
 * devnames list is walked under dn_lock; the returned pointer is into
 * that list.
 */
ddi_prop_t *
i_ddi_search_global_prop(dev_t dev, char *name, uint_t flags)
{
	major_t		major;
	struct devnames	*dnp;
	ddi_prop_t	*propp;

	/*
	 * Valid dev_t value is needed to index into the
	 * correct devnames entry, therefore a dev_t
	 * value of DDI_DEV_T_ANY is not appropriate.
	 */
	ASSERT(dev != DDI_DEV_T_ANY);
	if (dev == DDI_DEV_T_ANY) {
		return ((ddi_prop_t *)0);
	}

	major = getmajor(dev);
	dnp = &(devnamesp[major]);

	/* Quick unlocked emptiness check before taking dn_lock. */
	if (dnp->dn_global_prop_ptr == NULL)
		return ((ddi_prop_t *)0);

	LOCK_DEV_OPS(&dnp->dn_lock);

	for (propp = dnp->dn_global_prop_ptr->prop_list;
	    propp != NULL;
	    propp = (ddi_prop_t *)propp->prop_next) {

		if (!DDI_STRSAME(propp->prop_name, name))
			continue;

		/*
		 * Unless the caller opted into any-dev matching
		 * (LDI_DEV_T_ANY) or rootnex-global semantics, the
		 * property's dev must match exactly.
		 */
		if ((!(flags & DDI_PROP_ROOTNEX_GLOBAL)) &&
		    (!(flags & LDI_DEV_T_ANY)) && (propp->prop_dev != dev))
			continue;

		/* Requested type bits must intersect the property's type. */
		if (((propp->prop_flags & flags) & DDI_PROP_TYPE_MASK) == 0)
			continue;

		/* Property found, return it */
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (propp);
	}

	UNLOCK_DEV_OPS(&dnp->dn_lock);
	return ((ddi_prop_t *)0);
}
1505 
1506 static char prop_no_mem_msg[] = "can't allocate memory for ddi property <%s>";
1507 
1508 /*
1509  * ddi_prop_search_global:
1510  *	Search the global property list within devnames
1511  *	for the named property.  Return the encoded value.
1512  */
static int
i_ddi_prop_search_global(dev_t dev, uint_t flags, char *name,
    void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	caddr_t		buffer;

	propp =  i_ddi_search_global_prop(dev, name, flags);

	/* Property NOT found, bail */
	if (propp == (ddi_prop_t *)0)
		return (DDI_PROP_NOT_FOUND);

	if (propp->prop_flags & DDI_PROP_UNDEF_IT)
		return (DDI_PROP_UNDEFINED);

	/* Sleep for memory only when the caller said it may. */
	if ((buffer = kmem_alloc(propp->prop_len,
	    (flags & DDI_PROP_CANSLEEP) ? KM_SLEEP : KM_NOSLEEP)) == NULL) {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * Return the encoded data
	 */
	*(caddr_t *)valuep = buffer;
	*lengthp = propp->prop_len;
	bcopy(propp->prop_val, buffer, propp->prop_len);

	return (DDI_PROP_SUCCESS);
}
1544 
1545 /*
1546  * ddi_prop_search_common:	Lookup and return the encoded value
1547  */
/*
 * Iteratively searches the software property lists of dip and, unless
 * DDI_PROP_DONTPASS is set, each ancestor up to the root, honoring the
 * prop_op request (existence / length / value-into-buffer /
 * value-allocated).  The per-node lists are examined under devi_lock;
 * for PROP_LEN_AND_VAL_ALLOC with DDI_PROP_CANSLEEP the buffer is
 * preallocated outside the lock and the search retried, since the
 * property may change size while the lock is dropped.
 */
int
ddi_prop_search_common(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    uint_t flags, char *name, void *valuep, uint_t *lengthp)
{
	ddi_prop_t	*propp;
	int		i;
	caddr_t		buffer;
	caddr_t		prealloc = NULL;	/* sleeping-alloc retry buffer */
	int		plength = 0;		/* size of prealloc */
	dev_info_t	*pdip;
	int		(*bop)();

	/*CONSTANTCONDITION*/
	while (1)  {

		mutex_enter(&(DEVI(dip)->devi_lock));


		/*
		 * find the property in child's devinfo:
		 * Search order is:
		 *	1. driver defined properties
		 *	2. system defined properties
		 *	3. driver global properties
		 *	4. boot defined properties
		 */

		propp = i_ddi_prop_search(dev, name, flags,
		    &(DEVI(dip)->devi_drv_prop_ptr));
		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_sys_prop_ptr));
		}
		if ((propp == NULL) && DEVI(dip)->devi_global_prop_list) {
			propp = i_ddi_prop_search(dev, name, flags,
			    &DEVI(dip)->devi_global_prop_list->prop_list);
		}

		if (propp == NULL)  {
			propp = i_ddi_prop_search(dev, name, flags,
			    &(DEVI(dip)->devi_hw_prop_ptr));
		}

		/*
		 * Software property found?
		 */
		if (propp != (ddi_prop_t *)0)	{

			/*
			 * If explicit undefine, return now.
			 */
			if (propp->prop_flags & DDI_PROP_UNDEF_IT) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_UNDEFINED);
			}

			/*
			 * If we only want to know if it exists, return now
			 */
			if (prop_op == PROP_EXISTS) {
				mutex_exit(&(DEVI(dip)->devi_lock));
				ASSERT(prealloc == NULL);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If length only request or prop length == 0,
			 * service request and return now.
			 */
			if ((prop_op == PROP_LEN) ||(propp->prop_len == 0)) {
				*lengthp = propp->prop_len;

				/*
				 * if prop_op is PROP_LEN_AND_VAL_ALLOC
				 * that means prop_len is 0, so set valuep
				 * also to NULL
				 */
				if (prop_op == PROP_LEN_AND_VAL_ALLOC)
					*(caddr_t *)valuep = NULL;

				mutex_exit(&(DEVI(dip)->devi_lock));
				if (prealloc)
					kmem_free(prealloc, plength);
				return (DDI_PROP_SUCCESS);
			}

			/*
			 * If LEN_AND_VAL_ALLOC and the request can sleep,
			 * drop the mutex, allocate the buffer, and go
			 * through the loop again.  If we already allocated
			 * the buffer, and the size of the property changed,
			 * keep trying...
			 */
			if ((prop_op == PROP_LEN_AND_VAL_ALLOC) &&
			    (flags & DDI_PROP_CANSLEEP))  {
				if (prealloc && (propp->prop_len != plength)) {
					kmem_free(prealloc, plength);
					prealloc = NULL;
				}
				if (prealloc == NULL)  {
					plength = propp->prop_len;
					mutex_exit(&(DEVI(dip)->devi_lock));
					prealloc = kmem_alloc(plength,
					    KM_SLEEP);
					continue;
				}
			}

			/*
			 * Allocate buffer, if required.  Either way,
			 * set `buffer' variable.
			 */
			i = *lengthp;			/* Get callers length */
			*lengthp = propp->prop_len;	/* Set callers length */

			switch (prop_op) {

			case PROP_LEN_AND_VAL_ALLOC:

				/*
				 * KM_NOSLEEP here: the sleeping case was
				 * handled above via prealloc.
				 */
				if (prealloc == NULL) {
					buffer = kmem_alloc(propp->prop_len,
					    KM_NOSLEEP);
				} else {
					buffer = prealloc;
				}

				if (buffer == NULL)  {
					mutex_exit(&(DEVI(dip)->devi_lock));
					cmn_err(CE_CONT, prop_no_mem_msg, name);
					return (DDI_PROP_NO_MEMORY);
				}
				/* Set callers buf ptr */
				*(caddr_t *)valuep = buffer;
				break;

			case PROP_LEN_AND_VAL_BUF:

				/* Caller's buffer must be large enough. */
				if (propp->prop_len > (i)) {
					mutex_exit(&(DEVI(dip)->devi_lock));
					return (DDI_PROP_BUF_TOO_SMALL);
				}

				buffer = valuep;  /* Get callers buf ptr */
				break;

			default:
				break;
			}

			/*
			 * Do the copy.
			 */
			bcopy(propp->prop_val, buffer, propp->prop_len);
			mutex_exit(&(DEVI(dip)->devi_lock));
			return (DDI_PROP_SUCCESS);
		}

		mutex_exit(&(DEVI(dip)->devi_lock));
		if (prealloc)
			kmem_free(prealloc, plength);
		prealloc = NULL;

		/*
		 * Prop not found, call parent bus_ops to deal with possible
		 * h/w layer (possible PROM defined props, etc.) and to
		 * possibly ascend the hierarchy, if allowed by flags.
		 */
		pdip = (dev_info_t *)DEVI(dip)->devi_parent;

		/*
		 * One last call for the root driver PROM props?
		 */
		if (dip == ddi_root_node())  {
			return (ddi_bus_prop_op(dev, dip, dip, prop_op,
			    flags, name, valuep, (int *)lengthp));
		}

		/*
		 * We may have been called to check for properties
		 * within a single devinfo node that has no parent -
		 * see make_prop()
		 */
		if (pdip == NULL) {
			ASSERT((flags &
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM)) ==
			    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM));
			return (DDI_PROP_NOT_FOUND);
		}

		/*
		 * Instead of recursing, we do iterative calls up the tree.
		 * As a bit of optimization, skip the bus_op level if the
		 * node is a s/w node and if the parent's bus_prop_op function
		 * is `ddi_bus_prop_op', because we know that in this case,
		 * this function does nothing.
		 *
		 * 4225415: If the parent isn't attached, or the child
		 * hasn't been named by the parent yet, use the default
		 * ddi_bus_prop_op as a proxy for the parent.  This
		 * allows property lookups in any child/parent state to
		 * include 'prom' and inherited properties, even when
		 * there are no drivers attached to the child or parent.
		 */

		bop = ddi_bus_prop_op;
		if (i_ddi_devi_attached(pdip) &&
		    (i_ddi_node_state(dip) >= DS_INITIALIZED))
			bop = DEVI(pdip)->devi_ops->devo_bus_ops->bus_prop_op;

		i = DDI_PROP_NOT_FOUND;

		if ((bop != ddi_bus_prop_op) || ndi_dev_is_prom_node(dip)) {
			i = (*bop)(dev, pdip, dip, prop_op,
			    flags | DDI_PROP_DONTPASS,
			    name, valuep, lengthp);
		}

		if ((flags & DDI_PROP_DONTPASS) ||
		    (i != DDI_PROP_NOT_FOUND))
			return (i);

		/* Ascend one level and search the parent's lists. */
		dip = pdip;
	}
	/*NOTREACHED*/
}
1775 
1776 
1777 /*
1778  * ddi_prop_op: The basic property operator for drivers.
1779  *
1780  * In ddi_prop_op, the type of valuep is interpreted based on prop_op:
1781  *
1782  *	prop_op			valuep
1783  *	------			------
1784  *
1785  *	PROP_LEN		<unused>
1786  *
1787  *	PROP_LEN_AND_VAL_BUF	Pointer to callers buffer
1788  *
1789  *	PROP_LEN_AND_VAL_ALLOC	Address of callers pointer (will be set to
1790  *				address of allocated buffer, if successful)
1791  */
int
ddi_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	int	i;

	/* Callers must not pass type bits; they are supplied below. */
	ASSERT((mod_flags & DDI_PROP_TYPE_MASK) == 0);

	/*
	 * If this was originally an LDI prop lookup then we bail here.
	 * The reason is that the LDI property lookup interfaces first call
	 * a drivers prop_op() entry point to allow it to override
	 * properties.  But if we've made it here, then the driver hasn't
	 * overridden any properties.  We don't want to continue with the
	 * property search here because we don't have any type information.
	 * When we return failure, the LDI interfaces will then proceed to
	 * call the typed property interfaces to look up the property.
	 */
	if (mod_flags & DDI_PROP_DYNAMIC)
		return (DDI_PROP_NOT_FOUND);

	/*
	 * check for pre-typed property consumer asking for typed property:
	 * see e_ddi_getprop_int64.
	 */
	if (mod_flags & DDI_PROP_CONSUMER_TYPED)
		mod_flags |= DDI_PROP_TYPE_INT64;
	mod_flags |= DDI_PROP_TYPE_ANY;

	i = ddi_prop_search_common(dev, dip, prop_op,
	    mod_flags, name, valuep, (uint_t *)lengthp);
	/* Fold the "found via 1275/PROM" result into plain success. */
	if (i == DDI_PROP_FOUND_1275)
		return (DDI_PROP_SUCCESS);
	return (i);
}
1827 
1828 /*
1829  * ddi_prop_op_nblocks_blksize: The basic property operator for drivers that
1830  * maintain size in number of blksize blocks.  Provides a dynamic property
1831  * implementation for size oriented properties based on nblocks64 and blksize
1832  * values passed in by the driver.  Fallback to ddi_prop_op if the nblocks64
1833  * is too large.  This interface should not be used with a nblocks64 that
1834  * represents the driver's idea of how to represent unknown, if nblocks is
1835  * unknown use ddi_prop_op.
1836  */
1837 int
1838 ddi_prop_op_nblocks_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1839     int mod_flags, char *name, caddr_t valuep, int *lengthp,
1840     uint64_t nblocks64, uint_t blksize)
1841 {
1842 	uint64_t size64;
1843 	int	blkshift;
1844 
1845 	/* convert block size to shift value */
1846 	ASSERT(BIT_ONLYONESET(blksize));
1847 	blkshift = highbit(blksize) - 1;
1848 
1849 	/*
1850 	 * There is no point in supporting nblocks64 values that don't have
1851 	 * an accurate uint64_t byte count representation.
1852 	 */
1853 	if (nblocks64 >= (UINT64_MAX >> blkshift))
1854 		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1855 		    name, valuep, lengthp));
1856 
1857 	size64 = nblocks64 << blkshift;
1858 	return (ddi_prop_op_size_blksize(dev, dip, prop_op, mod_flags,
1859 	    name, valuep, lengthp, size64, blksize));
1860 }
1861 
1862 /*
1863  * ddi_prop_op_nblocks: ddi_prop_op_nblocks_blksize with DEV_BSIZE blksize.
1864  */
/* Convenience wrapper: ddi_prop_op_nblocks_blksize with DEV_BSIZE blocks. */
int
ddi_prop_op_nblocks(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t nblocks64)
{
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, nblocks64, DEV_BSIZE));
}
1872 
1873 /*
1874  * ddi_prop_op_size_blksize: The basic property operator for block drivers that
 * maintain size in bytes. Provides a dynamic property implementation for
1876  * size oriented properties based on size64 value and blksize passed in by the
1877  * driver.  Fallback to ddi_prop_op if the size64 is too large. This interface
1878  * should not be used with a size64 that represents the driver's idea of how
1879  * to represent unknown, if size is unknown use ddi_prop_op.
1880  *
1881  * NOTE: the legacy "nblocks"/"size" properties are treated as 32-bit unsigned
1882  * integers. While the most likely interface to request them ([bc]devi_size)
1883  * is declared int (signed) there is no enforcement of this, which means we
1884  * can't enforce limitations here without risking regression.
1885  */
int
ddi_prop_op_size_blksize(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64,
    uint_t blksize)
{
	uint64_t nblocks64;
	int	callers_length;
	caddr_t	buffer;
	int	blkshift;

	/*
	 * This is a kludge to support capture of size(9P) pure dynamic
	 * properties in snapshots for non-cmlb code (without exposing
	 * i_ddi_prop_dyn changes). When everyone uses cmlb, this code
	 * should be removed.
	 */
	if (i_ddi_prop_dyn_driver_get(dip) == NULL) {
		static i_ddi_prop_dyn_t prop_dyn_size[] = {
		    {"Size",		DDI_PROP_TYPE_INT64,	S_IFCHR},
		    {"Nblocks",		DDI_PROP_TYPE_INT64,	S_IFBLK},
		    {NULL}
		};
		i_ddi_prop_dyn_driver_set(dip, prop_dyn_size);
	}

	/* convert block size to shift value */
	ASSERT(BIT_ONLYONESET(blksize));
	blkshift = highbit(blksize) - 1;

	/* compute DEV_BSIZE nblocks value */
	nblocks64 = size64 >> blkshift;

	/* get callers length, establish length of our dynamic properties */
	callers_length = *lengthp;

	/*
	 * Capitalized names are 64-bit; lowercase legacy names are 32-bit
	 * and are only served when the value actually fits (see the block
	 * comment above this function).
	 */
	if (strcmp(name, "Nblocks") == 0)
		*lengthp = sizeof (uint64_t);
	else if (strcmp(name, "Size") == 0)
		*lengthp = sizeof (uint64_t);
	else if ((strcmp(name, "nblocks") == 0) && (nblocks64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "size") == 0) && (size64 < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else if ((strcmp(name, "blksize") == 0) && (blksize < UINT_MAX))
		*lengthp = sizeof (uint32_t);
	else {
		/* fallback to ddi_prop_op */
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}

	/* service request for the length of the property */
	if (prop_op == PROP_LEN)
		return (DDI_PROP_SUCCESS);

	switch (prop_op) {
	case PROP_LEN_AND_VAL_ALLOC:
		if ((buffer = kmem_alloc(*lengthp,
		    (mod_flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP)) == NULL)
			return (DDI_PROP_NO_MEMORY);

		*(caddr_t *)valuep = buffer;	/* set callers buf ptr */
		break;

	case PROP_LEN_AND_VAL_BUF:
		/* the length of the property and the request must match */
		if (callers_length != *lengthp)
			return (DDI_PROP_INVAL_ARG);

		buffer = valuep;		/* get callers buf ptr */
		break;

	default:
		return (DDI_PROP_INVAL_ARG);
	}

	/* transfer the value into the buffer */
	if (strcmp(name, "Nblocks") == 0)
		*((uint64_t *)buffer) = nblocks64;
	else if (strcmp(name, "Size") == 0)
		*((uint64_t *)buffer) = size64;
	else if (strcmp(name, "nblocks") == 0)
		*((uint32_t *)buffer) = (uint32_t)nblocks64;
	else if (strcmp(name, "size") == 0)
		*((uint32_t *)buffer) = (uint32_t)size64;
	else if (strcmp(name, "blksize") == 0)
		*((uint32_t *)buffer) = (uint32_t)blksize;
	return (DDI_PROP_SUCCESS);
}
1976 
1977 /*
1978  * ddi_prop_op_size: ddi_prop_op_size_blksize with DEV_BSIZE block size.
1979  */
/* Convenience wrapper: ddi_prop_op_size_blksize with DEV_BSIZE blocks. */
int
ddi_prop_op_size(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp, uint64_t size64)
{
	return (ddi_prop_op_size_blksize(dev, dip, prop_op,
	    mod_flags, name, valuep, lengthp, size64, DEV_BSIZE));
}
1987 
1988 /*
1989  * Variable length props...
1990  */
1991 
1992 /*
1993  * ddi_getlongprop:	Get variable length property len+val into a buffer
1994  *		allocated by property provider via kmem_alloc. Requester
1995  *		is responsible for freeing returned property via kmem_free.
1996  *
1997  *	Arguments:
1998  *
1999  *	dev_t:	Input:	dev_t of property.
2000  *	dip:	Input:	dev_info_t pointer of child.
2001  *	flags:	Input:	Possible flag modifiers are:
2002  *		DDI_PROP_DONTPASS:	Don't pass to parent if prop not found.
2003  *		DDI_PROP_CANSLEEP:	Memory allocation may sleep.
2004  *	name:	Input:	name of property.
2005  *	valuep:	Output:	Addr of callers buffer pointer.
2006  *	lengthp:Output:	*lengthp will contain prop length on exit.
2007  *
2008  *	Possible Returns:
2009  *
2010  *		DDI_PROP_SUCCESS:	Prop found and returned.
2011  *		DDI_PROP_NOT_FOUND:	Prop not found
2012  *		DDI_PROP_UNDEFINED:	Prop explicitly undefined.
2013  *		DDI_PROP_NO_MEMORY:	Prop found, but unable to alloc mem.
2014  */
2015 
/* See contract in the block comment above; provider allocates the buffer. */
int
ddi_getlongprop(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_ALLOC,
	    flags, name, valuep, lengthp));
}
2023 
2024 /*
2025  *
2026  * ddi_getlongprop_buf:		Get long prop into pre-allocated callers
2027  *				buffer. (no memory allocation by provider).
2028  *
2029  *	dev_t:	Input:	dev_t of property.
2030  *	dip:	Input:	dev_info_t pointer of child.
2031  *	flags:	Input:	DDI_PROP_DONTPASS or NULL
2032  *	name:	Input:	name of property
2033  *	valuep:	Input:	ptr to callers buffer.
2034  *	lengthp:I/O:	ptr to length of callers buffer on entry,
2035  *			actual length of property on exit.
2036  *
2037  *	Possible returns:
2038  *
2039  *		DDI_PROP_SUCCESS	Prop found and returned
2040  *		DDI_PROP_NOT_FOUND	Prop not found
2041  *		DDI_PROP_UNDEFINED	Prop explicitly undefined.
2042  *		DDI_PROP_BUF_TOO_SMALL	Prop found, callers buf too small,
2043  *					no value returned, but actual prop
2044  *					length returned in *lengthp
2045  *
2046  */
2047 
/* See contract in the block comment above; caller supplies the buffer. */
int
ddi_getlongprop_buf(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t valuep, int *lengthp)
{
	return (ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
	    flags, name, valuep, lengthp));
}
2055 
2056 /*
2057  * Integer/boolean sized props.
2058  *
2059  * Call is value only... returns found boolean or int sized prop value or
2060  * defvalue if prop not found or is wrong length or is explicitly undefined.
2061  * Only flag is DDI_PROP_DONTPASS...
2062  *
2063  * By convention, this interface returns boolean (0) sized properties
2064  * as value (int)1.
2065  *
2066  * This never returns an error, if property not found or specifically
2067  * undefined, the input `defvalue' is returned.
2068  */
2069 
2070 int
2071 ddi_getprop(dev_t dev, dev_info_t *dip, int flags, char *name, int defvalue)
2072 {
2073 	int	propvalue = defvalue;
2074 	int	proplength = sizeof (int);
2075 	int	error;
2076 
2077 	error = ddi_prop_op(dev, dip, PROP_LEN_AND_VAL_BUF,
2078 	    flags, name, (caddr_t)&propvalue, &proplength);
2079 
2080 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
2081 		propvalue = 1;
2082 
2083 	return (propvalue);
2084 }
2085 
2086 /*
2087  * Get prop length interface: flags are 0 or DDI_PROP_DONTPASS
2088  * if returns DDI_PROP_SUCCESS, length returned in *lengthp.
2089  */
2090 
2091 int
2092 ddi_getproplen(dev_t dev, dev_info_t *dip, int flags, char *name, int *lengthp)
2093 {
2094 	return (ddi_prop_op(dev, dip, PROP_LEN, flags, name, NULL, lengthp));
2095 }
2096 
2097 /*
2098  * Allocate a struct prop_driver_data, along with 'size' bytes
2099  * for decoded property data.  This structure is freed by
2100  * calling ddi_prop_free(9F).
2101  */
2102 static void *
2103 ddi_prop_decode_alloc(size_t size, void (*prop_free)(struct prop_driver_data *))
2104 {
2105 	struct prop_driver_data *pdd;
2106 
2107 	/*
2108 	 * Allocate a structure with enough memory to store the decoded data.
2109 	 */
2110 	pdd = kmem_zalloc(sizeof (struct prop_driver_data) + size, KM_SLEEP);
2111 	pdd->pdd_size = (sizeof (struct prop_driver_data) + size);
2112 	pdd->pdd_prop_free = prop_free;
2113 
2114 	/*
2115 	 * Return a pointer to the location to put the decoded data.
2116 	 */
2117 	return ((void *)((caddr_t)pdd + sizeof (struct prop_driver_data)));
2118 }
2119 
2120 /*
2121  * Allocated the memory needed to store the encoded data in the property
2122  * handle.
2123  */
2124 static int
2125 ddi_prop_encode_alloc(prop_handle_t *ph, size_t size)
2126 {
2127 	/*
2128 	 * If size is zero, then set data to NULL and size to 0.  This
2129 	 * is a boolean property.
2130 	 */
2131 	if (size == 0) {
2132 		ph->ph_size = 0;
2133 		ph->ph_data = NULL;
2134 		ph->ph_cur_pos = NULL;
2135 		ph->ph_save_pos = NULL;
2136 	} else {
2137 		if (ph->ph_flags == DDI_PROP_DONTSLEEP) {
2138 			ph->ph_data = kmem_zalloc(size, KM_NOSLEEP);
2139 			if (ph->ph_data == NULL)
2140 				return (DDI_PROP_NO_MEMORY);
2141 		} else
2142 			ph->ph_data = kmem_zalloc(size, KM_SLEEP);
2143 		ph->ph_size = size;
2144 		ph->ph_cur_pos = ph->ph_data;
2145 		ph->ph_save_pos = ph->ph_data;
2146 	}
2147 	return (DDI_PROP_SUCCESS);
2148 }
2149 
2150 /*
2151  * Free the space allocated by the lookup routines.  Each lookup routine
2152  * returns a pointer to the decoded data to the driver.  The driver then
2153  * passes this pointer back to us.  This data actually lives in a struct
2154  * prop_driver_data.  We use negative indexing to find the beginning of
2155  * the structure and then free the entire structure using the size and
2156  * the free routine stored in the structure.
2157  */
2158 void
2159 ddi_prop_free(void *datap)
2160 {
2161 	struct prop_driver_data *pdd;
2162 
2163 	/*
2164 	 * Get the structure
2165 	 */
2166 	pdd = (struct prop_driver_data *)
2167 	    ((caddr_t)datap - sizeof (struct prop_driver_data));
2168 	/*
2169 	 * Call the free routine to free it
2170 	 */
2171 	(*pdd->pdd_prop_free)(pdd);
2172 }
2173 
2174 /*
2175  * Free the data associated with an array of ints,
2176  * allocated with ddi_prop_decode_alloc().
2177  */
2178 static void
2179 ddi_prop_free_ints(struct prop_driver_data *pdd)
2180 {
2181 	kmem_free(pdd, pdd->pdd_size);
2182 }
2183 
2184 /*
2185  * Free a single string property or a single string contained within
2186  * the argv style return value of an array of strings.
2187  */
2188 static void
2189 ddi_prop_free_string(struct prop_driver_data *pdd)
2190 {
2191 	kmem_free(pdd, pdd->pdd_size);
2192 
2193 }
2194 
2195 /*
2196  * Free an array of strings.
2197  */
2198 static void
2199 ddi_prop_free_strings(struct prop_driver_data *pdd)
2200 {
2201 	kmem_free(pdd, pdd->pdd_size);
2202 }
2203 
2204 /*
2205  * Free the data associated with an array of bytes.
2206  */
2207 static void
2208 ddi_prop_free_bytes(struct prop_driver_data *pdd)
2209 {
2210 	kmem_free(pdd, pdd->pdd_size);
2211 }
2212 
2213 /*
2214  * Reset the current location pointer in the property handle to the
2215  * beginning of the data.
2216  */
2217 void
2218 ddi_prop_reset_pos(prop_handle_t *ph)
2219 {
2220 	ph->ph_cur_pos = ph->ph_data;
2221 	ph->ph_save_pos = ph->ph_data;
2222 }
2223 
2224 /*
2225  * Restore the current location pointer in the property handle to the
2226  * saved position.
2227  */
2228 void
2229 ddi_prop_save_pos(prop_handle_t *ph)
2230 {
2231 	ph->ph_save_pos = ph->ph_cur_pos;
2232 }
2233 
2234 /*
2235  * Save the location that the current location pointer is pointing to..
2236  */
2237 void
2238 ddi_prop_restore_pos(prop_handle_t *ph)
2239 {
2240 	ph->ph_cur_pos = ph->ph_save_pos;
2241 }
2242 
2243 /*
2244  * Property encode/decode functions
2245  */
2246 
2247 /*
2248  * Decode a single integer property
2249  */
2250 static int
2251 ddi_prop_fm_decode_int(prop_handle_t *ph, void *data, uint_t *nelements)
2252 {
2253 	int	i;
2254 	int	tmp;
2255 
2256 	/*
2257 	 * If there is nothing to decode return an error
2258 	 */
2259 	if (ph->ph_size == 0)
2260 		return (DDI_PROP_END_OF_DATA);
2261 
2262 	/*
2263 	 * Decode the property as a single integer and return it
2264 	 * in data if we were able to decode it.
2265 	 */
2266 	i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, &tmp);
2267 	if (i < DDI_PROP_RESULT_OK) {
2268 		switch (i) {
2269 		case DDI_PROP_RESULT_EOF:
2270 			return (DDI_PROP_END_OF_DATA);
2271 
2272 		case DDI_PROP_RESULT_ERROR:
2273 			return (DDI_PROP_CANNOT_DECODE);
2274 		}
2275 	}
2276 
2277 	*(int *)data = tmp;
2278 	*nelements = 1;
2279 	return (DDI_PROP_SUCCESS);
2280 }
2281 
2282 /*
2283  * Decode a single 64 bit integer property
2284  */
2285 static int
2286 ddi_prop_fm_decode_int64(prop_handle_t *ph, void *data, uint_t *nelements)
2287 {
2288 	int	i;
2289 	int64_t	tmp;
2290 
2291 	/*
2292 	 * If there is nothing to decode return an error
2293 	 */
2294 	if (ph->ph_size == 0)
2295 		return (DDI_PROP_END_OF_DATA);
2296 
2297 	/*
2298 	 * Decode the property as a single integer and return it
2299 	 * in data if we were able to decode it.
2300 	 */
2301 	i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, &tmp);
2302 	if (i < DDI_PROP_RESULT_OK) {
2303 		switch (i) {
2304 		case DDI_PROP_RESULT_EOF:
2305 			return (DDI_PROP_END_OF_DATA);
2306 
2307 		case DDI_PROP_RESULT_ERROR:
2308 			return (DDI_PROP_CANNOT_DECODE);
2309 		}
2310 	}
2311 
2312 	*(int64_t *)data = tmp;
2313 	*nelements = 1;
2314 	return (DDI_PROP_SUCCESS);
2315 }
2316 
2317 /*
2318  * Decode an array of integers property
2319  */
2320 static int
2321 ddi_prop_fm_decode_ints(prop_handle_t *ph, void *data, uint_t *nelements)
2322 {
2323 	int	i;
2324 	int	cnt = 0;
2325 	int	*tmp;
2326 	int	*intp;
2327 	int	n;
2328 
2329 	/*
2330 	 * Figure out how many array elements there are by going through the
2331 	 * data without decoding it first and counting.
2332 	 */
2333 	for (;;) {
2334 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_SKIP, NULL);
2335 		if (i < 0)
2336 			break;
2337 		cnt++;
2338 	}
2339 
2340 	/*
2341 	 * If there are no elements return an error
2342 	 */
2343 	if (cnt == 0)
2344 		return (DDI_PROP_END_OF_DATA);
2345 
2346 	/*
2347 	 * If we cannot skip through the data, we cannot decode it
2348 	 */
2349 	if (i == DDI_PROP_RESULT_ERROR)
2350 		return (DDI_PROP_CANNOT_DECODE);
2351 
2352 	/*
2353 	 * Reset the data pointer to the beginning of the encoded data
2354 	 */
2355 	ddi_prop_reset_pos(ph);
2356 
2357 	/*
2358 	 * Allocated memory to store the decoded value in.
2359 	 */
2360 	intp = ddi_prop_decode_alloc((cnt * sizeof (int)),
2361 	    ddi_prop_free_ints);
2362 
2363 	/*
2364 	 * Decode each element and place it in the space we just allocated
2365 	 */
2366 	tmp = intp;
2367 	for (n = 0; n < cnt; n++, tmp++) {
2368 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_DECODE, tmp);
2369 		if (i < DDI_PROP_RESULT_OK) {
2370 			/*
2371 			 * Free the space we just allocated
2372 			 * and return an error.
2373 			 */
2374 			ddi_prop_free(intp);
2375 			switch (i) {
2376 			case DDI_PROP_RESULT_EOF:
2377 				return (DDI_PROP_END_OF_DATA);
2378 
2379 			case DDI_PROP_RESULT_ERROR:
2380 				return (DDI_PROP_CANNOT_DECODE);
2381 			}
2382 		}
2383 	}
2384 
2385 	*nelements = cnt;
2386 	*(int **)data = intp;
2387 
2388 	return (DDI_PROP_SUCCESS);
2389 }
2390 
2391 /*
2392  * Decode a 64 bit integer array property
2393  */
2394 static int
2395 ddi_prop_fm_decode_int64_array(prop_handle_t *ph, void *data, uint_t *nelements)
2396 {
2397 	int	i;
2398 	int	n;
2399 	int	cnt = 0;
2400 	int64_t	*tmp;
2401 	int64_t	*intp;
2402 
2403 	/*
2404 	 * Count the number of array elements by going
2405 	 * through the data without decoding it.
2406 	 */
2407 	for (;;) {
2408 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_SKIP, NULL);
2409 		if (i < 0)
2410 			break;
2411 		cnt++;
2412 	}
2413 
2414 	/*
2415 	 * If there are no elements return an error
2416 	 */
2417 	if (cnt == 0)
2418 		return (DDI_PROP_END_OF_DATA);
2419 
2420 	/*
2421 	 * If we cannot skip through the data, we cannot decode it
2422 	 */
2423 	if (i == DDI_PROP_RESULT_ERROR)
2424 		return (DDI_PROP_CANNOT_DECODE);
2425 
2426 	/*
2427 	 * Reset the data pointer to the beginning of the encoded data
2428 	 */
2429 	ddi_prop_reset_pos(ph);
2430 
2431 	/*
2432 	 * Allocate memory to store the decoded value.
2433 	 */
2434 	intp = ddi_prop_decode_alloc((cnt * sizeof (int64_t)),
2435 	    ddi_prop_free_ints);
2436 
2437 	/*
2438 	 * Decode each element and place it in the space allocated
2439 	 */
2440 	tmp = intp;
2441 	for (n = 0; n < cnt; n++, tmp++) {
2442 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_DECODE, tmp);
2443 		if (i < DDI_PROP_RESULT_OK) {
2444 			/*
2445 			 * Free the space we just allocated
2446 			 * and return an error.
2447 			 */
2448 			ddi_prop_free(intp);
2449 			switch (i) {
2450 			case DDI_PROP_RESULT_EOF:
2451 				return (DDI_PROP_END_OF_DATA);
2452 
2453 			case DDI_PROP_RESULT_ERROR:
2454 				return (DDI_PROP_CANNOT_DECODE);
2455 			}
2456 		}
2457 	}
2458 
2459 	*nelements = cnt;
2460 	*(int64_t **)data = intp;
2461 
2462 	return (DDI_PROP_SUCCESS);
2463 }
2464 
2465 /*
2466  * Encode an array of integers property (Can be one element)
2467  */
2468 int
2469 ddi_prop_fm_encode_ints(prop_handle_t *ph, void *data, uint_t nelements)
2470 {
2471 	int	i;
2472 	int	*tmp;
2473 	int	cnt;
2474 	int	size;
2475 
2476 	/*
2477 	 * If there is no data, we cannot do anything
2478 	 */
2479 	if (nelements == 0)
2480 		return (DDI_PROP_CANNOT_ENCODE);
2481 
2482 	/*
2483 	 * Get the size of an encoded int.
2484 	 */
2485 	size = DDI_PROP_INT(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2486 
2487 	if (size < DDI_PROP_RESULT_OK) {
2488 		switch (size) {
2489 		case DDI_PROP_RESULT_EOF:
2490 			return (DDI_PROP_END_OF_DATA);
2491 
2492 		case DDI_PROP_RESULT_ERROR:
2493 			return (DDI_PROP_CANNOT_ENCODE);
2494 		}
2495 	}
2496 
2497 	/*
2498 	 * Allocate space in the handle to store the encoded int.
2499 	 */
2500 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2501 	    DDI_PROP_SUCCESS)
2502 		return (DDI_PROP_NO_MEMORY);
2503 
2504 	/*
2505 	 * Encode the array of ints.
2506 	 */
2507 	tmp = (int *)data;
2508 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2509 		i = DDI_PROP_INT(ph, DDI_PROP_CMD_ENCODE, tmp);
2510 		if (i < DDI_PROP_RESULT_OK) {
2511 			switch (i) {
2512 			case DDI_PROP_RESULT_EOF:
2513 				return (DDI_PROP_END_OF_DATA);
2514 
2515 			case DDI_PROP_RESULT_ERROR:
2516 				return (DDI_PROP_CANNOT_ENCODE);
2517 			}
2518 		}
2519 	}
2520 
2521 	return (DDI_PROP_SUCCESS);
2522 }
2523 
2524 
2525 /*
2526  * Encode a 64 bit integer array property
2527  */
2528 int
2529 ddi_prop_fm_encode_int64(prop_handle_t *ph, void *data, uint_t nelements)
2530 {
2531 	int i;
2532 	int cnt;
2533 	int size;
2534 	int64_t *tmp;
2535 
2536 	/*
2537 	 * If there is no data, we cannot do anything
2538 	 */
2539 	if (nelements == 0)
2540 		return (DDI_PROP_CANNOT_ENCODE);
2541 
2542 	/*
2543 	 * Get the size of an encoded 64 bit int.
2544 	 */
2545 	size = DDI_PROP_INT64(ph, DDI_PROP_CMD_GET_ESIZE, NULL);
2546 
2547 	if (size < DDI_PROP_RESULT_OK) {
2548 		switch (size) {
2549 		case DDI_PROP_RESULT_EOF:
2550 			return (DDI_PROP_END_OF_DATA);
2551 
2552 		case DDI_PROP_RESULT_ERROR:
2553 			return (DDI_PROP_CANNOT_ENCODE);
2554 		}
2555 	}
2556 
2557 	/*
2558 	 * Allocate space in the handle to store the encoded int.
2559 	 */
2560 	if (ddi_prop_encode_alloc(ph, size * nelements) !=
2561 	    DDI_PROP_SUCCESS)
2562 		return (DDI_PROP_NO_MEMORY);
2563 
2564 	/*
2565 	 * Encode the array of ints.
2566 	 */
2567 	tmp = (int64_t *)data;
2568 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2569 		i = DDI_PROP_INT64(ph, DDI_PROP_CMD_ENCODE, tmp);
2570 		if (i < DDI_PROP_RESULT_OK) {
2571 			switch (i) {
2572 			case DDI_PROP_RESULT_EOF:
2573 				return (DDI_PROP_END_OF_DATA);
2574 
2575 			case DDI_PROP_RESULT_ERROR:
2576 				return (DDI_PROP_CANNOT_ENCODE);
2577 			}
2578 		}
2579 	}
2580 
2581 	return (DDI_PROP_SUCCESS);
2582 }
2583 
2584 /*
2585  * Decode a single string property
2586  */
2587 static int
2588 ddi_prop_fm_decode_string(prop_handle_t *ph, void *data, uint_t *nelements)
2589 {
2590 	char		*tmp;
2591 	char		*str;
2592 	int		i;
2593 	int		size;
2594 
2595 	/*
2596 	 * If there is nothing to decode return an error
2597 	 */
2598 	if (ph->ph_size == 0)
2599 		return (DDI_PROP_END_OF_DATA);
2600 
2601 	/*
2602 	 * Get the decoded size of the encoded string.
2603 	 */
2604 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
2605 	if (size < DDI_PROP_RESULT_OK) {
2606 		switch (size) {
2607 		case DDI_PROP_RESULT_EOF:
2608 			return (DDI_PROP_END_OF_DATA);
2609 
2610 		case DDI_PROP_RESULT_ERROR:
2611 			return (DDI_PROP_CANNOT_DECODE);
2612 		}
2613 	}
2614 
2615 	/*
2616 	 * Allocated memory to store the decoded value in.
2617 	 */
2618 	str = ddi_prop_decode_alloc((size_t)size, ddi_prop_free_string);
2619 
2620 	ddi_prop_reset_pos(ph);
2621 
2622 	/*
2623 	 * Decode the str and place it in the space we just allocated
2624 	 */
2625 	tmp = str;
2626 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, tmp);
2627 	if (i < DDI_PROP_RESULT_OK) {
2628 		/*
2629 		 * Free the space we just allocated
2630 		 * and return an error.
2631 		 */
2632 		ddi_prop_free(str);
2633 		switch (i) {
2634 		case DDI_PROP_RESULT_EOF:
2635 			return (DDI_PROP_END_OF_DATA);
2636 
2637 		case DDI_PROP_RESULT_ERROR:
2638 			return (DDI_PROP_CANNOT_DECODE);
2639 		}
2640 	}
2641 
2642 	*(char **)data = str;
2643 	*nelements = 1;
2644 
2645 	return (DDI_PROP_SUCCESS);
2646 }
2647 
2648 /*
2649  * Decode an array of strings.
2650  */
int
ddi_prop_fm_decode_strings(prop_handle_t *ph, void *data, uint_t *nelements)
{
	int		cnt = 0;
	char		**strs;
	char		**tmp;
	char		*ptr;
	int		i;
	int		n;
	int		size;
	size_t		nbytes;

	/*
	 * Figure out how many array elements there are by going through the
	 * data without decoding it first and counting.
	 */
	for (;;) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_SKIP, NULL);
		if (i < 0)
			break;
		cnt++;
	}

	/*
	 * If there are no elements return an error
	 */
	if (cnt == 0)
		return (DDI_PROP_END_OF_DATA);

	/*
	 * If we cannot skip through the data, we cannot decode it
	 */
	if (i == DDI_PROP_RESULT_ERROR)
		return (DDI_PROP_CANNOT_DECODE);

	/*
	 * Reset the data pointer to the beginning of the encoded data
	 */
	ddi_prop_reset_pos(ph);

	/*
	 * Figure out how much memory we need for the sum total.
	 * Layout of the single allocation made below:  an argv-style
	 * vector of (cnt + 1) string pointers (the extra slot is the
	 * NULL terminator), immediately followed by the decoded string
	 * bytes themselves.  One ddi_prop_free() releases everything.
	 */
	nbytes = (cnt + 1) * sizeof (char *);

	for (n = 0; n < cnt; n++) {
		/*
		 * Get the decoded size of the current encoded string.
		 * (Nothing has been allocated yet in this sizing pass,
		 * so a plain return on error leaks nothing.)
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		nbytes += size;
	}

	/*
	 * Allocate memory in which to store the decoded strings.
	 */
	strs = ddi_prop_decode_alloc(nbytes, ddi_prop_free_strings);

	/*
	 * Set up pointers for each string by figuring out yet
	 * again how long each string is.
	 */
	ddi_prop_reset_pos(ph);
	ptr = (caddr_t)strs + ((cnt + 1) * sizeof (char *));
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		/*
		 * Get the decoded size of the current encoded string.
		 */
		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_DSIZE, NULL);
		if (size < DDI_PROP_RESULT_OK) {
			/* From here on the allocation must be released. */
			ddi_prop_free(strs);
			switch (size) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}

		*tmp = ptr;
		ptr += size;
	}

	/*
	 * String array is terminated by a NULL
	 */
	*tmp = NULL;

	/*
	 * Finally, we can decode each string
	 */
	ddi_prop_reset_pos(ph);
	for (tmp = strs, n = 0; n < cnt; n++, tmp++) {
		i = DDI_PROP_STR(ph, DDI_PROP_CMD_DECODE, *tmp);
		if (i < DDI_PROP_RESULT_OK) {
			/*
			 * Free the space we just allocated
			 * and return an error
			 */
			ddi_prop_free(strs);
			switch (i) {
			case DDI_PROP_RESULT_EOF:
				return (DDI_PROP_END_OF_DATA);

			case DDI_PROP_RESULT_ERROR:
				return (DDI_PROP_CANNOT_DECODE);
			}
		}
	}

	*(char ***)data = strs;
	*nelements = cnt;

	return (DDI_PROP_SUCCESS);
}
2777 
2778 /*
2779  * Encode a string.
2780  */
2781 int
2782 ddi_prop_fm_encode_string(prop_handle_t *ph, void *data, uint_t nelements)
2783 {
2784 	char		**tmp;
2785 	int		size;
2786 	int		i;
2787 
2788 	/*
2789 	 * If there is no data, we cannot do anything
2790 	 */
2791 	if (nelements == 0)
2792 		return (DDI_PROP_CANNOT_ENCODE);
2793 
2794 	/*
2795 	 * Get the size of the encoded string.
2796 	 */
2797 	tmp = (char **)data;
2798 	size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2799 	if (size < DDI_PROP_RESULT_OK) {
2800 		switch (size) {
2801 		case DDI_PROP_RESULT_EOF:
2802 			return (DDI_PROP_END_OF_DATA);
2803 
2804 		case DDI_PROP_RESULT_ERROR:
2805 			return (DDI_PROP_CANNOT_ENCODE);
2806 		}
2807 	}
2808 
2809 	/*
2810 	 * Allocate space in the handle to store the encoded string.
2811 	 */
2812 	if (ddi_prop_encode_alloc(ph, size) != DDI_PROP_SUCCESS)
2813 		return (DDI_PROP_NO_MEMORY);
2814 
2815 	ddi_prop_reset_pos(ph);
2816 
2817 	/*
2818 	 * Encode the string.
2819 	 */
2820 	tmp = (char **)data;
2821 	i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2822 	if (i < DDI_PROP_RESULT_OK) {
2823 		switch (i) {
2824 		case DDI_PROP_RESULT_EOF:
2825 			return (DDI_PROP_END_OF_DATA);
2826 
2827 		case DDI_PROP_RESULT_ERROR:
2828 			return (DDI_PROP_CANNOT_ENCODE);
2829 		}
2830 	}
2831 
2832 	return (DDI_PROP_SUCCESS);
2833 }
2834 
2835 
2836 /*
2837  * Encode an array of strings.
2838  */
2839 int
2840 ddi_prop_fm_encode_strings(prop_handle_t *ph, void *data, uint_t nelements)
2841 {
2842 	int		cnt = 0;
2843 	char		**tmp;
2844 	int		size;
2845 	uint_t		total_size;
2846 	int		i;
2847 
2848 	/*
2849 	 * If there is no data, we cannot do anything
2850 	 */
2851 	if (nelements == 0)
2852 		return (DDI_PROP_CANNOT_ENCODE);
2853 
2854 	/*
2855 	 * Get the total size required to encode all the strings.
2856 	 */
2857 	total_size = 0;
2858 	tmp = (char **)data;
2859 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2860 		size = DDI_PROP_STR(ph, DDI_PROP_CMD_GET_ESIZE, *tmp);
2861 		if (size < DDI_PROP_RESULT_OK) {
2862 			switch (size) {
2863 			case DDI_PROP_RESULT_EOF:
2864 				return (DDI_PROP_END_OF_DATA);
2865 
2866 			case DDI_PROP_RESULT_ERROR:
2867 				return (DDI_PROP_CANNOT_ENCODE);
2868 			}
2869 		}
2870 		total_size += (uint_t)size;
2871 	}
2872 
2873 	/*
2874 	 * Allocate space in the handle to store the encoded strings.
2875 	 */
2876 	if (ddi_prop_encode_alloc(ph, total_size) != DDI_PROP_SUCCESS)
2877 		return (DDI_PROP_NO_MEMORY);
2878 
2879 	ddi_prop_reset_pos(ph);
2880 
2881 	/*
2882 	 * Encode the array of strings.
2883 	 */
2884 	tmp = (char **)data;
2885 	for (cnt = 0; cnt < nelements; cnt++, tmp++) {
2886 		i = DDI_PROP_STR(ph, DDI_PROP_CMD_ENCODE, *tmp);
2887 		if (i < DDI_PROP_RESULT_OK) {
2888 			switch (i) {
2889 			case DDI_PROP_RESULT_EOF:
2890 				return (DDI_PROP_END_OF_DATA);
2891 
2892 			case DDI_PROP_RESULT_ERROR:
2893 				return (DDI_PROP_CANNOT_ENCODE);
2894 			}
2895 		}
2896 	}
2897 
2898 	return (DDI_PROP_SUCCESS);
2899 }
2900 
2901 
2902 /*
2903  * Decode an array of bytes.
2904  */
2905 static int
2906 ddi_prop_fm_decode_bytes(prop_handle_t *ph, void *data, uint_t *nelements)
2907 {
2908 	uchar_t		*tmp;
2909 	int		nbytes;
2910 	int		i;
2911 
2912 	/*
2913 	 * If there are no elements return an error
2914 	 */
2915 	if (ph->ph_size == 0)
2916 		return (DDI_PROP_END_OF_DATA);
2917 
2918 	/*
2919 	 * Get the size of the encoded array of bytes.
2920 	 */
2921 	nbytes = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_DSIZE,
2922 	    data, ph->ph_size);
2923 	if (nbytes < DDI_PROP_RESULT_OK) {
2924 		switch (nbytes) {
2925 		case DDI_PROP_RESULT_EOF:
2926 			return (DDI_PROP_END_OF_DATA);
2927 
2928 		case DDI_PROP_RESULT_ERROR:
2929 			return (DDI_PROP_CANNOT_DECODE);
2930 		}
2931 	}
2932 
2933 	/*
2934 	 * Allocated memory to store the decoded value in.
2935 	 */
2936 	tmp = ddi_prop_decode_alloc(nbytes, ddi_prop_free_bytes);
2937 
2938 	/*
2939 	 * Decode each element and place it in the space we just allocated
2940 	 */
2941 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_DECODE, tmp, nbytes);
2942 	if (i < DDI_PROP_RESULT_OK) {
2943 		/*
2944 		 * Free the space we just allocated
2945 		 * and return an error
2946 		 */
2947 		ddi_prop_free(tmp);
2948 		switch (i) {
2949 		case DDI_PROP_RESULT_EOF:
2950 			return (DDI_PROP_END_OF_DATA);
2951 
2952 		case DDI_PROP_RESULT_ERROR:
2953 			return (DDI_PROP_CANNOT_DECODE);
2954 		}
2955 	}
2956 
2957 	*(uchar_t **)data = tmp;
2958 	*nelements = nbytes;
2959 
2960 	return (DDI_PROP_SUCCESS);
2961 }
2962 
2963 /*
2964  * Encode an array of bytes.
2965  */
2966 int
2967 ddi_prop_fm_encode_bytes(prop_handle_t *ph, void *data, uint_t nelements)
2968 {
2969 	int		size;
2970 	int		i;
2971 
2972 	/*
2973 	 * If there are no elements, then this is a boolean property,
2974 	 * so just create a property handle with no data and return.
2975 	 */
2976 	if (nelements == 0) {
2977 		(void) ddi_prop_encode_alloc(ph, 0);
2978 		return (DDI_PROP_SUCCESS);
2979 	}
2980 
2981 	/*
2982 	 * Get the size of the encoded array of bytes.
2983 	 */
2984 	size = DDI_PROP_BYTES(ph, DDI_PROP_CMD_GET_ESIZE, (uchar_t *)data,
2985 	    nelements);
2986 	if (size < DDI_PROP_RESULT_OK) {
2987 		switch (size) {
2988 		case DDI_PROP_RESULT_EOF:
2989 			return (DDI_PROP_END_OF_DATA);
2990 
2991 		case DDI_PROP_RESULT_ERROR:
2992 			return (DDI_PROP_CANNOT_DECODE);
2993 		}
2994 	}
2995 
2996 	/*
2997 	 * Allocate space in the handle to store the encoded bytes.
2998 	 */
2999 	if (ddi_prop_encode_alloc(ph, (uint_t)size) != DDI_PROP_SUCCESS)
3000 		return (DDI_PROP_NO_MEMORY);
3001 
3002 	/*
3003 	 * Encode the array of bytes.
3004 	 */
3005 	i = DDI_PROP_BYTES(ph, DDI_PROP_CMD_ENCODE, (uchar_t *)data,
3006 	    nelements);
3007 	if (i < DDI_PROP_RESULT_OK) {
3008 		switch (i) {
3009 		case DDI_PROP_RESULT_EOF:
3010 			return (DDI_PROP_END_OF_DATA);
3011 
3012 		case DDI_PROP_RESULT_ERROR:
3013 			return (DDI_PROP_CANNOT_ENCODE);
3014 		}
3015 	}
3016 
3017 	return (DDI_PROP_SUCCESS);
3018 }
3019 
3020 /*
3021  * OBP 1275 integer, string and byte operators.
3022  *
 * DDI_PROP_CMD_DECODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot decode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was decoded
 *
 * DDI_PROP_CMD_ENCODE:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot encode the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was encoded
 *
 * DDI_PROP_CMD_SKIP:
 *
 *	DDI_PROP_RESULT_ERROR:		cannot skip the data
 *	DDI_PROP_RESULT_EOF:		end of data
 *	DDI_PROP_RESULT_OK:		data was skipped
3040  *
3041  * DDI_PROP_CMD_GET_ESIZE:
3042  *
3043  *	DDI_PROP_RESULT_ERROR:		cannot get encoded size
3044  *	DDI_PROP_RESULT_EOF:		end of data
3045  *	> 0:				the encoded size
3046  *
3047  * DDI_PROP_CMD_GET_DSIZE:
3048  *
3049  *	DDI_PROP_RESULT_ERROR:		cannot get decoded size
3050  *	DDI_PROP_RESULT_EOF:		end of data
3051  *	> 0:				the decoded size
3052  */
3053 
3054 /*
3055  * OBP 1275 integer operator
3056  *
3057  * OBP properties are a byte stream of data, so integers may not be
3058  * properly aligned.  Therefore we need to copy them one byte at a time.
3059  */
int
ddi_prop_1275_int(prop_handle_t *ph, uint_t cmd, int *data)
{
	int	i;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/*
			 * PROM-sourced properties may carry a short
			 * (less than PROP_1275_INT_SIZE) trailing value.
			 *
			 * NOTE(review): the bounds check below casts both
			 * sides to (int *), so the `ph_size - i' offset is
			 * scaled by sizeof (int) even though ph_size is a
			 * byte count; the check is therefore much looser
			 * than a byte-wise comparison would be.  This is
			 * long-standing behavior -- confirm before changing.
			 */
			i = MIN(ph->ph_size, PROP_1275_INT_SIZE);
			if ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - i))
				return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): same (int *) scaling caveat as in
			 * the PROM branch above.
			 */
			if (ph->ph_size < sizeof (int) ||
			    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
			    ph->ph_size - sizeof (int))))
				return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			*data = impl_ddi_prop_int_from_prom(
			    (uchar_t *)ph->ph_cur_pos,
			    (ph->ph_size < PROP_1275_INT_SIZE) ?
			    ph->ph_size : PROP_1275_INT_SIZE);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data.
		 * (NOTE(review): same (int *) scaling caveat as in the
		 * decode case above.)
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE ||
		    ((int *)ph->ph_cur_pos > ((int *)ph->ph_data +
		    ph->ph_size - sizeof (int))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < PROP_1275_INT_SIZE)
			return (DDI_PROP_RESULT_ERROR);


		/*
		 * At or past the end of the buffer: nothing left to skip.
		 */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of the next bit of
		 * undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos + PROP_1275_INT_SIZE;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (PROP_1275_INT_SIZE);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int));

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_int: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3173 
3174 /*
3175  * 64 bit integer operator.
3176  *
3177  * This is an extension, defined by Sun, to the 1275 integer
3178  * operator.  This routine handles the encoding/decoding of
3179  * 64 bit integer properties.
3180  */
int
ddi_prop_int64_op(prop_handle_t *ph, uint_t cmd, int64_t *data)
{

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0)
			return (DDI_PROP_RESULT_ERROR);
		if (ph->ph_flags & PH_FROM_PROM) {
			/* 64-bit integers are never decoded from the PROM. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			/*
			 * NOTE(review): the (int64_t *) casts scale the
			 * `ph_size - sizeof (int64_t)' offset by
			 * sizeof (int64_t) even though ph_size is a byte
			 * count, so this check is much looser than a
			 * byte-wise comparison would be.  Long-standing
			 * behavior -- confirm before changing.
			 */
			if (ph->ph_size < sizeof (int64_t) ||
			    ((int64_t *)ph->ph_cur_pos >
			    ((int64_t *)ph->ph_data +
			    ph->ph_size - sizeof (int64_t))))
				return (DDI_PROP_RESULT_ERROR);
		}
		/*
		 * Copy the integer, using the implementation-specific
		 * copy function if the property is coming from the PROM.
		 */
		if (ph->ph_flags & PH_FROM_PROM) {
			/* Unreachable: the PROM case already returned above. */
			return (DDI_PROP_RESULT_ERROR);
		} else {
			bcopy(ph->ph_cur_pos, data, sizeof (int64_t));
		}

		/*
		 * Move the current location to the start of the next
		 * bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data.
		 * (NOTE(review): same (int64_t *) scaling caveat as in the
		 * decode case above.)
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t) ||
		    ((int64_t *)ph->ph_cur_pos > ((int64_t *)ph->ph_data +
		    ph->ph_size - sizeof (int64_t))))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Encode the integer into the byte stream one byte at a
		 * time.
		 */
		bcopy(data, ph->ph_cur_pos, sizeof (int64_t));

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < sizeof (int64_t))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * At or past the end of the buffer: nothing left to skip.
		 */
		if ((caddr_t)ph->ph_cur_pos ==
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		} else if ((caddr_t)ph->ph_cur_pos >
		    (caddr_t)ph->ph_data + ph->ph_size) {
			return (DDI_PROP_RESULT_EOF);
		}

		/*
		 * Move the current location to the start of
		 * the next bit of undecoded data.
		 */
		ph->ph_cur_pos = (uchar_t *)ph->ph_cur_pos +
		    sizeof (int64_t);
			return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of an encoded integer on OBP
		 */
		return (sizeof (int64_t));

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the size of a decoded integer on the system.
		 */
		return (sizeof (int64_t));

	default:
#ifdef DEBUG
		panic("ddi_prop_int64_op: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif  /* DEBUG */
	}
}
3288 
3289 /*
3290  * OBP 1275 string operator.
3291  *
3292  * OBP strings are NULL terminated.
3293  */
int
ddi_prop_1275_string(prop_handle_t *ph, uint_t cmd, char *data)
{
	int	n;
	char	*p;
	char	*end;

	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Match DDI_PROP_CMD_GET_DSIZE logic for when to stop and
		 * how to NULL terminate result.  Note that 'data' is not
		 * bounds-checked here: the caller is expected to have sized
		 * the buffer via DDI_PROP_CMD_GET_DSIZE first.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		/* Copy bytes up to and including the NULL terminator. */
		while (p < end) {
			*data++ = *p;
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens
		 * (at least) for 'true'/'false' boolean values, account for
		 * the space and store null termination on decode.
		 */
		ph->ph_cur_pos = p;
		*data = 0;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/* n includes the NULL terminator. */
		n = strlen(data) + 1;
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - n)) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Copy the NULL terminated string
		 */
		bcopy(data, ph->ph_cur_pos, n);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + n;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0) {
			return (DDI_PROP_RESULT_ERROR);
		}

		/*
		 * Skip over one NULL terminated string.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		while (p < end) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (DDI_PROP_RESULT_OK);
			}
		}

		/*
		 * Accommodate the fact that OBP does not always NULL
		 * terminate strings.
		 */
		ph->ph_cur_pos = p;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * Return the size of the encoded string on OBP.
		 */
		return (strlen(data) + 1);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Return the string length plus one for the NULL.
		 * We know the size of the property, we need to
		 * ensure that the string is properly formatted,
		 * since we may be looking up random OBP data.
		 *
		 * NOTE(review): this size query also advances ph_cur_pos
		 * past the string as a side effect; callers walking string
		 * arrays appear to depend on that -- confirm before
		 * changing.
		 */
		p = (char *)ph->ph_cur_pos;
		end = (char *)ph->ph_data + ph->ph_size;
		if (p >= end)
			return (DDI_PROP_RESULT_EOF);

		for (n = 0; p < end; n++) {
			if (*p++ == 0) {	/* NULL from OBP */
				ph->ph_cur_pos = p;
				return (n + 1);
			}
		}

		/*
		 * If OBP did not NULL terminate string, which happens for
		 * 'true'/'false' boolean values, account for the space
		 * to store null termination here.
		 */
		ph->ph_cur_pos = p;
		return (n + 1);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_string: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3437 
3438 /*
3439  * OBP 1275 byte operator
3440  *
3441  * Caller must specify the number of bytes to get.  OBP encodes bytes
3442  * as a byte so there is a 1-to-1 translation.
3443  */
int
ddi_prop_1275_bytes(prop_handle_t *ph, uint_t cmd, uchar_t *data,
	uint_t nelements)
{
	switch (cmd) {
	case DDI_PROP_CMD_DECODE:
		/*
		 * Check that there is encoded data: a non-NULL cursor, a
		 * property at least nelements long, and at least nelements
		 * bytes remaining between the cursor and the end.
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy out the bytes
		 */
		bcopy(ph->ph_cur_pos, data, nelements);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_ENCODE:
		/*
		 * Check that there is room to encode the data
		 * (same bounds test as the decode case above).
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements ||
		    ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements)))
			return (DDI_PROP_RESULT_ERROR);

		/*
		 * Copy in the bytes
		 */
		bcopy(data, ph->ph_cur_pos, nelements);

		/*
		 * Move the current location to the start of the next bit of
		 * space where we can store encoded data.
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_SKIP:
		/*
		 * Check that there is encoded data
		 */
		if (ph->ph_cur_pos == NULL || ph->ph_size == 0 ||
		    ph->ph_size < nelements)
			return (DDI_PROP_RESULT_ERROR);

		/* Running off the end of the buffer is EOF, not an error. */
		if ((char *)ph->ph_cur_pos > ((char *)ph->ph_data +
		    ph->ph_size - nelements))
			return (DDI_PROP_RESULT_EOF);

		/*
		 * Move the current location
		 */
		ph->ph_cur_pos = (char *)ph->ph_cur_pos + nelements;
		return (DDI_PROP_RESULT_OK);

	case DDI_PROP_CMD_GET_ESIZE:
		/*
		 * The size in bytes of the encoded size is the
		 * same as the decoded size provided by the caller.
		 */
		return (nelements);

	case DDI_PROP_CMD_GET_DSIZE:
		/*
		 * Just return the number of bytes specified by the caller.
		 */
		return (nelements);

	default:
#ifdef DEBUG
		panic("ddi_prop_1275_bytes: %x impossible", cmd);
		/*NOTREACHED*/
#else
		return (DDI_PROP_RESULT_ERROR);
#endif	/* DEBUG */
	}
}
3532 
3533 /*
3534  * Used for properties that come from the OBP, hardware configuration files,
3535  * or that are created by calls to ddi_prop_update(9F).
3536  */
static struct prop_handle_ops prop_1275_ops = {
	ddi_prop_1275_int,	/* integer encode/decode/size */
	ddi_prop_1275_string,	/* string encode/decode/size */
	ddi_prop_1275_bytes,	/* byte-array encode/decode/size */
	ddi_prop_int64_op	/* 64-bit integer encode/decode/size */
};
3543 
3544 
3545 /*
3546  * Interface to create/modify a managed property on child's behalf...
3547  * Flags interpreted are:
3548  *	DDI_PROP_CANSLEEP:	Allow memory allocation to sleep.
3549  *	DDI_PROP_SYSTEM_DEF:	Manipulate system list rather than driver list.
3550  *
3551  * Use same dev_t when modifying or undefining a property.
3552  * Search for properties with DDI_DEV_T_ANY to match first named
3553  * property on the list.
3554  *
3555  * Properties are stored LIFO and subsequently will match the first
3556  * `matching' instance.
3557  */
3558 
3559 /*
3560  * ddi_prop_add:	Add a software defined property
3561  */
3562 
3563 /*
3564  * define to get a new ddi_prop_t.
3565  * km_flags are KM_SLEEP or KM_NOSLEEP.
3566  */
3567 
3568 #define	DDI_NEW_PROP_T(km_flags)	\
3569 	(kmem_zalloc(sizeof (ddi_prop_t), km_flags))
3570 
/*
 * Allocate a new property and link it onto the front of the appropriate
 * per-devinfo list: the driver list by default, the system list for
 * DDI_PROP_SYSTEM_DEF, or the hardware list for DDI_PROP_HW_DEF.
 *
 * Returns DDI_PROP_SUCCESS, DDI_PROP_INVAL_ARG (wildcard dev or empty
 * name), or DDI_PROP_NO_MEMORY.  Allocations sleep only when the caller
 * passes DDI_PROP_CANSLEEP.
 */
static int
ddi_prop_add(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*new_propp, *propp;
	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
	int		km_flags = KM_NOSLEEP;
	int		name_buf_len;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero return error.
	 */

	if (dev == DDI_DEV_T_ANY || name == (char *)0 || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	if (flags & DDI_PROP_CANSLEEP)
		km_flags = KM_SLEEP;

	/* Select which of the three per-devinfo lists to link onto. */
	if (flags & DDI_PROP_SYSTEM_DEF)
		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
	else if (flags & DDI_PROP_HW_DEF)
		list_head = &(DEVI(dip)->devi_hw_prop_ptr);

	if ((new_propp = DDI_NEW_PROP_T(km_flags)) == NULL)  {
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}

	/*
	 * If dev is major number 0, then we need to do a ddi_name_to_major
	 * to get the real major number for the device.  This needs to be
	 * done because some drivers need to call ddi_prop_create in their
	 * attach routines but they don't have a dev.  By creating the dev
	 * ourself if the major number is 0, drivers will not have to know what
	 * their major number.	They can just create a dev with major number
	 * 0 and pass it in.  For device 0, we will be doing a little extra
	 * work by recreating the same dev that we already have, but its the
	 * price you pay :-).
	 *
	 * This fixes bug #1098060.
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN) {
		new_propp->prop_dev =
		    makedevice(ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));
	} else
		new_propp->prop_dev = dev;

	/*
	 * Allocate space for property name and copy it in...
	 */

	name_buf_len = strlen(name) + 1;
	new_propp->prop_name = kmem_alloc(name_buf_len, km_flags);
	if (new_propp->prop_name == 0)	{
		/* Undo the ddi_prop_t allocation before failing. */
		kmem_free(new_propp, sizeof (ddi_prop_t));
		cmn_err(CE_CONT, prop_no_mem_msg, name);
		return (DDI_PROP_NO_MEMORY);
	}
	bcopy(name, new_propp->prop_name, name_buf_len);

	/*
	 * Set the property type
	 */
	new_propp->prop_flags = flags & DDI_PROP_TYPE_MASK;

	/*
	 * Set length and value ONLY if not an explicit property undefine:
	 * NOTE: value and length are zero for explicit undefines.
	 */

	if (flags & DDI_PROP_UNDEF_IT) {
		new_propp->prop_flags |= DDI_PROP_UNDEF_IT;
	} else {
		if ((new_propp->prop_len = length) != 0) {
			new_propp->prop_val = kmem_alloc(length, km_flags);
			if (new_propp->prop_val == 0)  {
				/* Unwind both prior allocations. */
				kmem_free(new_propp->prop_name, name_buf_len);
				kmem_free(new_propp, sizeof (ddi_prop_t));
				cmn_err(CE_CONT, prop_no_mem_msg, name);
				return (DDI_PROP_NO_MEMORY);
			}
			bcopy(value, new_propp->prop_val, length);
		}
	}

	/*
	 * Link property into beginning of list. (Properties are LIFO order.)
	 */

	mutex_enter(&(DEVI(dip)->devi_lock));
	propp = *list_head;
	new_propp->prop_next = propp;
	*list_head = new_propp;
	mutex_exit(&(DEVI(dip)->devi_lock));
	return (DDI_PROP_SUCCESS);
}
3669 
3670 
3671 /*
3672  * ddi_prop_change:	Modify a software managed property value
3673  *
3674  *			Set new length and value if found.
3675  *			returns DDI_PROP_INVAL_ARG if dev is DDI_DEV_T_ANY or
3676  *			input name is the NULL string.
3677  *			returns DDI_PROP_NO_MEMORY if unable to allocate memory
3678  *
3679  *			Note: an undef can be modified to be a define,
3680  *			(you can't go the other way.)
3681  */
3682 
static int
ddi_prop_change(dev_t dev, dev_info_t *dip, int flags,
    char *name, caddr_t value, int length)
{
	ddi_prop_t	*propp;
	ddi_prop_t	**ppropp;
	caddr_t		p = NULL;

	if ((dev == DDI_DEV_T_ANY) || (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Preallocate buffer, even if we don't need it...
	 * Allocation must happen before taking devi_lock below, since
	 * kmem_alloc may sleep while the lock is held otherwise.
	 */
	if (length != 0)  {
		p = kmem_alloc(length, (flags & DDI_PROP_CANSLEEP) ?
		    KM_SLEEP : KM_NOSLEEP);
		if (p == NULL)	{
			cmn_err(CE_CONT, prop_no_mem_msg, name);
			return (DDI_PROP_NO_MEMORY);
		}
	}

	/*
	 * If the dev_t value contains DDI_MAJOR_T_UNKNOWN for the major
	 * number, a real dev_t value should be created based upon the dip's
	 * binding driver.  See ddi_prop_add...
	 */
	if (getmajor(dev) == DDI_MAJOR_T_UNKNOWN)
		dev = makedevice(
		    ddi_name_to_major(DEVI(dip)->devi_binding_name),
		    getminor(dev));

	/*
	 * Check to see if the property exists.  If so we modify it.
	 * Else we create it by calling ddi_prop_add().
	 */
	mutex_enter(&(DEVI(dip)->devi_lock));
	ppropp = &DEVI(dip)->devi_drv_prop_ptr;
	if (flags & DDI_PROP_SYSTEM_DEF)
		ppropp = &DEVI(dip)->devi_sys_prop_ptr;
	else if (flags & DDI_PROP_HW_DEF)
		ppropp = &DEVI(dip)->devi_hw_prop_ptr;

	if ((propp = i_ddi_prop_search(dev, name, flags, ppropp)) != NULL) {
		/*
		 * Found an existing property: swap in the buffer we
		 * preallocated above (copying the new value first), free
		 * the old value, and clear any previous explicit-undefine
		 * marker so the property is defined again.
		 */
		if (length != 0)
			bcopy(value, p, length);

		if (propp->prop_len != 0)
			kmem_free(propp->prop_val, propp->prop_len);

		propp->prop_len = length;
		propp->prop_val = p;
		propp->prop_flags &= ~DDI_PROP_UNDEF_IT;
		mutex_exit(&(DEVI(dip)->devi_lock));
		return (DDI_PROP_SUCCESS);
	}

	/* Not found: release the preallocated buffer and create fresh. */
	mutex_exit(&(DEVI(dip)->devi_lock));
	if (length != 0)
		kmem_free(p, length);

	return (ddi_prop_add(dev, dip, flags, name, value, length));
}
3752 
3753 /*
3754  * Common update routine used to update and encode a property.	Creates
3755  * a property handle, calls the property encode routine, figures out if
3756  * the property already exists and updates if it does.	Otherwise it
3757  * creates if it does not exist.
3758  */
int
ddi_prop_update_common(dev_t match_dev, dev_info_t *dip, int flags,
    char *name, void *data, uint_t nelements,
    int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
{
	prop_handle_t	ph;
	int		rval;
	uint_t		ourflags;

	/*
	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
	 * return error.
	 */
	if (match_dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
		return (DDI_PROP_INVAL_ARG);

	/*
	 * Create the handle
	 */
	ph.ph_data = NULL;
	ph.ph_cur_pos = NULL;
	ph.ph_save_pos = NULL;
	ph.ph_size = 0;
	ph.ph_ops = &prop_1275_ops;

	/*
	 * ourflags:
	 * For compatibility with the old interfaces.  The old interfaces
	 * didn't sleep by default and slept when the flag was set.  These
	 * interfaces do the opposite.	So the old interfaces now set the
	 * DDI_PROP_DONTSLEEP flag by default which tells us not to sleep.
	 *
	 * ph.ph_flags:
	 * Blocked data or unblocked data allocation
	 * for ph.ph_data in ddi_prop_encode_alloc()
	 */
	if (flags & DDI_PROP_DONTSLEEP) {
		ourflags = flags;
		ph.ph_flags = DDI_PROP_DONTSLEEP;
	} else {
		ourflags = flags | DDI_PROP_CANSLEEP;
		ph.ph_flags = DDI_PROP_CANSLEEP;
	}

	/*
	 * Encode the data and store it in the property handle by
	 * calling the prop_encode routine.
	 */
	if ((rval = (*prop_create)(&ph, data, nelements)) !=
	    DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_NO_MEMORY)
			cmn_err(CE_CONT, prop_no_mem_msg, name);
		/* The encoder may have allocated ph_data before failing. */
		if (ph.ph_size != 0)
			kmem_free(ph.ph_data, ph.ph_size);
		return (rval);
	}

	/*
	 * The old interfaces use a stacking approach to creating
	 * properties.	If we are being called from the old interfaces,
	 * the DDI_PROP_STACK_CREATE flag will be set, so we just do a
	 * create without checking.
	 */
	if (flags & DDI_PROP_STACK_CREATE) {
		rval = ddi_prop_add(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	} else {
		rval = ddi_prop_change(match_dev, dip,
		    ourflags, name, ph.ph_data, ph.ph_size);
	}

	/*
	 * Free the encoded data allocated in the prop_encode routine.
	 * (ddi_prop_add/change made their own copy of it.)
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3838 
3839 
3840 /*
3841  * ddi_prop_create:	Define a managed property:
3842  *			See above for details.
3843  */
3844 
3845 int
3846 ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3847     char *name, caddr_t value, int length)
3848 {
3849 	if (!(flag & DDI_PROP_CANSLEEP)) {
3850 		flag |= DDI_PROP_DONTSLEEP;
3851 #ifdef DDI_PROP_DEBUG
3852 		if (length != 0)
3853 			cmn_err(CE_NOTE, "!ddi_prop_create: interface obsolete,"
3854 			    "use ddi_prop_update (prop = %s, node = %s%d)",
3855 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
3856 #endif /* DDI_PROP_DEBUG */
3857 	}
3858 	flag &= ~DDI_PROP_SYSTEM_DEF;
3859 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3860 	return (ddi_prop_update_common(dev, dip, flag, name,
3861 	    value, length, ddi_prop_fm_encode_bytes));
3862 }
3863 
3864 int
3865 e_ddi_prop_create(dev_t dev, dev_info_t *dip, int flag,
3866     char *name, caddr_t value, int length)
3867 {
3868 	if (!(flag & DDI_PROP_CANSLEEP))
3869 		flag |= DDI_PROP_DONTSLEEP;
3870 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE | DDI_PROP_TYPE_ANY;
3871 	return (ddi_prop_update_common(dev, dip, flag,
3872 	    name, value, length, ddi_prop_fm_encode_bytes));
3873 }
3874 
3875 int
3876 ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3877     char *name, caddr_t value, int length)
3878 {
3879 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3880 
3881 	/*
3882 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3883 	 * return error.
3884 	 */
3885 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3886 		return (DDI_PROP_INVAL_ARG);
3887 
3888 	if (!(flag & DDI_PROP_CANSLEEP))
3889 		flag |= DDI_PROP_DONTSLEEP;
3890 	flag &= ~DDI_PROP_SYSTEM_DEF;
3891 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_NOTPROM), name) == 0)
3892 		return (DDI_PROP_NOT_FOUND);
3893 
3894 	return (ddi_prop_update_common(dev, dip,
3895 	    (flag | DDI_PROP_TYPE_BYTE), name,
3896 	    value, length, ddi_prop_fm_encode_bytes));
3897 }
3898 
3899 int
3900 e_ddi_prop_modify(dev_t dev, dev_info_t *dip, int flag,
3901     char *name, caddr_t value, int length)
3902 {
3903 	ASSERT((flag & DDI_PROP_TYPE_MASK) == 0);
3904 
3905 	/*
3906 	 * If dev_t is DDI_DEV_T_ANY or name's length is zero,
3907 	 * return error.
3908 	 */
3909 	if (dev == DDI_DEV_T_ANY || name == NULL || strlen(name) == 0)
3910 		return (DDI_PROP_INVAL_ARG);
3911 
3912 	if (ddi_prop_exists(dev, dip, (flag | DDI_PROP_SYSTEM_DEF), name) == 0)
3913 		return (DDI_PROP_NOT_FOUND);
3914 
3915 	if (!(flag & DDI_PROP_CANSLEEP))
3916 		flag |= DDI_PROP_DONTSLEEP;
3917 	return (ddi_prop_update_common(dev, dip,
3918 	    (flag | DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE),
3919 	    name, value, length, ddi_prop_fm_encode_bytes));
3920 }
3921 
3922 
3923 /*
3924  * Common lookup routine used to lookup and decode a property.
3925  * Creates a property handle, searches for the raw encoded data,
3926  * fills in the handle, and calls the property decode functions
3927  * passed in.
3928  *
3929  * This routine is not static because ddi_bus_prop_op() which lives in
3930  * ddi_impl.c calls it.  No driver should be calling this routine.
3931  */
int
ddi_prop_lookup_common(dev_t match_dev, dev_info_t *dip,
    uint_t flags, char *name, void *data, uint_t *nelements,
    int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
{
	int		rval;
	uint_t		ourflags;
	prop_handle_t	ph;

	/* Note: DDI_DEV_T_ANY is a legal match_dev here, unlike update. */
	if ((match_dev == DDI_DEV_T_NONE) ||
	    (name == NULL) || (strlen(name) == 0))
		return (DDI_PROP_INVAL_ARG);

	/* Default to sleeping allocations unless told otherwise. */
	ourflags = (flags & DDI_PROP_DONTSLEEP) ? flags :
	    flags | DDI_PROP_CANSLEEP;

	/*
	 * Get the encoded data
	 */
	bzero(&ph, sizeof (prop_handle_t));

	if ((flags & DDI_UNBND_DLPI2) || (flags & DDI_PROP_ROOTNEX_GLOBAL)) {
		/*
		 * For rootnex and unbound dlpi style-2 devices, index into
		 * the devnames' array and search the global
		 * property list.
		 */
		ourflags &= ~DDI_UNBND_DLPI2;
		rval = i_ddi_prop_search_global(match_dev,
		    ourflags, name, &ph.ph_data, &ph.ph_size);
	} else {
		rval = ddi_prop_search_common(match_dev, dip,
		    PROP_LEN_AND_VAL_ALLOC, ourflags, name,
		    &ph.ph_data, &ph.ph_size);

	}

	if (rval != DDI_PROP_SUCCESS && rval != DDI_PROP_FOUND_1275) {
		ASSERT(ph.ph_data == NULL);
		ASSERT(ph.ph_size == 0);
		return (rval);
	}

	/*
	 * If the encoded data came from a OBP or software
	 * use the 1275 OBP decode/encode routines.
	 * PH_FROM_PROM tells the decoder the bytes are PROM-encoded.
	 */
	ph.ph_cur_pos = ph.ph_data;
	ph.ph_save_pos = ph.ph_data;
	ph.ph_ops = &prop_1275_ops;
	ph.ph_flags = (rval == DDI_PROP_FOUND_1275) ? PH_FROM_PROM : 0;

	rval = (*prop_decoder)(&ph, data, nelements);

	/*
	 * Free the encoded data (the decoder allocated the caller's
	 * decoded copy separately).
	 */
	if (ph.ph_size != 0)
		kmem_free(ph.ph_data, ph.ph_size);

	return (rval);
}
3994 
3995 /*
3996  * Lookup and return an array of composite properties.  The driver must
3997  * provide the decode routine.
3998  */
3999 int
4000 ddi_prop_lookup(dev_t match_dev, dev_info_t *dip,
4001     uint_t flags, char *name, void *data, uint_t *nelements,
4002     int (*prop_decoder)(prop_handle_t *, void *data, uint_t *nelements))
4003 {
4004 	return (ddi_prop_lookup_common(match_dev, dip,
4005 	    (flags | DDI_PROP_TYPE_COMPOSITE), name,
4006 	    data, nelements, prop_decoder));
4007 }
4008 
4009 /*
4010  * Return 1 if a property exists (no type checking done).
4011  * Return 0 if it does not exist.
4012  */
4013 int
4014 ddi_prop_exists(dev_t match_dev, dev_info_t *dip, uint_t flags, char *name)
4015 {
4016 	int	i;
4017 	uint_t	x = 0;
4018 
4019 	i = ddi_prop_search_common(match_dev, dip, PROP_EXISTS,
4020 	    flags | DDI_PROP_TYPE_MASK, name, NULL, &x);
4021 	return (i == DDI_PROP_SUCCESS || i == DDI_PROP_FOUND_1275);
4022 }
4023 
4024 
4025 /*
4026  * Update an array of composite properties.  The driver must
4027  * provide the encode routine.
4028  */
4029 int
4030 ddi_prop_update(dev_t match_dev, dev_info_t *dip,
4031     char *name, void *data, uint_t nelements,
4032     int (*prop_create)(prop_handle_t *, void *data, uint_t nelements))
4033 {
4034 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_COMPOSITE,
4035 	    name, data, nelements, prop_create));
4036 }
4037 
4038 /*
4039  * Get a single integer or boolean property and return it.
4040  * If the property does not exists, or cannot be decoded,
4041  * then return the defvalue passed in.
4042  *
4043  * This routine always succeeds.
4044  */
4045 int
4046 ddi_prop_get_int(dev_t match_dev, dev_info_t *dip, uint_t flags,
4047     char *name, int defvalue)
4048 {
4049 	int	data;
4050 	uint_t	nelements;
4051 	int	rval;
4052 
4053 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4054 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4055 #ifdef DEBUG
4056 		if (dip != NULL) {
4057 			cmn_err(CE_WARN, "ddi_prop_get_int: invalid flag"
4058 			    " 0x%x (prop = %s, node = %s%d)", flags,
4059 			    name, ddi_driver_name(dip), ddi_get_instance(dip));
4060 		}
4061 #endif /* DEBUG */
4062 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4063 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4064 	}
4065 
4066 	if ((rval = ddi_prop_lookup_common(match_dev, dip,
4067 	    (flags | DDI_PROP_TYPE_INT), name, &data, &nelements,
4068 	    ddi_prop_fm_decode_int)) != DDI_PROP_SUCCESS) {
4069 		if (rval == DDI_PROP_END_OF_DATA)
4070 			data = 1;
4071 		else
4072 			data = defvalue;
4073 	}
4074 	return (data);
4075 }
4076 
4077 /*
4078  * Get a single 64 bit integer or boolean property and return it.
4079  * If the property does not exists, or cannot be decoded,
4080  * then return the defvalue passed in.
4081  *
4082  * This routine always succeeds.
4083  */
int64_t
ddi_prop_get_int64(dev_t match_dev, dev_info_t *dip, uint_t flags,
    char *name, int64_t defvalue)
{
	int64_t	data;
	uint_t	nelements;
	int	rval;

	/*
	 * NOTE(review): unlike ddi_prop_get_int(), invalid flag bits are
	 * not masked off -- the call fails with DDI_PROP_INVAL_ARG, which
	 * contradicts the "always succeeds" comment above.  Confirm
	 * callers tolerate this before relying on it.
	 */
	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
#ifdef DEBUG
		if (dip != NULL) {
			cmn_err(CE_WARN, "ddi_prop_get_int64: invalid flag"
			    " 0x%x (prop = %s, node = %s%d)", flags,
			    name, ddi_driver_name(dip), ddi_get_instance(dip));
		}
#endif /* DEBUG */
		return (DDI_PROP_INVAL_ARG);
	}

	/* Lookup is restricted to software lists (DDI_PROP_NOTPROM). */
	if ((rval = ddi_prop_lookup_common(match_dev, dip,
	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
	    name, &data, &nelements, ddi_prop_fm_decode_int64))
	    != DDI_PROP_SUCCESS) {
		if (rval == DDI_PROP_END_OF_DATA)
			data = 1;
		else
			data = defvalue;
	}
	return (data);
}
4115 
4116 /*
4117  * Get an array of integer property
4118  */
4119 int
4120 ddi_prop_lookup_int_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4121     char *name, int **data, uint_t *nelements)
4122 {
4123 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4124 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4125 #ifdef DEBUG
4126 		if (dip != NULL) {
4127 			cmn_err(CE_WARN, "ddi_prop_lookup_int_array: "
4128 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4129 			    flags, name, ddi_driver_name(dip),
4130 			    ddi_get_instance(dip));
4131 		}
4132 #endif /* DEBUG */
4133 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4134 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4135 	}
4136 
4137 	return (ddi_prop_lookup_common(match_dev, dip,
4138 	    (flags | DDI_PROP_TYPE_INT), name, data,
4139 	    nelements, ddi_prop_fm_decode_ints));
4140 }
4141 
4142 /*
4143  * Get an array of 64 bit integer properties
4144  */
4145 int
4146 ddi_prop_lookup_int64_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4147     char *name, int64_t **data, uint_t *nelements)
4148 {
4149 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4150 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4151 #ifdef DEBUG
4152 		if (dip != NULL) {
4153 			cmn_err(CE_WARN, "ddi_prop_lookup_int64_array: "
4154 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4155 			    flags, name, ddi_driver_name(dip),
4156 			    ddi_get_instance(dip));
4157 		}
4158 #endif /* DEBUG */
4159 		return (DDI_PROP_INVAL_ARG);
4160 	}
4161 
4162 	return (ddi_prop_lookup_common(match_dev, dip,
4163 	    (flags | DDI_PROP_TYPE_INT64 | DDI_PROP_NOTPROM),
4164 	    name, data, nelements, ddi_prop_fm_decode_int64_array));
4165 }
4166 
4167 /*
4168  * Update a single integer property.  If the property exists on the drivers
4169  * property list it updates, else it creates it.
4170  */
4171 int
4172 ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4173     char *name, int data)
4174 {
4175 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4176 	    name, &data, 1, ddi_prop_fm_encode_ints));
4177 }
4178 
4179 /*
4180  * Update a single 64 bit integer property.
4181  * Update the driver property list if it exists, else create it.
4182  */
4183 int
4184 ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4185     char *name, int64_t data)
4186 {
4187 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4188 	    name, &data, 1, ddi_prop_fm_encode_int64));
4189 }
4190 
4191 int
4192 e_ddi_prop_update_int(dev_t match_dev, dev_info_t *dip,
4193     char *name, int data)
4194 {
4195 	return (ddi_prop_update_common(match_dev, dip,
4196 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4197 	    name, &data, 1, ddi_prop_fm_encode_ints));
4198 }
4199 
4200 int
4201 e_ddi_prop_update_int64(dev_t match_dev, dev_info_t *dip,
4202     char *name, int64_t data)
4203 {
4204 	return (ddi_prop_update_common(match_dev, dip,
4205 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4206 	    name, &data, 1, ddi_prop_fm_encode_int64));
4207 }
4208 
4209 /*
4210  * Update an array of integer property.  If the property exists on the drivers
4211  * property list it updates, else it creates it.
4212  */
4213 int
4214 ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4215     char *name, int *data, uint_t nelements)
4216 {
4217 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT,
4218 	    name, data, nelements, ddi_prop_fm_encode_ints));
4219 }
4220 
4221 /*
4222  * Update an array of 64 bit integer properties.
4223  * Update the driver property list if it exists, else create it.
4224  */
4225 int
4226 ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4227     char *name, int64_t *data, uint_t nelements)
4228 {
4229 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_INT64,
4230 	    name, data, nelements, ddi_prop_fm_encode_int64));
4231 }
4232 
4233 int
4234 e_ddi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip,
4235     char *name, int64_t *data, uint_t nelements)
4236 {
4237 	return (ddi_prop_update_common(match_dev, dip,
4238 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT64,
4239 	    name, data, nelements, ddi_prop_fm_encode_int64));
4240 }
4241 
4242 int
4243 e_ddi_prop_update_int_array(dev_t match_dev, dev_info_t *dip,
4244     char *name, int *data, uint_t nelements)
4245 {
4246 	return (ddi_prop_update_common(match_dev, dip,
4247 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_INT,
4248 	    name, data, nelements, ddi_prop_fm_encode_ints));
4249 }
4250 
4251 /*
4252  * Get a single string property.
4253  */
4254 int
4255 ddi_prop_lookup_string(dev_t match_dev, dev_info_t *dip, uint_t flags,
4256     char *name, char **data)
4257 {
4258 	uint_t x;
4259 
4260 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4261 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4262 #ifdef DEBUG
4263 		if (dip != NULL) {
4264 			cmn_err(CE_WARN, "%s: invalid flag 0x%x "
4265 			    "(prop = %s, node = %s%d); invalid bits ignored",
4266 			    "ddi_prop_lookup_string", flags, name,
4267 			    ddi_driver_name(dip), ddi_get_instance(dip));
4268 		}
4269 #endif /* DEBUG */
4270 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4271 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4272 	}
4273 
4274 	return (ddi_prop_lookup_common(match_dev, dip,
4275 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4276 	    &x, ddi_prop_fm_decode_string));
4277 }
4278 
4279 /*
4280  * Get an array of strings property.
4281  */
4282 int
4283 ddi_prop_lookup_string_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4284     char *name, char ***data, uint_t *nelements)
4285 {
4286 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4287 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4288 #ifdef DEBUG
4289 		if (dip != NULL) {
4290 			cmn_err(CE_WARN, "ddi_prop_lookup_string_array: "
4291 			    "invalid flag 0x%x (prop = %s, node = %s%d)",
4292 			    flags, name, ddi_driver_name(dip),
4293 			    ddi_get_instance(dip));
4294 		}
4295 #endif /* DEBUG */
4296 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4297 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4298 	}
4299 
4300 	return (ddi_prop_lookup_common(match_dev, dip,
4301 	    (flags | DDI_PROP_TYPE_STRING), name, data,
4302 	    nelements, ddi_prop_fm_decode_strings));
4303 }
4304 
4305 /*
4306  * Update a single string property.
4307  */
4308 int
4309 ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4310     char *name, char *data)
4311 {
4312 	return (ddi_prop_update_common(match_dev, dip,
4313 	    DDI_PROP_TYPE_STRING, name, &data, 1,
4314 	    ddi_prop_fm_encode_string));
4315 }
4316 
4317 int
4318 e_ddi_prop_update_string(dev_t match_dev, dev_info_t *dip,
4319     char *name, char *data)
4320 {
4321 	return (ddi_prop_update_common(match_dev, dip,
4322 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4323 	    name, &data, 1, ddi_prop_fm_encode_string));
4324 }
4325 
4326 
4327 /*
4328  * Update an array of strings property.
4329  */
4330 int
4331 ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4332     char *name, char **data, uint_t nelements)
4333 {
4334 	return (ddi_prop_update_common(match_dev, dip,
4335 	    DDI_PROP_TYPE_STRING, name, data, nelements,
4336 	    ddi_prop_fm_encode_strings));
4337 }
4338 
4339 int
4340 e_ddi_prop_update_string_array(dev_t match_dev, dev_info_t *dip,
4341     char *name, char **data, uint_t nelements)
4342 {
4343 	return (ddi_prop_update_common(match_dev, dip,
4344 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_STRING,
4345 	    name, data, nelements,
4346 	    ddi_prop_fm_encode_strings));
4347 }
4348 
4349 
4350 /*
4351  * Get an array of bytes property.
4352  */
4353 int
4354 ddi_prop_lookup_byte_array(dev_t match_dev, dev_info_t *dip, uint_t flags,
4355     char *name, uchar_t **data, uint_t *nelements)
4356 {
4357 	if (flags & ~(DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4358 	    LDI_DEV_T_ANY | DDI_UNBND_DLPI2 | DDI_PROP_ROOTNEX_GLOBAL)) {
4359 #ifdef DEBUG
4360 		if (dip != NULL) {
4361 			cmn_err(CE_WARN, "ddi_prop_lookup_byte_array: "
4362 			    " invalid flag 0x%x (prop = %s, node = %s%d)",
4363 			    flags, name, ddi_driver_name(dip),
4364 			    ddi_get_instance(dip));
4365 		}
4366 #endif /* DEBUG */
4367 		flags &= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
4368 		    LDI_DEV_T_ANY | DDI_UNBND_DLPI2;
4369 	}
4370 
4371 	return (ddi_prop_lookup_common(match_dev, dip,
4372 	    (flags | DDI_PROP_TYPE_BYTE), name, data,
4373 	    nelements, ddi_prop_fm_decode_bytes));
4374 }
4375 
4376 /*
4377  * Update an array of bytes property.
4378  */
4379 int
4380 ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4381     char *name, uchar_t *data, uint_t nelements)
4382 {
4383 	if (nelements == 0)
4384 		return (DDI_PROP_INVAL_ARG);
4385 
4386 	return (ddi_prop_update_common(match_dev, dip, DDI_PROP_TYPE_BYTE,
4387 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4388 }
4389 
4390 
4391 int
4392 e_ddi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip,
4393     char *name, uchar_t *data, uint_t nelements)
4394 {
4395 	if (nelements == 0)
4396 		return (DDI_PROP_INVAL_ARG);
4397 
4398 	return (ddi_prop_update_common(match_dev, dip,
4399 	    DDI_PROP_SYSTEM_DEF | DDI_PROP_TYPE_BYTE,
4400 	    name, data, nelements, ddi_prop_fm_encode_bytes));
4401 }
4402 
4403 
4404 /*
4405  * ddi_prop_remove_common:	Undefine a managed property:
4406  *			Input dev_t must match dev_t when defined.
4407  *			Returns DDI_PROP_NOT_FOUND, possibly.
4408  *			DDI_PROP_INVAL_ARG is also possible if dev is
4409  *			DDI_DEV_T_ANY or incoming name is the NULL string.
4410  */
4411 int
4412 ddi_prop_remove_common(dev_t dev, dev_info_t *dip, char *name, int flag)
4413 {
4414 	ddi_prop_t	**list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4415 	ddi_prop_t	*propp;
4416 	ddi_prop_t	*lastpropp = NULL;
4417 
4418 	if ((dev == DDI_DEV_T_ANY) || (name == (char *)0) ||
4419 	    (strlen(name) == 0)) {
4420 		return (DDI_PROP_INVAL_ARG);
4421 	}
4422 
4423 	if (flag & DDI_PROP_SYSTEM_DEF)
4424 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4425 	else if (flag & DDI_PROP_HW_DEF)
4426 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4427 
4428 	mutex_enter(&(DEVI(dip)->devi_lock));
4429 
4430 	for (propp = *list_head; propp != NULL; propp = propp->prop_next)  {
4431 		if (DDI_STRSAME(propp->prop_name, name) &&
4432 		    (dev == propp->prop_dev)) {
4433 			/*
4434 			 * Unlink this propp allowing for it to
4435 			 * be first in the list:
4436 			 */
4437 
4438 			if (lastpropp == NULL)
4439 				*list_head = propp->prop_next;
4440 			else
4441 				lastpropp->prop_next = propp->prop_next;
4442 
4443 			mutex_exit(&(DEVI(dip)->devi_lock));
4444 
4445 			/*
4446 			 * Free memory and return...
4447 			 */
4448 			kmem_free(propp->prop_name,
4449 			    strlen(propp->prop_name) + 1);
4450 			if (propp->prop_len != 0)
4451 				kmem_free(propp->prop_val, propp->prop_len);
4452 			kmem_free(propp, sizeof (ddi_prop_t));
4453 			return (DDI_PROP_SUCCESS);
4454 		}
4455 		lastpropp = propp;
4456 	}
4457 	mutex_exit(&(DEVI(dip)->devi_lock));
4458 	return (DDI_PROP_NOT_FOUND);
4459 }
4460 
4461 int
4462 ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4463 {
4464 	return (ddi_prop_remove_common(dev, dip, name, 0));
4465 }
4466 
4467 int
4468 e_ddi_prop_remove(dev_t dev, dev_info_t *dip, char *name)
4469 {
4470 	return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_SYSTEM_DEF));
4471 }
4472 
4473 /*
4474  * e_ddi_prop_list_delete: remove a list of properties
4475  *	Note that the caller needs to provide the required protection
4476  *	(eg. devi_lock if these properties are still attached to a devi)
4477  */
4478 void
4479 e_ddi_prop_list_delete(ddi_prop_t *props)
4480 {
4481 	i_ddi_prop_list_delete(props);
4482 }
4483 
4484 /*
4485  * ddi_prop_remove_all_common:
4486  *	Used before unloading a driver to remove
4487  *	all properties. (undefines all dev_t's props.)
4488  *	Also removes `explicitly undefined' props.
4489  *	No errors possible.
4490  */
4491 void
4492 ddi_prop_remove_all_common(dev_info_t *dip, int flag)
4493 {
4494 	ddi_prop_t	**list_head;
4495 
4496 	mutex_enter(&(DEVI(dip)->devi_lock));
4497 	if (flag & DDI_PROP_SYSTEM_DEF) {
4498 		list_head = &(DEVI(dip)->devi_sys_prop_ptr);
4499 	} else if (flag & DDI_PROP_HW_DEF) {
4500 		list_head = &(DEVI(dip)->devi_hw_prop_ptr);
4501 	} else {
4502 		list_head = &(DEVI(dip)->devi_drv_prop_ptr);
4503 	}
4504 	i_ddi_prop_list_delete(*list_head);
4505 	*list_head = NULL;
4506 	mutex_exit(&(DEVI(dip)->devi_lock));
4507 }
4508 
4509 
4510 /*
4511  * ddi_prop_remove_all:		Remove all driver prop definitions.
4512  */
4513 
4514 void
4515 ddi_prop_remove_all(dev_info_t *dip)
4516 {
4517 	i_ddi_prop_dyn_driver_set(dip, NULL);
4518 	ddi_prop_remove_all_common(dip, 0);
4519 }
4520 
4521 /*
4522  * e_ddi_prop_remove_all:	Remove all system prop definitions.
4523  */
4524 
4525 void
4526 e_ddi_prop_remove_all(dev_info_t *dip)
4527 {
4528 	ddi_prop_remove_all_common(dip, (int)DDI_PROP_SYSTEM_DEF);
4529 }
4530 
4531 
4532 /*
4533  * ddi_prop_undefine:	Explicitly undefine a property.  Property
4534  *			searches which match this property return
4535  *			the error code DDI_PROP_UNDEFINED.
4536  *
4537  *			Use ddi_prop_remove to negate effect of
4538  *			ddi_prop_undefine
4539  *
4540  *			See above for error returns.
4541  */
4542 
4543 int
4544 ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4545 {
4546 	if (!(flag & DDI_PROP_CANSLEEP))
4547 		flag |= DDI_PROP_DONTSLEEP;
4548 	flag |= DDI_PROP_STACK_CREATE | DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4549 	return (ddi_prop_update_common(dev, dip, flag,
4550 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4551 }
4552 
4553 int
4554 e_ddi_prop_undefine(dev_t dev, dev_info_t *dip, int flag, char *name)
4555 {
4556 	if (!(flag & DDI_PROP_CANSLEEP))
4557 		flag |= DDI_PROP_DONTSLEEP;
4558 	flag |= DDI_PROP_SYSTEM_DEF | DDI_PROP_STACK_CREATE |
4559 	    DDI_PROP_UNDEF_IT | DDI_PROP_TYPE_ANY;
4560 	return (ddi_prop_update_common(dev, dip, flag,
4561 	    name, NULL, 0, ddi_prop_fm_encode_bytes));
4562 }
4563 
4564 /*
4565  * Support for gathering dynamic properties in devinfo snapshot.
4566  */
4567 void
4568 i_ddi_prop_dyn_driver_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4569 {
4570 	DEVI(dip)->devi_prop_dyn_driver = dp;
4571 }
4572 
4573 i_ddi_prop_dyn_t *
4574 i_ddi_prop_dyn_driver_get(dev_info_t *dip)
4575 {
4576 	return (DEVI(dip)->devi_prop_dyn_driver);
4577 }
4578 
4579 void
4580 i_ddi_prop_dyn_parent_set(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4581 {
4582 	DEVI(dip)->devi_prop_dyn_parent = dp;
4583 }
4584 
4585 i_ddi_prop_dyn_t *
4586 i_ddi_prop_dyn_parent_get(dev_info_t *dip)
4587 {
4588 	return (DEVI(dip)->devi_prop_dyn_parent);
4589 }
4590 
4591 void
4592 i_ddi_prop_dyn_cache_invalidate(dev_info_t *dip, i_ddi_prop_dyn_t *dp)
4593 {
4594 	/* for now we invalidate the entire cached snapshot */
4595 	if (dip && dp)
4596 		i_ddi_di_cache_invalidate(KM_SLEEP);
4597 }
4598 
/* ARGSUSED */
void
ddi_prop_cache_invalidate(dev_t dev, dev_info_t *dip, char *name, int flags)
{
	/* for now we invalidate the entire cached snapshot */
	i_ddi_di_cache_invalidate(KM_SLEEP);
}
4606 
4607 
4608 /*
4609  * Code to search hardware layer (PROM), if it exists, on behalf of child.
4610  *
4611  * if input dip != child_dip, then call is on behalf of child
4612  * to search PROM, do it via ddi_prop_search_common() and ascend only
4613  * if allowed.
4614  *
4615  * if input dip == ch_dip (child_dip), call is on behalf of root driver,
4616  * to search for PROM defined props only.
4617  *
4618  * Note that the PROM search is done only if the requested dev
4619  * is either DDI_DEV_T_ANY or DDI_DEV_T_NONE. PROM properties
4620  * have no associated dev, thus are automatically associated with
4621  * DDI_DEV_T_NONE.
4622  *
4623  * Modifying flag DDI_PROP_NOTPROM inhibits the search in the h/w layer.
4624  *
4625  * Returns DDI_PROP_FOUND_1275 if found to indicate to framework
4626  * that the property resides in the prom.
4627  */
4628 int
4629 impl_ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4630     ddi_prop_op_t prop_op, int mod_flags,
4631     char *name, caddr_t valuep, int *lengthp)
4632 {
4633 	int	len;
4634 	caddr_t buffer;
4635 
4636 	/*
4637 	 * If requested dev is DDI_DEV_T_NONE or DDI_DEV_T_ANY, then
4638 	 * look in caller's PROM if it's a self identifying device...
4639 	 *
4640 	 * Note that this is very similar to ddi_prop_op, but we
4641 	 * search the PROM instead of the s/w defined properties,
4642 	 * and we are called on by the parent driver to do this for
4643 	 * the child.
4644 	 */
4645 
4646 	if (((dev == DDI_DEV_T_NONE) || (dev == DDI_DEV_T_ANY)) &&
4647 	    ndi_dev_is_prom_node(ch_dip) &&
4648 	    ((mod_flags & DDI_PROP_NOTPROM) == 0)) {
4649 		len = prom_getproplen((pnode_t)DEVI(ch_dip)->devi_nodeid, name);
4650 		if (len == -1) {
4651 			return (DDI_PROP_NOT_FOUND);
4652 		}
4653 
4654 		/*
4655 		 * If exists only request, we're done
4656 		 */
4657 		if (prop_op == PROP_EXISTS) {
4658 			return (DDI_PROP_FOUND_1275);
4659 		}
4660 
4661 		/*
4662 		 * If length only request or prop length == 0, get out
4663 		 */
4664 		if ((prop_op == PROP_LEN) || (len == 0)) {
4665 			*lengthp = len;
4666 			return (DDI_PROP_FOUND_1275);
4667 		}
4668 
4669 		/*
4670 		 * Allocate buffer if required... (either way `buffer'
4671 		 * is receiving address).
4672 		 */
4673 
4674 		switch (prop_op) {
4675 
4676 		case PROP_LEN_AND_VAL_ALLOC:
4677 
4678 			buffer = kmem_alloc((size_t)len,
4679 			    mod_flags & DDI_PROP_CANSLEEP ?
4680 			    KM_SLEEP : KM_NOSLEEP);
4681 			if (buffer == NULL) {
4682 				return (DDI_PROP_NO_MEMORY);
4683 			}
4684 			*(caddr_t *)valuep = buffer;
4685 			break;
4686 
4687 		case PROP_LEN_AND_VAL_BUF:
4688 
4689 			if (len > (*lengthp)) {
4690 				*lengthp = len;
4691 				return (DDI_PROP_BUF_TOO_SMALL);
4692 			}
4693 
4694 			buffer = valuep;
4695 			break;
4696 
4697 		default:
4698 			break;
4699 		}
4700 
4701 		/*
4702 		 * Call the PROM function to do the copy.
4703 		 */
4704 		(void) prom_getprop((pnode_t)DEVI(ch_dip)->devi_nodeid,
4705 		    name, buffer);
4706 
4707 		*lengthp = len; /* return the actual length to the caller */
4708 		(void) impl_fix_props(dip, ch_dip, name, len, buffer);
4709 		return (DDI_PROP_FOUND_1275);
4710 	}
4711 
4712 	return (DDI_PROP_NOT_FOUND);
4713 }
4714 
4715 /*
4716  * The ddi_bus_prop_op default bus nexus prop op function.
4717  *
4718  * Code to search hardware layer (PROM), if it exists,
4719  * on behalf of child, then, if appropriate, ascend and check
4720  * my own software defined properties...
4721  */
4722 int
4723 ddi_bus_prop_op(dev_t dev, dev_info_t *dip, dev_info_t *ch_dip,
4724     ddi_prop_op_t prop_op, int mod_flags,
4725     char *name, caddr_t valuep, int *lengthp)
4726 {
4727 	int	error;
4728 
4729 	error = impl_ddi_bus_prop_op(dev, dip, ch_dip, prop_op, mod_flags,
4730 	    name, valuep, lengthp);
4731 
4732 	if (error == DDI_PROP_SUCCESS || error == DDI_PROP_FOUND_1275 ||
4733 	    error == DDI_PROP_BUF_TOO_SMALL)
4734 		return (error);
4735 
4736 	if (error == DDI_PROP_NO_MEMORY) {
4737 		cmn_err(CE_CONT, prop_no_mem_msg, name);
4738 		return (DDI_PROP_NO_MEMORY);
4739 	}
4740 
4741 	/*
4742 	 * Check the 'options' node as a last resort
4743 	 */
4744 	if ((mod_flags & DDI_PROP_DONTPASS) != 0)
4745 		return (DDI_PROP_NOT_FOUND);
4746 
4747 	if (ch_dip == ddi_root_node())	{
4748 		/*
4749 		 * As a last resort, when we've reached
4750 		 * the top and still haven't found the
4751 		 * property, see if the desired property
4752 		 * is attached to the options node.
4753 		 *
4754 		 * The options dip is attached right after boot.
4755 		 */
4756 		ASSERT(options_dip != NULL);
4757 		/*
4758 		 * Force the "don't pass" flag to *just* see
4759 		 * what the options node has to offer.
4760 		 */
4761 		return (ddi_prop_search_common(dev, options_dip, prop_op,
4762 		    mod_flags|DDI_PROP_DONTPASS, name, valuep,
4763 		    (uint_t *)lengthp));
4764 	}
4765 
4766 	/*
4767 	 * Otherwise, continue search with parent's s/w defined properties...
4768 	 * NOTE: Using `dip' in following call increments the level.
4769 	 */
4770 
4771 	return (ddi_prop_search_common(dev, dip, prop_op, mod_flags,
4772 	    name, valuep, (uint_t *)lengthp));
4773 }
4774 
4775 /*
4776  * External property functions used by other parts of the kernel...
4777  */
4778 
4779 /*
4780  * e_ddi_getlongprop: See comments for ddi_get_longprop.
4781  */
4782 
4783 int
4784 e_ddi_getlongprop(dev_t dev, vtype_t type, char *name, int flags,
4785     caddr_t valuep, int *lengthp)
4786 {
4787 	_NOTE(ARGUNUSED(type))
4788 	dev_info_t *devi;
4789 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_ALLOC;
4790 	int error;
4791 
4792 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4793 		return (DDI_PROP_NOT_FOUND);
4794 
4795 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4796 	ddi_release_devi(devi);
4797 	return (error);
4798 }
4799 
4800 /*
4801  * e_ddi_getlongprop_buf:	See comments for ddi_getlongprop_buf.
4802  */
4803 
4804 int
4805 e_ddi_getlongprop_buf(dev_t dev, vtype_t type, char *name, int flags,
4806     caddr_t valuep, int *lengthp)
4807 {
4808 	_NOTE(ARGUNUSED(type))
4809 	dev_info_t *devi;
4810 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4811 	int error;
4812 
4813 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4814 		return (DDI_PROP_NOT_FOUND);
4815 
4816 	error = cdev_prop_op(dev, devi, prop_op, flags, name, valuep, lengthp);
4817 	ddi_release_devi(devi);
4818 	return (error);
4819 }
4820 
4821 /*
4822  * e_ddi_getprop:	See comments for ddi_getprop.
4823  */
4824 int
4825 e_ddi_getprop(dev_t dev, vtype_t type, char *name, int flags, int defvalue)
4826 {
4827 	_NOTE(ARGUNUSED(type))
4828 	dev_info_t *devi;
4829 	ddi_prop_op_t prop_op = PROP_LEN_AND_VAL_BUF;
4830 	int	propvalue = defvalue;
4831 	int	proplength = sizeof (int);
4832 	int	error;
4833 
4834 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4835 		return (defvalue);
4836 
4837 	error = cdev_prop_op(dev, devi, prop_op,
4838 	    flags, name, (caddr_t)&propvalue, &proplength);
4839 	ddi_release_devi(devi);
4840 
4841 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4842 		propvalue = 1;
4843 
4844 	return (propvalue);
4845 }
4846 
4847 /*
4848  * e_ddi_getprop_int64:
4849  *
4850  * This is a typed interfaces, but predates typed properties. With the
4851  * introduction of typed properties the framework tries to ensure
4852  * consistent use of typed interfaces. This is why TYPE_INT64 is not
4853  * part of TYPE_ANY.  E_ddi_getprop_int64 is a special case where a
4854  * typed interface invokes legacy (non-typed) interfaces:
4855  * cdev_prop_op(), prop_op(9E), ddi_prop_op(9F)).  In this case the
4856  * fact that TYPE_INT64 is not part of TYPE_ANY matters.  To support
4857  * this type of lookup as a single operation we invoke the legacy
4858  * non-typed interfaces with the special CONSUMER_TYPED bit set. The
4859  * framework ddi_prop_op(9F) implementation is expected to check for
4860  * CONSUMER_TYPED and, if set, expand type bits beyond TYPE_ANY
4861  * (currently TYPE_INT64).
4862  */
4863 int64_t
4864 e_ddi_getprop_int64(dev_t dev, vtype_t type, char *name,
4865     int flags, int64_t defvalue)
4866 {
4867 	_NOTE(ARGUNUSED(type))
4868 	dev_info_t	*devi;
4869 	ddi_prop_op_t	prop_op = PROP_LEN_AND_VAL_BUF;
4870 	int64_t		propvalue = defvalue;
4871 	int		proplength = sizeof (propvalue);
4872 	int		error;
4873 
4874 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4875 		return (defvalue);
4876 
4877 	error = cdev_prop_op(dev, devi, prop_op, flags |
4878 	    DDI_PROP_CONSUMER_TYPED, name, (caddr_t)&propvalue, &proplength);
4879 	ddi_release_devi(devi);
4880 
4881 	if ((error == DDI_PROP_SUCCESS) && (proplength == 0))
4882 		propvalue = 1;
4883 
4884 	return (propvalue);
4885 }
4886 
4887 /*
4888  * e_ddi_getproplen:	See comments for ddi_getproplen.
4889  */
4890 int
4891 e_ddi_getproplen(dev_t dev, vtype_t type, char *name, int flags, int *lengthp)
4892 {
4893 	_NOTE(ARGUNUSED(type))
4894 	dev_info_t *devi;
4895 	ddi_prop_op_t prop_op = PROP_LEN;
4896 	int error;
4897 
4898 	if ((devi = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
4899 		return (DDI_PROP_NOT_FOUND);
4900 
4901 	error = cdev_prop_op(dev, devi, prop_op, flags, name, NULL, lengthp);
4902 	ddi_release_devi(devi);
4903 	return (error);
4904 }
4905 
4906 /*
4907  * Routines to get at elements of the dev_info structure
4908  */
4909 
4910 /*
4911  * ddi_binding_name: Return the driver binding name of the devinfo node
4912  *		This is the name the OS used to bind the node to a driver.
4913  */
4914 char *
4915 ddi_binding_name(dev_info_t *dip)
4916 {
4917 	return (DEVI(dip)->devi_binding_name);
4918 }
4919 
4920 /*
4921  * ddi_driver_major: Return the major number of the driver that
4922  *	the supplied devinfo is bound to.  If not yet bound,
4923  *	DDI_MAJOR_T_NONE.
4924  *
4925  * When used by the driver bound to 'devi', this
4926  * function will reliably return the driver major number.
4927  * Other ways of determining the driver major number, such as
4928  *	major = ddi_name_to_major(ddi_get_name(devi));
4929  *	major = ddi_name_to_major(ddi_binding_name(devi));
4930  * can return a different result as the driver/alias binding
4931  * can change dynamically, and thus should be avoided.
4932  */
4933 major_t
4934 ddi_driver_major(dev_info_t *devi)
4935 {
4936 	return (DEVI(devi)->devi_major);
4937 }
4938 
4939 /*
4940  * ddi_driver_name: Return the normalized driver name. this is the
4941  *		actual driver name
4942  */
4943 const char *
4944 ddi_driver_name(dev_info_t *devi)
4945 {
4946 	major_t major;
4947 
4948 	if ((major = ddi_driver_major(devi)) != DDI_MAJOR_T_NONE)
4949 		return (ddi_major_to_name(major));
4950 
4951 	return (ddi_node_name(devi));
4952 }
4953 
4954 /*
4955  * i_ddi_set_binding_name:	Set binding name.
4956  *
4957  *	Set the binding name to the given name.
4958  *	This routine is for use by the ddi implementation, not by drivers.
4959  */
4960 void
4961 i_ddi_set_binding_name(dev_info_t *dip, char *name)
4962 {
4963 	DEVI(dip)->devi_binding_name = name;
4964 
4965 }
4966 
4967 /*
4968  * ddi_get_name: A synonym of ddi_binding_name() ... returns a name
4969  * the implementation has used to bind the node to a driver.
4970  */
4971 char *
4972 ddi_get_name(dev_info_t *dip)
4973 {
4974 	return (DEVI(dip)->devi_binding_name);
4975 }
4976 
4977 /*
4978  * ddi_node_name: Return the name property of the devinfo node
4979  *		This may differ from ddi_binding_name if the node name
4980  *		does not define a binding to a driver (i.e. generic names).
4981  */
4982 char *
4983 ddi_node_name(dev_info_t *dip)
4984 {
4985 	return (DEVI(dip)->devi_node_name);
4986 }
4987 
4988 
4989 /*
4990  * ddi_get_nodeid:	Get nodeid stored in dev_info structure.
4991  */
4992 int
4993 ddi_get_nodeid(dev_info_t *dip)
4994 {
4995 	return (DEVI(dip)->devi_nodeid);
4996 }
4997 
4998 int
4999 ddi_get_instance(dev_info_t *dip)
5000 {
5001 	return (DEVI(dip)->devi_instance);
5002 }
5003 
5004 struct dev_ops *
5005 ddi_get_driver(dev_info_t *dip)
5006 {
5007 	return (DEVI(dip)->devi_ops);
5008 }
5009 
5010 void
5011 ddi_set_driver(dev_info_t *dip, struct dev_ops *devo)
5012 {
5013 	DEVI(dip)->devi_ops = devo;
5014 }
5015 
5016 /*
5017  * ddi_set_driver_private/ddi_get_driver_private:
5018  * Get/set device driver private data in devinfo.
5019  */
5020 void
5021 ddi_set_driver_private(dev_info_t *dip, void *data)
5022 {
5023 	DEVI(dip)->devi_driver_data = data;
5024 }
5025 
5026 void *
5027 ddi_get_driver_private(dev_info_t *dip)
5028 {
5029 	return (DEVI(dip)->devi_driver_data);
5030 }
5031 
5032 /*
5033  * ddi_get_parent, ddi_get_child, ddi_get_next_sibling
5034  */
5035 
5036 dev_info_t *
5037 ddi_get_parent(dev_info_t *dip)
5038 {
5039 	return ((dev_info_t *)DEVI(dip)->devi_parent);
5040 }
5041 
5042 dev_info_t *
5043 ddi_get_child(dev_info_t *dip)
5044 {
5045 	return ((dev_info_t *)DEVI(dip)->devi_child);
5046 }
5047 
5048 dev_info_t *
5049 ddi_get_next_sibling(dev_info_t *dip)
5050 {
5051 	return ((dev_info_t *)DEVI(dip)->devi_sibling);
5052 }
5053 
5054 dev_info_t *
5055 ddi_get_next(dev_info_t *dip)
5056 {
5057 	return ((dev_info_t *)DEVI(dip)->devi_next);
5058 }
5059 
5060 void
5061 ddi_set_next(dev_info_t *dip, dev_info_t *nextdip)
5062 {
5063 	DEVI(dip)->devi_next = DEVI(nextdip);
5064 }
5065 
5066 /*
5067  * ddi_root_node:		Return root node of devinfo tree
5068  */
5069 
5070 dev_info_t *
5071 ddi_root_node(void)
5072 {
5073 	extern dev_info_t *top_devinfo;
5074 
5075 	return (top_devinfo);
5076 }
5077 
5078 /*
5079  * Miscellaneous functions:
5080  */
5081 
5082 /*
5083  * Implementation specific hooks
5084  */
5085 
/*
 * ddi_report_dev: report that a device is attached, via the REPORTDEV
 * ctlop and (for nodes with cb_ops) a console continuation message.
 */
void
ddi_report_dev(dev_info_t *d)
{
	char *b;

	(void) ddi_ctlops(d, d, DDI_CTLOPS_REPORTDEV, (void *)0, (void *)0);

	/*
	 * If this devinfo node has cb_ops, it's implicitly accessible from
	 * userland, so we print its full name together with the instance
	 * number 'abbreviation' that the driver may use internally.
	 */
	if (DEVI(d)->devi_ops->devo_cb_ops != (struct cb_ops *)0 &&
	    (b = kmem_zalloc(MAXPATHLEN, KM_NOSLEEP))) {
		/* "?" prefix: message goes to the log unless booted verbose */
		cmn_err(CE_CONT, "?%s%d is %s\n",
		    ddi_driver_name(d), ddi_get_instance(d),
		    ddi_pathname(d, b));
		kmem_free(b, MAXPATHLEN);
	}
	/* If the NOSLEEP allocation failed, the message is simply skipped. */
}
5106 
5107 /*
5108  * ddi_ctlops() is described in the assembler not to buy a new register
5109  * window when it's called and can reduce cost in climbing the device tree
5110  * without using the tail call optimization.
5111  */
5112 int
5113 ddi_dev_regsize(dev_info_t *dev, uint_t rnumber, off_t *result)
5114 {
5115 	int ret;
5116 
5117 	ret = ddi_ctlops(dev, dev, DDI_CTLOPS_REGSIZE,
5118 	    (void *)&rnumber, (void *)result);
5119 
5120 	return (ret == DDI_SUCCESS ? DDI_SUCCESS : DDI_FAILURE);
5121 }
5122 
5123 int
5124 ddi_dev_nregs(dev_info_t *dev, int *result)
5125 {
5126 	return (ddi_ctlops(dev, dev, DDI_CTLOPS_NREGS, 0, (void *)result));
5127 }
5128 
5129 int
5130 ddi_dev_is_sid(dev_info_t *d)
5131 {
5132 	return (ddi_ctlops(d, d, DDI_CTLOPS_SIDDEV, (void *)0, (void *)0));
5133 }
5134 
5135 int
5136 ddi_slaveonly(dev_info_t *d)
5137 {
5138 	return (ddi_ctlops(d, d, DDI_CTLOPS_SLAVEONLY, (void *)0, (void *)0));
5139 }
5140 
5141 int
5142 ddi_dev_affinity(dev_info_t *a, dev_info_t *b)
5143 {
5144 	return (ddi_ctlops(a, a, DDI_CTLOPS_AFFINITY, (void *)b, (void *)0));
5145 }
5146 
5147 int
5148 ddi_streams_driver(dev_info_t *dip)
5149 {
5150 	if (i_ddi_devi_attached(dip) &&
5151 	    (DEVI(dip)->devi_ops->devo_cb_ops != NULL) &&
5152 	    (DEVI(dip)->devi_ops->devo_cb_ops->cb_str != NULL))
5153 		return (DDI_SUCCESS);
5154 	return (DDI_FAILURE);
5155 }
5156 
5157 /*
5158  * callback free list
5159  */
5160 
5161 static int ncallbacks;
5162 static int nc_low = 170;
5163 static int nc_med = 512;
5164 static int nc_high = 2048;
5165 static struct ddi_callback *callbackq;
5166 static struct ddi_callback *callbackqfree;
5167 
5168 /*
5169  * set/run callback lists
5170  */
5171 struct	cbstats	{
5172 	kstat_named_t	cb_asked;
5173 	kstat_named_t	cb_new;
5174 	kstat_named_t	cb_run;
5175 	kstat_named_t	cb_delete;
5176 	kstat_named_t	cb_maxreq;
5177 	kstat_named_t	cb_maxlist;
5178 	kstat_named_t	cb_alloc;
5179 	kstat_named_t	cb_runouts;
5180 	kstat_named_t	cb_L2;
5181 	kstat_named_t	cb_grow;
5182 } cbstats = {
5183 	{"asked",	KSTAT_DATA_UINT32},
5184 	{"new",		KSTAT_DATA_UINT32},
5185 	{"run",		KSTAT_DATA_UINT32},
5186 	{"delete",	KSTAT_DATA_UINT32},
5187 	{"maxreq",	KSTAT_DATA_UINT32},
5188 	{"maxlist",	KSTAT_DATA_UINT32},
5189 	{"alloc",	KSTAT_DATA_UINT32},
5190 	{"runouts",	KSTAT_DATA_UINT32},
5191 	{"L2",		KSTAT_DATA_UINT32},
5192 	{"grow",	KSTAT_DATA_UINT32},
5193 };
5194 
5195 #define	nc_asked	cb_asked.value.ui32
5196 #define	nc_new		cb_new.value.ui32
5197 #define	nc_run		cb_run.value.ui32
5198 #define	nc_delete	cb_delete.value.ui32
5199 #define	nc_maxreq	cb_maxreq.value.ui32
5200 #define	nc_maxlist	cb_maxlist.value.ui32
5201 #define	nc_alloc	cb_alloc.value.ui32
5202 #define	nc_runouts	cb_runouts.value.ui32
5203 #define	nc_L2		cb_L2.value.ui32
5204 #define	nc_grow		cb_grow.value.ui32
5205 
5206 static kmutex_t ddi_callback_mutex;
5207 
5208 /*
5209  * callbacks are handled using a L1/L2 cache. The L1 cache
5210  * comes out of kmem_cache_alloc and can expand/shrink dynamically. If
5211  * we can't get callbacks from the L1 cache [because pageout is doing
5212  * I/O at the time freemem is 0], we allocate callbacks out of the
5213  * L2 cache. The L2 cache is static and depends on the memory size.
5214  * [We might also count the number of devices at probe time and
5215  * allocate one structure per device and adjust for deferred attach]
5216  */
5217 void
5218 impl_ddi_callback_init(void)
5219 {
5220 	int	i;
5221 	uint_t	physmegs;
5222 	kstat_t	*ksp;
5223 
5224 	physmegs = physmem >> (20 - PAGESHIFT);
5225 	if (physmegs < 48) {
5226 		ncallbacks = nc_low;
5227 	} else if (physmegs < 128) {
5228 		ncallbacks = nc_med;
5229 	} else {
5230 		ncallbacks = nc_high;
5231 	}
5232 
5233 	/*
5234 	 * init free list
5235 	 */
5236 	callbackq = kmem_zalloc(
5237 	    ncallbacks * sizeof (struct ddi_callback), KM_SLEEP);
5238 	for (i = 0; i < ncallbacks-1; i++)
5239 		callbackq[i].c_nfree = &callbackq[i+1];
5240 	callbackqfree = callbackq;
5241 
5242 	/* init kstats */
5243 	if (ksp = kstat_create("unix", 0, "cbstats", "misc", KSTAT_TYPE_NAMED,
5244 	    sizeof (cbstats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) {
5245 		ksp->ks_data = (void *) &cbstats;
5246 		kstat_install(ksp);
5247 	}
5248 
5249 }
5250 
/*
 * callback_insert: add a callback request to the list identified by
 * `listid'.  If an entry with the same function/argument already
 * exists its count is bumped; otherwise a new entry is allocated —
 * first from kmem (L1), then from the static pool (L2), and as a last
 * resort via kmem_alloc_tryhard with KM_PANIC.  Caller must hold
 * ddi_callback_mutex.
 */
static void
callback_insert(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid,
	int count)
{
	struct ddi_callback *list, *marker, *new;
	size_t size = sizeof (struct ddi_callback);

	/* Coalesce with an existing entry for the same (func, arg). */
	list = marker = (struct ddi_callback *)*listid;
	while (list != NULL) {
		if (list->c_call == funcp && list->c_arg == arg) {
			list->c_count += count;
			return;
		}
		marker = list;
		list = list->c_nlist;
	}
	new = kmem_alloc(size, KM_NOSLEEP);
	if (new == NULL) {
		/* L1 failed: fall back to the static (L2) free list. */
		new = callbackqfree;
		if (new == NULL) {
			/* L2 exhausted too: force an allocation. */
			new = kmem_alloc_tryhard(sizeof (struct ddi_callback),
			    &size, KM_NOSLEEP | KM_PANIC);
			cbstats.nc_grow++;
		} else {
			callbackqfree = new->c_nfree;
			cbstats.nc_L2++;
		}
	}
	/* Append to the list (marker is the tail, or NULL if empty). */
	if (marker != NULL) {
		marker->c_nlist = new;
	} else {
		*listid = (uintptr_t)new;
	}
	new->c_size = size;
	new->c_nlist = NULL;
	new->c_call = funcp;
	new->c_arg = arg;
	new->c_count = count;
	cbstats.nc_new++;
	cbstats.nc_alloc++;
	if (cbstats.nc_alloc > cbstats.nc_maxlist)
		cbstats.nc_maxlist = cbstats.nc_alloc;
}
5294 
/*
 * ddi_set_callback: register a resource callback on the list named by
 * `listid', updating the request statistics under ddi_callback_mutex.
 */
void
ddi_set_callback(int (*funcp)(caddr_t), caddr_t arg, uintptr_t *listid)
{
	mutex_enter(&ddi_callback_mutex);
	cbstats.nc_asked++;
	/* Track the high-water mark of outstanding (asked - run) requests. */
	if ((cbstats.nc_asked - cbstats.nc_run) > cbstats.nc_maxreq)
		cbstats.nc_maxreq = (cbstats.nc_asked - cbstats.nc_run);
	(void) callback_insert(funcp, arg, listid, 1);
	mutex_exit(&ddi_callback_mutex);
}
5305 
/*
 * real_callback_run: softcall worker that drains the callback list at
 * `Queue'.  Each entry is popped and freed under ddi_callback_mutex,
 * then its function is invoked (without the lock) up to c_count
 * times; a callback returning 0 is re-queued via callback_insert.
 * Iterates until all requests counted at entry have been serviced.
 */
static void
real_callback_run(void *Queue)
{
	int (*funcp)(caddr_t);
	caddr_t arg;
	int count, rval;
	uintptr_t *listid;
	struct ddi_callback *list, *marker;
	int check_pending = 1;
	int pending = 0;

	do {
		mutex_enter(&ddi_callback_mutex);
		listid = Queue;
		list = (struct ddi_callback *)*listid;
		if (list == NULL) {
			mutex_exit(&ddi_callback_mutex);
			return;
		}
		/* On the first pass, total up the outstanding requests. */
		if (check_pending) {
			marker = list;
			while (marker != NULL) {
				pending += marker->c_count;
				marker = marker->c_nlist;
			}
			check_pending = 0;
		}
		ASSERT(pending > 0);
		ASSERT(list->c_count > 0);
		/* Capture the head entry, then unlink and free it. */
		funcp = list->c_call;
		arg = list->c_arg;
		count = list->c_count;
		*(uintptr_t *)Queue = (uintptr_t)list->c_nlist;
		/* Return pool entries to the free list, others to kmem. */
		if (list >= &callbackq[0] &&
		    list <= &callbackq[ncallbacks-1]) {
			list->c_nfree = callbackqfree;
			callbackqfree = list;
		} else
			kmem_free(list, list->c_size);

		cbstats.nc_delete++;
		cbstats.nc_alloc--;
		mutex_exit(&ddi_callback_mutex);

		/* Invoke the callback with the lock dropped. */
		do {
			if ((rval = (*funcp)(arg)) == 0) {
				/* Callback not ready: requeue remainder. */
				pending -= count;
				mutex_enter(&ddi_callback_mutex);
				(void) callback_insert(funcp, arg, listid,
				    count);
				cbstats.nc_runouts++;
			} else {
				pending--;
				mutex_enter(&ddi_callback_mutex);
				cbstats.nc_run++;
			}
			mutex_exit(&ddi_callback_mutex);
		} while (rval != 0 && (--count > 0));
	} while (pending > 0);
}
5366 
/*
 * ddi_run_callback: schedule the callback list at `listid' to be
 * drained by real_callback_run() via a softcall.
 */
void
ddi_run_callback(uintptr_t *listid)
{
	softcall(real_callback_run, listid);
}
5372 
5373 /*
5374  * ddi_periodic_t
5375  * ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval,
5376  *     int level)
5377  *
5378  * INTERFACE LEVEL
5379  *      Solaris DDI specific (Solaris DDI)
5380  *
5381  * PARAMETERS
5382  *      func: the callback function
5383  *
5384  *            The callback function will be invoked. The function is invoked
5385  *            in kernel context if the argument level passed is the zero.
5386  *            Otherwise it's invoked in interrupt context at the specified
5387  *            level.
5388  *
5389  *       arg: the argument passed to the callback function
5390  *
5391  *  interval: interval time
5392  *
5393  *    level : callback interrupt level
5394  *
5395  *            If the value is the zero, the callback function is invoked
5396  *            in kernel context. If the value is more than the zero, but
5397  *            less than or equal to ten, the callback function is invoked in
5398  *            interrupt context at the specified interrupt level, which may
5399  *            be used for real time applications.
5400  *
5401  *            This value must be in range of 0-10, which can be a numeric
5402  *            number or a pre-defined macro (DDI_IPL_0, ... , DDI_IPL_10).
5403  *
5404  * DESCRIPTION
5405  *      ddi_periodic_add(9F) schedules the specified function to be
5406  *      periodically invoked in the interval time.
5407  *
5408  *      As well as timeout(9F), the exact time interval over which the function
5409  *      takes effect cannot be guaranteed, but the value given is a close
5410  *      approximation.
5411  *
5412  *      Drivers waiting on behalf of processes with real-time constraints must
5413  *      pass non-zero value with the level argument to ddi_periodic_add(9F).
5414  *
5415  * RETURN VALUES
5416  *      ddi_periodic_add(9F) returns a non-zero opaque value (ddi_periodic_t),
5417  *      which must be used for ddi_periodic_delete(9F) to specify the request.
5418  *
5419  * CONTEXT
5420  *      ddi_periodic_add(9F) can be called in user or kernel context, but
5421  *      it cannot be called in interrupt context, which is different from
5422  *      timeout(9F).
5423  */
5424 ddi_periodic_t
5425 ddi_periodic_add(void (*func)(void *), void *arg, hrtime_t interval, int level)
5426 {
5427 	/*
5428 	 * Sanity check of the argument level.
5429 	 */
5430 	if (level < DDI_IPL_0 || level > DDI_IPL_10)
5431 		cmn_err(CE_PANIC,
5432 		    "ddi_periodic_add: invalid interrupt level (%d).", level);
5433 
5434 	/*
5435 	 * Sanity check of the context. ddi_periodic_add() cannot be
5436 	 * called in either interrupt context or high interrupt context.
5437 	 */
5438 	if (servicing_interrupt())
5439 		cmn_err(CE_PANIC,
5440 		    "ddi_periodic_add: called in (high) interrupt context.");
5441 
5442 	return ((ddi_periodic_t)i_timeout(func, arg, interval, level));
5443 }
5444 
5445 /*
5446  * void
5447  * ddi_periodic_delete(ddi_periodic_t req)
5448  *
5449  * INTERFACE LEVEL
5450  *     Solaris DDI specific (Solaris DDI)
5451  *
5452  * PARAMETERS
5453  *     req: ddi_periodic_t opaque value ddi_periodic_add(9F) returned
5454  *     previously.
5455  *
5456  * DESCRIPTION
5457  *     ddi_periodic_delete(9F) cancels the ddi_periodic_add(9F) request
5458  *     previously requested.
5459  *
5460  *     ddi_periodic_delete(9F) will not return until the pending request
5461  *     is canceled or executed.
5462  *
5463  *     As well as untimeout(9F), calling ddi_periodic_delete(9F) for a
5464  *     timeout which is either running on another CPU, or has already
 *     completed causes no problems. However, unlike untimeout(9F), there are
 *     no restrictions on the lock which might be held across the call to
5467  *     ddi_periodic_delete(9F).
5468  *
5469  *     Drivers should be structured with the understanding that the arrival of
5470  *     both an interrupt and a timeout for that interrupt can occasionally
5471  *     occur, in either order.
5472  *
5473  * CONTEXT
5474  *     ddi_periodic_delete(9F) can be called in user or kernel context, but
5475  *     it cannot be called in interrupt context, which is different from
5476  *     untimeout(9F).
5477  */
5478 void
5479 ddi_periodic_delete(ddi_periodic_t req)
5480 {
5481 	/*
5482 	 * Sanity check of the context. ddi_periodic_delete() cannot be
5483 	 * called in either interrupt context or high interrupt context.
5484 	 */
5485 	if (servicing_interrupt())
5486 		cmn_err(CE_PANIC,
5487 		    "ddi_periodic_delete: called in (high) interrupt context.");
5488 
5489 	i_untimeout((timeout_t)req);
5490 }
5491 
5492 dev_info_t *
5493 nodevinfo(dev_t dev, int otyp)
5494 {
5495 	_NOTE(ARGUNUSED(dev, otyp))
5496 	return ((dev_info_t *)0);
5497 }
5498 
5499 /*
5500  * A driver should support its own getinfo(9E) entry point. This function
5501  * is provided as a convenience for ON drivers that don't expect their
5502  * getinfo(9E) entry point to be called. A driver that uses this must not
5503  * call ddi_create_minor_node.
5504  */
int
ddi_no_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	_NOTE(ARGUNUSED(dip, infocmd, arg, result))
	/* Always fails: placeholder getinfo(9E) for drivers without one. */
	return (DDI_FAILURE);
}
5511 
5512 /*
5513  * A driver should support its own getinfo(9E) entry point. This function
 * is provided as a convenience for ON drivers where the minor number
 * is the instance. Drivers that do not have a 1:1 mapping must implement
5516  * their own getinfo(9E) function.
5517  */
5518 int
5519 ddi_getinfo_1to1(dev_info_t *dip, ddi_info_cmd_t infocmd,
5520     void *arg, void **result)
5521 {
5522 	_NOTE(ARGUNUSED(dip))
5523 	int	instance;
5524 
5525 	if (infocmd != DDI_INFO_DEVT2INSTANCE)
5526 		return (DDI_FAILURE);
5527 
5528 	instance = getminor((dev_t)(uintptr_t)arg);
5529 	*result = (void *)(uintptr_t)instance;
5530 	return (DDI_SUCCESS);
5531 }
5532 
int
ddifail(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	_NOTE(ARGUNUSED(devi, cmd))
	/* Always-fail stub for attach-style entry points. */
	return (DDI_FAILURE);
}
5539 
int
ddi_no_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareqp, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, dmareqp, handlep))
	/* Stub DMA-map op: always reports that no mapping is possible. */
	return (DDI_DMA_NOMAPPING);
}
5547 
int
ddi_no_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	_NOTE(ARGUNUSED(dip, rdip, attr, waitfp, arg, handlep))
	/* Stub DMA handle allocation: always rejects the attributes. */
	return (DDI_DMA_BADATTR);
}
5555 
int
ddi_no_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* Stub DMA handle free: always fails. */
	return (DDI_FAILURE);
}
5563 
int
ddi_no_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cp, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, dmareq, cp, ccountp))
	/* Stub DMA bind: always reports that no mapping is possible. */
	return (DDI_DMA_NOMAPPING);
}
5572 
int
ddi_no_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	_NOTE(ARGUNUSED(dip, rdip, handle))
	/* Stub DMA unbind: always fails. */
	return (DDI_FAILURE);
}
5580 
int
ddi_no_dma_flush(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len,
    uint_t cache_flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, off, len, cache_flags))
	/* Stub DMA flush/sync: always fails. */
	return (DDI_FAILURE);
}
5589 
int
ddi_no_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp,
    size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, win, offp, lenp, cookiep, ccountp))
	/* Stub DMA window op: always fails. */
	return (DDI_FAILURE);
}
5598 
int
ddi_no_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t flags)
{
	_NOTE(ARGUNUSED(dip, rdip, handle, request, offp, lenp, objp, flags))
	/* Stub DMA control op: always fails. */
	return (DDI_FAILURE);
}
5607 
/* Intentionally-empty function for slots that require a do-nothing callback. */
void
ddivoid(void)
{}
5611 
int
nochpoll(dev_t dev, short events, int anyyet, short *reventsp,
    struct pollhead **pollhdrp)
{
	_NOTE(ARGUNUSED(dev, events, anyyet, reventsp, pollhdrp))
	/* chpoll-style stub: always fails with ENXIO (poll not supported). */
	return (ENXIO);
}
5619 
/* Return the caller's credential structure (CRED()). */
cred_t *
ddi_get_cred(void)
{
	return (CRED());
}
5625 
/* Return the current value of the system lbolt tick counter. */
clock_t
ddi_get_lbolt(void)
{
	return (lbolt);
}
5631 
5632 time_t
5633 ddi_get_time(void)
5634 {
5635 	time_t	now;
5636 
5637 	if ((now = gethrestime_sec()) == 0) {
5638 		timestruc_t ts;
5639 		mutex_enter(&tod_lock);
5640 		ts = tod_get();
5641 		mutex_exit(&tod_lock);
5642 		return (ts.tv_sec);
5643 	} else {
5644 		return (now);
5645 	}
5646 }
5647 
/* Return the process ID of the current thread's process. */
pid_t
ddi_get_pid(void)
{
	return (ttoproc(curthread)->p_pid);
}
5653 
/* Return the kernel thread ID (t_did) of the current thread. */
kt_did_t
ddi_get_kt_did(void)
{
	return (curthread->t_did);
}
5659 
5660 /*
5661  * This function returns B_TRUE if the caller can reasonably expect that a call
5662  * to cv_wait_sig(9F), cv_timedwait_sig(9F), or qwait_sig(9F) could be awakened
5663  * by user-level signal.  If it returns B_FALSE, then the caller should use
5664  * other means to make certain that the wait will not hang "forever."
5665  *
5666  * It does not check the signal mask, nor for reception of any particular
5667  * signal.
5668  *
5669  * Currently, a thread can receive a signal if it's not a kernel thread and it
5670  * is not in the middle of exit(2) tear-down.  Threads that are in that
5671  * tear-down effectively convert cv_wait_sig to cv_wait, cv_timedwait_sig to
5672  * cv_timedwait, and qwait_sig to qwait.
5673  */
5674 boolean_t
5675 ddi_can_receive_sig(void)
5676 {
5677 	proc_t *pp;
5678 
5679 	if (curthread->t_proc_flag & TP_LWPEXIT)
5680 		return (B_FALSE);
5681 	if ((pp = ttoproc(curthread)) == NULL)
5682 		return (B_FALSE);
5683 	return (pp->p_as != &kas);
5684 }
5685 
5686 /*
5687  * Swap bytes in 16-bit [half-]words
5688  */
5689 void
5690 swab(void *src, void *dst, size_t nbytes)
5691 {
5692 	uchar_t *pf = (uchar_t *)src;
5693 	uchar_t *pt = (uchar_t *)dst;
5694 	uchar_t tmp;
5695 	int nshorts;
5696 
5697 	nshorts = nbytes >> 1;
5698 
5699 	while (--nshorts >= 0) {
5700 		tmp = *pf++;
5701 		*pt++ = *pf++;
5702 		*pt++ = tmp;
5703 	}
5704 }
5705 
5706 static void
5707 ddi_append_minor_node(dev_info_t *ddip, struct ddi_minor_data *dmdp)
5708 {
5709 	int			circ;
5710 	struct ddi_minor_data	*dp;
5711 
5712 	ndi_devi_enter(ddip, &circ);
5713 	if ((dp = DEVI(ddip)->devi_minor) == (struct ddi_minor_data *)NULL) {
5714 		DEVI(ddip)->devi_minor = dmdp;
5715 	} else {
5716 		while (dp->next != (struct ddi_minor_data *)NULL)
5717 			dp = dp->next;
5718 		dp->next = dmdp;
5719 	}
5720 	ndi_devi_exit(ddip, circ);
5721 }
5722 
5723 /*
5724  * Part of the obsolete SunCluster DDI Hooks.
5725  * Keep for binary compatibility
5726  */
minor_t
ddi_getiminor(dev_t dev)
{
	/* Nothing cluster-specific remains; just the ordinary minor number. */
	return (getminor(dev));
}
5732 
5733 static int
5734 i_log_devfs_minor_create(dev_info_t *dip, char *minor_name)
5735 {
5736 	int se_flag;
5737 	int kmem_flag;
5738 	int se_err;
5739 	char *pathname, *class_name;
5740 	sysevent_t *ev = NULL;
5741 	sysevent_id_t eid;
5742 	sysevent_value_t se_val;
5743 	sysevent_attr_list_t *ev_attr_list = NULL;
5744 
5745 	/* determine interrupt context */
5746 	se_flag = (servicing_interrupt()) ? SE_NOSLEEP : SE_SLEEP;
5747 	kmem_flag = (se_flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
5748 
5749 	i_ddi_di_cache_invalidate(kmem_flag);
5750 
5751 #ifdef DEBUG
5752 	if ((se_flag == SE_NOSLEEP) && sunddi_debug) {
5753 		cmn_err(CE_CONT, "ddi_create_minor_node: called from "
5754 		    "interrupt level by driver %s",
5755 		    ddi_driver_name(dip));
5756 	}
5757 #endif /* DEBUG */
5758 
5759 	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_CREATE, EP_DDI, se_flag);
5760 	if (ev == NULL) {
5761 		goto fail;
5762 	}
5763 
5764 	pathname = kmem_alloc(MAXPATHLEN, kmem_flag);
5765 	if (pathname == NULL) {
5766 		sysevent_free(ev);
5767 		goto fail;
5768 	}
5769 
5770 	(void) ddi_pathname(dip, pathname);
5771 	ASSERT(strlen(pathname));
5772 	se_val.value_type = SE_DATA_TYPE_STRING;
5773 	se_val.value.sv_string = pathname;
5774 	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
5775 	    &se_val, se_flag) != 0) {
5776 		kmem_free(pathname, MAXPATHLEN);
5777 		sysevent_free(ev);
5778 		goto fail;
5779 	}
5780 	kmem_free(pathname, MAXPATHLEN);
5781 
5782 	/* add the device class attribute */
5783 	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
5784 		se_val.value_type = SE_DATA_TYPE_STRING;
5785 		se_val.value.sv_string = class_name;
5786 		if (sysevent_add_attr(&ev_attr_list,
5787 		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
5788 			sysevent_free_attr(ev_attr_list);
5789 			goto fail;
5790 		}
5791 	}
5792 
5793 	/*
5794 	 * allow for NULL minor names
5795 	 */
5796 	if (minor_name != NULL) {
5797 		se_val.value.sv_string = minor_name;
5798 		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
5799 		    &se_val, se_flag) != 0) {
5800 			sysevent_free_attr(ev_attr_list);
5801 			sysevent_free(ev);
5802 			goto fail;
5803 		}
5804 	}
5805 
5806 	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
5807 		sysevent_free_attr(ev_attr_list);
5808 		sysevent_free(ev);
5809 		goto fail;
5810 	}
5811 
5812 	if ((se_err = log_sysevent(ev, se_flag, &eid)) != 0) {
5813 		if (se_err == SE_NO_TRANSPORT) {
5814 			cmn_err(CE_WARN, "/devices or /dev may not be current "
5815 			    "for driver %s (%s). Run devfsadm -i %s",
5816 			    ddi_driver_name(dip), "syseventd not responding",
5817 			    ddi_driver_name(dip));
5818 		} else {
5819 			sysevent_free(ev);
5820 			goto fail;
5821 		}
5822 	}
5823 
5824 	sysevent_free(ev);
5825 	return (DDI_SUCCESS);
5826 fail:
5827 	cmn_err(CE_WARN, "/devices or /dev may not be current "
5828 	    "for driver %s. Run devfsadm -i %s",
5829 	    ddi_driver_name(dip), ddi_driver_name(dip));
5830 	return (DDI_SUCCESS);
5831 }
5832 
5833 /*
5834  * failing to remove a minor node is not of interest
5835  * therefore we do not generate an error message
5836  */
/*
 * Post an ESC_DEVFS_MINOR_REMOVE sysevent for minor node 'minor_name'
 * (may be NULL) on 'dip'.  Always returns DDI_SUCCESS; any failure is
 * silently absorbed (see comment above).
 */
static int
i_log_devfs_minor_remove(dev_info_t *dip, char *minor_name)
{
	char *pathname, *class_name;
	sysevent_t *ev;
	sysevent_id_t eid;
	sysevent_value_t se_val;
	sysevent_attr_list_t *ev_attr_list = NULL;

	/*
	 * only log ddi_remove_minor_node() calls outside the scope
	 * of attach/detach reconfigurations and when the dip is
	 * still initialized.
	 */
	if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip) ||
	    (i_ddi_node_state(dip) < DS_INITIALIZED)) {
		return (DDI_SUCCESS);
	}

	i_ddi_di_cache_invalidate(KM_SLEEP);

	ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_MINOR_REMOVE, EP_DDI, SE_SLEEP);
	if (ev == NULL) {
		return (DDI_SUCCESS);
	}

	/* NOTE(review): with KM_SLEEP this should not fail — check is belt-and-braces */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (pathname == NULL) {
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	/* attach the /devices path of the node as DEVFS_PATHNAME */
	(void) ddi_pathname(dip, pathname);
	ASSERT(strlen(pathname));
	se_val.value_type = SE_DATA_TYPE_STRING;
	se_val.value.sv_string = pathname;
	if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME,
	    &se_val, SE_SLEEP) != 0) {
		kmem_free(pathname, MAXPATHLEN);
		sysevent_free(ev);
		return (DDI_SUCCESS);
	}

	kmem_free(pathname, MAXPATHLEN);

	/*
	 * allow for NULL minor names
	 */
	if (minor_name != NULL) {
		se_val.value.sv_string = minor_name;
		if (sysevent_add_attr(&ev_attr_list, DEVFS_MINOR_NAME,
		    &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}
	}

	if ((class_name = i_ddi_devi_class(dip)) != NULL) {
		/* add the device class, driver name and instance attributes */

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = class_name;
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_STRING;
		se_val.value.sv_string = (char *)ddi_driver_name(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

		se_val.value_type = SE_DATA_TYPE_INT32;
		se_val.value.sv_int32 = ddi_get_instance(dip);
		if (sysevent_add_attr(&ev_attr_list,
		    DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) {
			sysevent_free_attr(ev_attr_list);
			goto fail;
		}

	}

	/* attach the attributes and log; failures are deliberately ignored */
	if (sysevent_attach_attributes(ev, ev_attr_list) != 0) {
		sysevent_free_attr(ev_attr_list);
	} else {
		(void) log_sysevent(ev, SE_SLEEP, &eid);
	}
fail:
	sysevent_free(ev);
	return (DDI_SUCCESS);
}
5932 
5933 /*
5934  * Derive the device class of the node.
5935  * Device class names aren't defined yet. Until this is done we use
5936  * devfs event subclass names as device class names.
5937  */
5938 static int
5939 derive_devi_class(dev_info_t *dip, char *node_type, int flag)
5940 {
5941 	int rv = DDI_SUCCESS;
5942 
5943 	if (i_ddi_devi_class(dip) == NULL) {
5944 		if (strncmp(node_type, DDI_NT_BLOCK,
5945 		    sizeof (DDI_NT_BLOCK) - 1) == 0 &&
5946 		    (node_type[sizeof (DDI_NT_BLOCK) - 1] == '\0' ||
5947 		    node_type[sizeof (DDI_NT_BLOCK) - 1] == ':') &&
5948 		    strcmp(node_type, DDI_NT_FD) != 0) {
5949 
5950 			rv = i_ddi_set_devi_class(dip, ESC_DISK, flag);
5951 
5952 		} else if (strncmp(node_type, DDI_NT_NET,
5953 		    sizeof (DDI_NT_NET) - 1) == 0 &&
5954 		    (node_type[sizeof (DDI_NT_NET) - 1] == '\0' ||
5955 		    node_type[sizeof (DDI_NT_NET) - 1] == ':')) {
5956 
5957 			rv = i_ddi_set_devi_class(dip, ESC_NETWORK, flag);
5958 
5959 		} else if (strncmp(node_type, DDI_NT_PRINTER,
5960 		    sizeof (DDI_NT_PRINTER) - 1) == 0 &&
5961 		    (node_type[sizeof (DDI_NT_PRINTER) - 1] == '\0' ||
5962 		    node_type[sizeof (DDI_NT_PRINTER) - 1] == ':')) {
5963 
5964 			rv = i_ddi_set_devi_class(dip, ESC_PRINTER, flag);
5965 
5966 		} else if (strncmp(node_type, DDI_PSEUDO,
5967 		    sizeof (DDI_PSEUDO) -1) == 0 &&
5968 		    (strncmp(ESC_LOFI, ddi_node_name(dip),
5969 		    sizeof (ESC_LOFI) -1) == 0)) {
5970 			rv = i_ddi_set_devi_class(dip, ESC_LOFI, flag);
5971 		}
5972 	}
5973 
5974 	return (rv);
5975 }
5976 
5977 /*
5978  * Check compliance with PSARC 2003/375:
5979  *
5980  * The name must contain only characters a-z, A-Z, 0-9 or _ and it must not
5981  * exceed IFNAMSIZ (16) characters in length.
5982  */
5983 static boolean_t
5984 verify_name(char *name)
5985 {
5986 	size_t	len = strlen(name);
5987 	char	*cp;
5988 
5989 	if (len == 0 || len > IFNAMSIZ)
5990 		return (B_FALSE);
5991 
5992 	for (cp = name; *cp != '\0'; cp++) {
5993 		if (!isalnum(*cp) && *cp != '_')
5994 			return (B_FALSE);
5995 	}
5996 
5997 	return (B_TRUE);
5998 }
5999 
6000 /*
6001  * ddi_create_minor_common:	Create a  ddi_minor_data structure and
6002  *				attach it to the given devinfo node.
6003  */
6004 
/*
 * Common worker for the ddi_create_*_minor_node() family: validate the
 * request, build a ddi_minor_data structure, link it onto dip's minor
 * list, log a devfs sysevent when appropriate, and run matching dacf
 * rules.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ddi_create_minor_common(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, ddi_minor_type mtype,
    const char *read_priv, const char *write_priv, mode_t priv_mode)
{
	struct ddi_minor_data *dmdp;
	major_t major;

	/* only character and block special nodes are legal */
	if (spec_type != S_IFCHR && spec_type != S_IFBLK)
		return (DDI_FAILURE);

	if (name == NULL)
		return (DDI_FAILURE);

	/*
	 * Log a message if the minor number the driver is creating
	 * is not expressible on the on-disk filesystem (currently
	 * this is limited to 18 bits both by UFS). The device can
	 * be opened via devfs, but not by device special files created
	 * via mknod().
	 */
	if (minor_num > L_MAXMIN32) {
		cmn_err(CE_WARN,
		    "%s%d:%s minor 0x%x too big for 32-bit applications",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    name, minor_num);
		return (DDI_FAILURE);
	}

	/* dip must be bound and attached */
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);

	/*
	 * Default node_type to DDI_PSEUDO and issue notice in debug mode
	 */
	if (node_type == NULL) {
		node_type = DDI_PSEUDO;
		NDI_CONFIG_DEBUG((CE_NOTE, "!illegal node_type NULL for %s%d "
		    " minor node %s; default to DDI_PSEUDO",
		    ddi_driver_name(dip), ddi_get_instance(dip), name));
	}

	/*
	 * If the driver is a network driver, ensure that the name falls within
	 * the interface naming constraints specified by PSARC/2003/375.
	 */
	if (strcmp(node_type, DDI_NT_NET) == 0) {
		if (!verify_name(name))
			return (DDI_FAILURE);

		if (mtype == DDM_MINOR) {
			struct devnames *dnp = &devnamesp[major];

			/* Mark driver as a network driver */
			LOCK_DEV_OPS(&dnp->dn_lock);
			dnp->dn_flags |= DN_NETWORK_DRIVER;
			UNLOCK_DEV_OPS(&dnp->dn_lock);
		}
	}

	/* derive and record a device class for ordinary minor nodes */
	if (mtype == DDM_MINOR) {
		if (derive_devi_class(dip,  node_type, KM_NOSLEEP) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);
	}

	/*
	 * Take care of minor number information for the node.
	 */

	if ((dmdp = kmem_zalloc(sizeof (struct ddi_minor_data),
	    KM_NOSLEEP)) == NULL) {
		return (DDI_FAILURE);
	}
	if ((dmdp->ddm_name = i_ddi_strdup(name, KM_NOSLEEP)) == NULL) {
		kmem_free(dmdp, sizeof (struct ddi_minor_data));
		return (DDI_FAILURE);
	}
	dmdp->dip = dip;
	dmdp->ddm_dev = makedevice(major, minor_num);
	dmdp->ddm_spec_type = spec_type;
	dmdp->ddm_node_type = node_type;
	dmdp->type = mtype;
	/* clone devices are redirected through the clone driver's major */
	if (flag & CLONE_DEV) {
		dmdp->type = DDM_ALIAS;
		dmdp->ddm_dev = makedevice(ddi_driver_major(clone_dip), major);
	}
	if (flag & PRIVONLY_DEV) {
		dmdp->ddm_flags |= DM_NO_FSPERM;
	}
	if (read_priv || write_priv) {
		dmdp->ddm_node_priv =
		    devpolicy_priv_by_name(read_priv, write_priv);
	}
	dmdp->ddm_priv_mode = priv_mode;

	ddi_append_minor_node(dip, dmdp);

	/*
	 * only log ddi_create_minor_node() calls which occur
	 * outside the scope of attach(9e)/detach(9e) reconfigurations
	 */
	if (!(DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) &&
	    mtype != DDM_INTERNAL_PATH) {
		(void) i_log_devfs_minor_create(dip, name);
	}

	/*
	 * Check if any dacf rules match the creation of this minor node
	 */
	dacfc_match_create_minor(name, node_type, dip, dmdp, flag);
	return (DDI_SUCCESS);
}
6119 
/* Create an ordinary (DDM_MINOR) minor node with no privilege policy. */
int
ddi_create_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, NULL, NULL, 0));
}
6127 
/* Create a DDM_MINOR minor node guarded by read/write privileges. */
int
ddi_create_priv_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag,
    const char *rdpriv, const char *wrpriv, mode_t priv_mode)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_MINOR, rdpriv, wrpriv, priv_mode));
}
6136 
/* Create a DDM_DEFAULT minor node with no privilege policy. */
int
ddi_create_default_minor_node(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    node_type, flag, DDM_DEFAULT, NULL, NULL, 0));
}
6144 
6145 /*
6146  * Internal (non-ddi) routine for drivers to export names known
6147  * to the kernel (especially ddi_pathname_to_dev_t and friends)
6148  * but not exported externally to /dev
6149  */
/* Create a DDM_INTERNAL_PATH node ("internal" type, not exported to /dev). */
int
ddi_create_internal_pathname(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num)
{
	return (ddi_create_minor_common(dip, name, spec_type, minor_num,
	    "internal", 0, DDM_INTERNAL_PATH, NULL, NULL, 0));
}
6157 
/*
 * Remove the minor node named 'name' from 'dip' — or all of dip's
 * minor nodes when 'name' is NULL — freeing each node's name string,
 * device privilege, and dacf association.  The devinfo node is locked
 * across the list walk.
 */
void
ddi_remove_minor_node(dev_info_t *dip, char *name)
{
	int			circ;
	struct ddi_minor_data	*dmdp, *dmdp1;
	struct ddi_minor_data	**dmdp_prev;

	ndi_devi_enter(dip, &circ);
	dmdp_prev = &DEVI(dip)->devi_minor;
	dmdp = DEVI(dip)->devi_minor;
	while (dmdp != NULL) {
		/* save the successor before dmdp is freed */
		dmdp1 = dmdp->next;
		if ((name == NULL || (dmdp->ddm_name != NULL &&
		    strcmp(name, dmdp->ddm_name) == 0))) {
			if (dmdp->ddm_name != NULL) {
				/* internal paths were never logged to devfs */
				if (dmdp->type != DDM_INTERNAL_PATH)
					(void) i_log_devfs_minor_remove(dip,
					    dmdp->ddm_name);
				kmem_free(dmdp->ddm_name,
				    strlen(dmdp->ddm_name) + 1);
			}
			/*
			 * Release device privilege, if any.
			 * Release dacf client data associated with this minor
			 * node by storing NULL.
			 */
			if (dmdp->ddm_node_priv)
				dpfree(dmdp->ddm_node_priv);
			dacf_store_info((dacf_infohdl_t)dmdp, NULL);
			kmem_free(dmdp, sizeof (struct ddi_minor_data));
			*dmdp_prev = dmdp1;
			/*
			 * OK, we found it, so get out now -- if we drive on,
			 * we will strcmp against garbage.  See 1139209.
			 */
			if (name != NULL)
				break;
		} else {
			dmdp_prev = &dmdp->next;
		}
		dmdp = dmdp1;
	}
	ndi_devi_exit(dip, circ);
}
6202 
6203 
int
ddi_in_panic()
{
	/* Nonzero once the system has begun to panic (panicstr is set). */
	return (panicstr != NULL);
}
6209 
6210 
6211 /*
6212  * Find first bit set in a mask (returned counting from 1 up)
6213  */
6214 
int
ddi_ffs(long mask)
{
	/*
	 * NOTE(review): mask is narrowed to ffs()'s parameter type here;
	 * confirm that callers never rely on bits above that width.
	 */
	return (ffs(mask));
}
6220 
6221 /*
6222  * Find last bit set. Take mask and clear
6223  * all but the most significant bit, and
6224  * then let ffs do the rest of the work.
6225  *
6226  * Algorithm courtesy of Steve Chessin.
6227  */
6228 
int
ddi_fls(long mask)
{
	/*
	 * Repeatedly clear the lowest set bit until only the highest
	 * remains, then let ffs() report its 1-based position.  A zero
	 * mask falls straight through and yields ffs(0) == 0.
	 */
	for (;;) {
		long lower = mask & (mask - 1);

		if (lower == 0)
			break;
		mask = lower;
	}
	return (ffs(mask));
}
6241 
6242 /*
6243  * The next five routines comprise generic storage management utilities
6244  * for driver soft state structures (in "the old days," this was done
6245  * with a statically sized array - big systems and dynamic loading
6246  * and unloading make heap allocation more attractive)
6247  */
6248 
6249 /*
6250  * Allocate a set of pointers to 'n_items' objects of size 'size'
6251  * bytes.  Each pointer is initialized to nil.
6252  *
6253  * The 'size' and 'n_items' values are stashed in the opaque
6254  * handle returned to the caller.
6255  *
6256  * This implementation interprets 'set of pointers' to mean 'array
6257  * of pointers' but note that nothing in the interface definition
6258  * precludes an implementation that uses, for example, a linked list.
6259  * However there should be a small efficiency gain from using an array
6260  * at lookup time.
6261  *
6262  * NOTE	As an optimization, we make our growable array allocations in
6263  *	powers of two (bytes), since that's how much kmem_alloc (currently)
6264  *	gives us anyway.  It should save us some free/realloc's ..
6265  *
6266  *	As a further optimization, we make the growable array start out
6267  *	with MIN_N_ITEMS in it.
6268  */
6269 
6270 #define	MIN_N_ITEMS	8	/* 8 void *'s == 32 bytes */
6271 
/*
 * Initialize a soft state set: allocate the opaque handle and an
 * initial zeroed pointer array whose length is a power of two at least
 * MIN_N_ITEMS and at least n_items.  Returns 0 on success, or EINVAL
 * for a null/already-initialized state_p or a zero item size.
 */
int
ddi_soft_state_init(void **state_p, size_t size, size_t n_items)
{
	struct i_ddi_soft_state *ss;

	if (state_p == NULL || *state_p != NULL || size == 0)
		return (EINVAL);

	ss = kmem_zalloc(sizeof (*ss), KM_SLEEP);
	mutex_init(&ss->lock, NULL, MUTEX_DRIVER, NULL);
	ss->size = size;

	if (n_items < MIN_N_ITEMS)
		ss->n_items = MIN_N_ITEMS;
	else {
		int bitlog;

		/* round n_items up to a power of two (exact powers stay) */
		if ((bitlog = ddi_fls(n_items)) == ddi_ffs(n_items))
			bitlog--;
		ss->n_items = 1 << bitlog;
	}

	ASSERT(ss->n_items >= n_items);

	ss->array = kmem_zalloc(ss->n_items * sizeof (void *), KM_SLEEP);

	*state_p = ss;

	return (0);
}
6302 
6303 
6304 /*
6305  * Allocate a state structure of size 'size' to be associated
6306  * with item 'item'.
6307  *
6308  * In this implementation, the array is extended to
6309  * allow the requested offset, if needed.
6310  */
/*
 * Allocate a zeroed soft state element of ss->size bytes for slot
 * 'item', growing the pointer array (power-of-two doubling) when item
 * lies beyond it.  Outgrown arrays go on a 'dirty' list instead of
 * being freed, so lock-free readers in ddi_get_soft_state() never see
 * freed memory.  Returns DDI_FAILURE for a bad handle, a negative
 * item, or an already-occupied slot.
 */
int
ddi_soft_state_zalloc(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *new_element;

	if ((ss = state) == NULL || item < 0)
		return (DDI_FAILURE);

	mutex_enter(&ss->lock);
	if (ss->size == 0) {
		mutex_exit(&ss->lock);
		cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle: %s",
		    mod_containing_pc(caller()));
		return (DDI_FAILURE);
	}

	array = ss->array;	/* NULL if ss->n_items == 0 */
	ASSERT(ss->n_items != 0 && array != NULL);

	/*
	 * refuse to tread on an existing element
	 */
	if (item < ss->n_items && array[item] != NULL) {
		mutex_exit(&ss->lock);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a new element to plug in
	 */
	new_element = kmem_zalloc(ss->size, KM_SLEEP);

	/*
	 * Check if the array is big enough, if not, grow it.
	 */
	if (item >= ss->n_items) {
		void	**new_array;
		size_t	new_n_items;
		struct i_ddi_soft_state *dirty;

		/*
		 * Allocate a new array of the right length, copy
		 * all the old pointers to the new array, then
		 * if it exists at all, put the old array on the
		 * dirty list.
		 *
		 * Note that we can't kmem_free() the old array.
		 *
		 * Why -- well the 'get' operation is 'mutex-free', so we
		 * can't easily catch a suspended thread that is just about
		 * to dereference the array we just grew out of.  So we
		 * cons up a header and put it on a list of 'dirty'
		 * pointer arrays.  (Dirty in the sense that there may
		 * be suspended threads somewhere that are in the middle
		 * of referencing them).  Fortunately, we -can- garbage
		 * collect it all at ddi_soft_state_fini time.
		 */
		new_n_items = ss->n_items;
		while (new_n_items < (1 + item))
			new_n_items <<= 1;	/* double array size .. */

		ASSERT(new_n_items >= (1 + item));	/* sanity check! */

		new_array = kmem_zalloc(new_n_items * sizeof (void *),
		    KM_SLEEP);
		/*
		 * Copy the pointers into the new array
		 */
		bcopy(array, new_array, ss->n_items * sizeof (void *));

		/*
		 * Save the old array on the dirty list
		 */
		dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP);
		dirty->array = ss->array;
		dirty->n_items = ss->n_items;
		dirty->next = ss->next;
		ss->next = dirty;

		ss->array = (array = new_array);
		ss->n_items = new_n_items;
	}

	ASSERT(array != NULL && item < ss->n_items && array[item] == NULL);

	array[item] = new_element;

	mutex_exit(&ss->lock);
	return (DDI_SUCCESS);
}
6403 
6404 
6405 /*
6406  * Fetch a pointer to the allocated soft state structure.
6407  *
6408  * This is designed to be cheap.
6409  *
6410  * There's an argument that there should be more checking for
6411  * nil pointers and out of bounds on the array.. but we do a lot
6412  * of that in the alloc/free routines.
6413  *
6414  * An array has the convenience that we don't need to lock read-access
6415  * to it c.f. a linked list.  However our "expanding array" strategy
6416  * means that we should hold a readers lock on the i_ddi_soft_state
6417  * structure.
6418  *
6419  * However, from a performance viewpoint, we need to do it without
6420  * any locks at all -- this also makes it a leaf routine.  The algorithm
6421  * is 'lock-free' because we only discard the pointer arrays at
6422  * ddi_soft_state_fini() time.
6423  */
void *
ddi_get_soft_state(void *state, int item)
{
	struct i_ddi_soft_state *ss = state;

	ASSERT(ss != NULL && item >= 0);

	/* Lock-free read: arrays are only discarded at fini time. */
	if (item < ss->n_items && ss->array != NULL)
		return (ss->array[item]);
	return (NULL);
}
6435 
6436 /*
6437  * Free the state structure corresponding to 'item.'   Freeing an
6438  * element that has either gone or was never allocated is not
6439  * considered an error.  Note that we free the state structure, but
6440  * we don't shrink our pointer array, or discard 'dirty' arrays,
6441  * since even a few pointers don't really waste too much memory.
6442  *
6443  * Passing an item number that is out of bounds, or a null pointer will
6444  * provoke an error message.
6445  */
void
ddi_soft_state_free(void *state, int item)
{
	struct i_ddi_soft_state *ss;
	void **array;
	void *element;
	static char msg[] = "ddi_soft_state_free:";

	/* A null state handle is a caller bug; warn and bail. */
	if ((ss = state) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	element = NULL;

	mutex_enter(&ss->lock);

	if ((array = ss->array) == NULL || ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
	} else if (item < 0 || item >= ss->n_items) {
		cmn_err(CE_WARN, "%s item %d not in range [0..%lu]: %s",
		    msg, item, ss->n_items - 1, mod_containing_pc(caller()));
	} else if (array[item] != NULL) {
		/* Detach the element under the lock; free it afterwards. */
		element = array[item];
		array[item] = NULL;
	}

	mutex_exit(&ss->lock);

	/*
	 * Free outside the lock.  NOTE(review): this reads ss->size
	 * unlocked — presumed safe because the item size looks fixed at
	 * init time; confirm nothing mutates ss->size after setup.
	 */
	if (element)
		kmem_free(element, ss->size);
}
6480 
6481 
6482 /*
6483  * Free the entire set of pointers, and any
6484  * soft state structures contained therein.
6485  *
6486  * Note that we don't grab the ss->lock mutex, even though
6487  * we're inspecting the various fields of the data structure.
6488  *
6489  * There is an implicit assumption that this routine will
6490  * never run concurrently with any of the above on this
6491  * particular state structure i.e. by the time the driver
6492  * calls this routine, there should be no other threads
6493  * running in the driver.
6494  */
void
ddi_soft_state_fini(void **state_p)
{
	struct i_ddi_soft_state *ss, *dirty;
	int item;
	static char msg[] = "ddi_soft_state_fini:";

	/* Guard against a null or never-initialized state handle. */
	if (state_p == NULL || (ss = *state_p) == NULL) {
		cmn_err(CE_WARN, "%s null handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* A zero item size means the handle was never properly set up. */
	if (ss->size == 0) {
		cmn_err(CE_WARN, "%s bad handle: %s",
		    msg, mod_containing_pc(caller()));
		return;
	}

	/* Free every remaining element, then the current pointer array. */
	if (ss->n_items > 0) {
		for (item = 0; item < ss->n_items; item++)
			ddi_soft_state_free(ss, item);
		kmem_free(ss->array, ss->n_items * sizeof (void *));
	}

	/*
	 * Now delete any dirty arrays from previous 'grow' operations
	 */
	for (dirty = ss->next; dirty; dirty = ss->next) {
		ss->next = dirty->next;
		kmem_free(dirty->array, dirty->n_items * sizeof (void *));
		kmem_free(dirty, sizeof (*dirty));
	}

	mutex_destroy(&ss->lock);
	kmem_free(ss, sizeof (*ss));

	/* Clear the caller's handle so reuse is caught immediately. */
	*state_p = NULL;
}
6534 
6535 /*
6536  * This sets the devi_addr entry in the dev_info structure 'dip' to 'name'.
6537  * Storage is double buffered to prevent updates during devi_addr use -
 * double buffering is adequate for reliable ddi_deviname() consumption.
6539  * The double buffer is not freed until dev_info structure destruction
6540  * (by i_ddi_free_node).
6541  */
6542 void
6543 ddi_set_name_addr(dev_info_t *dip, char *name)
6544 {
6545 	char	*buf = DEVI(dip)->devi_addr_buf;
6546 	char	*newaddr;
6547 
6548 	if (buf == NULL) {
6549 		buf = kmem_zalloc(2 * MAXNAMELEN, KM_SLEEP);
6550 		DEVI(dip)->devi_addr_buf = buf;
6551 	}
6552 
6553 	if (name) {
6554 		ASSERT(strlen(name) < MAXNAMELEN);
6555 		newaddr = (DEVI(dip)->devi_addr == buf) ?
6556 		    (buf + MAXNAMELEN) : buf;
6557 		(void) strlcpy(newaddr, name, MAXNAMELEN);
6558 	} else
6559 		newaddr = NULL;
6560 
6561 	DEVI(dip)->devi_addr = newaddr;
6562 }
6563 
/*
 * Return the node's current unit-address string, or NULL if none has
 * been set (see ddi_set_name_addr()).
 */
char *
ddi_get_name_addr(dev_info_t *dip)
{
	return (DEVI(dip)->devi_addr);
}
6569 
/*
 * Attach parent-private data 'pd' to the devinfo node.
 */
void
ddi_set_parent_data(dev_info_t *dip, void *pd)
{
	DEVI(dip)->devi_parent_data = pd;
}
6575 
/*
 * Return the parent-private data previously attached to the devinfo
 * node with ddi_set_parent_data().
 */
void *
ddi_get_parent_data(dev_info_t *dip)
{
	return (DEVI(dip)->devi_parent_data);
}
6581 
6582 /*
6583  * ddi_name_to_major: returns the major number of a named module,
6584  * derived from the current driver alias binding.
6585  *
6586  * Caveat: drivers should avoid the use of this function, in particular
6587  * together with ddi_get_name/ddi_binding name, as per
6588  *	major = ddi_name_to_major(ddi_get_name(devi));
6589  * ddi_name_to_major() relies on the state of the device/alias binding,
6590  * which can and does change dynamically as aliases are administered
6591  * over time.  An attached device instance cannot rely on the major
6592  * number returned by ddi_name_to_major() to match its own major number.
6593  *
6594  * For driver use, ddi_driver_major() reliably returns the major number
6595  * for the module to which the device was bound at attach time over
6596  * the life of the instance.
6597  *	major = ddi_driver_major(dev_info_t *)
6598  */
major_t
ddi_name_to_major(char *name)
{
	/* Thin wrapper: resolve via the current driver/alias binding. */
	return (mod_name_to_major(name));
}
6604 
6605 /*
6606  * ddi_major_to_name: Returns the module name bound to a major number.
6607  */
char *
ddi_major_to_name(major_t major)
{
	/* Thin wrapper over the modctl major-to-name lookup. */
	return (mod_major_to_name(major));
}
6613 
6614 /*
6615  * Return the name of the devinfo node pointed at by 'dip' in the buffer
6616  * pointed at by 'name.'  A devinfo node is named as a result of calling
6617  * ddi_initchild().
6618  *
6619  * Note: the driver must be held before calling this function!
6620  */
6621 char *
6622 ddi_deviname(dev_info_t *dip, char *name)
6623 {
6624 	char *addrname;
6625 	char none = '\0';
6626 
6627 	if (dip == ddi_root_node()) {
6628 		*name = '\0';
6629 		return (name);
6630 	}
6631 
6632 	if (i_ddi_node_state(dip) < DS_BOUND) {
6633 		addrname = &none;
6634 	} else {
6635 		/*
6636 		 * Use ddi_get_name_addr() without checking state so we get
6637 		 * a unit-address if we are called after ddi_set_name_addr()
6638 		 * by nexus DDI_CTL_INITCHILD code, but before completing
6639 		 * node promotion to DS_INITIALIZED.  We currently have
6640 		 * two situations where we are called in this state:
6641 		 *   o  For framework processing of a path-oriented alias.
6642 		 *   o  If a SCSA nexus driver calls ddi_devid_register()
6643 		 *	from it's tran_tgt_init(9E) implementation.
6644 		 */
6645 		addrname = ddi_get_name_addr(dip);
6646 		if (addrname == NULL)
6647 			addrname = &none;
6648 	}
6649 
6650 	if (*addrname == '\0') {
6651 		(void) sprintf(name, "/%s", ddi_node_name(dip));
6652 	} else {
6653 		(void) sprintf(name, "/%s@%s", ddi_node_name(dip), addrname);
6654 	}
6655 
6656 	return (name);
6657 }
6658 
6659 /*
6660  * Spits out the name of device node, typically name@addr, for a given node,
6661  * using the driver name, not the nodename.
6662  *
6663  * Used by match_parent. Not to be used elsewhere.
6664  */
6665 char *
6666 i_ddi_parname(dev_info_t *dip, char *name)
6667 {
6668 	char *addrname;
6669 
6670 	if (dip == ddi_root_node()) {
6671 		*name = '\0';
6672 		return (name);
6673 	}
6674 
6675 	ASSERT(i_ddi_node_state(dip) >= DS_INITIALIZED);
6676 
6677 	if (*(addrname = ddi_get_name_addr(dip)) == '\0')
6678 		(void) sprintf(name, "%s", ddi_binding_name(dip));
6679 	else
6680 		(void) sprintf(name, "%s@%s", ddi_binding_name(dip), addrname);
6681 	return (name);
6682 }
6683 
/*
 * Recursively build the full devinfo path for 'dip' into 'path':
 * first emit the ancestors' components, then append this node's
 * "/name@addr" component via ddi_deviname().
 */
static char *
pathname_work(dev_info_t *dip, char *path)
{
	char *bp;

	/* Recursion terminates at the root, which contributes "". */
	if (dip == ddi_root_node()) {
		*path = '\0';
		return (path);
	}
	(void) pathname_work(ddi_get_parent(dip), path);
	bp = path + strlen(path);
	(void) ddi_deviname(dip, bp);
	return (path);
}
6698 
/*
 * Build the full pathname of 'dip' into the caller-supplied buffer
 * 'path'; returns 'path'.
 */
char *
ddi_pathname(dev_info_t *dip, char *path)
{
	return (pathname_work(dip, path));
}
6704 
6705 char *
6706 ddi_pathname_minor(struct ddi_minor_data *dmdp, char *path)
6707 {
6708 	if (dmdp->dip == NULL)
6709 		*path = '\0';
6710 	else {
6711 		(void) ddi_pathname(dmdp->dip, path);
6712 		if (dmdp->ddm_name) {
6713 			(void) strcat(path, ":");
6714 			(void) strcat(path, dmdp->ddm_name);
6715 		}
6716 	}
6717 	return (path);
6718 }
6719 
/*
 * Recursive helper for ddi_pathname_obp(): returns the OBP-relative
 * path for 'dip' in 'path', or NULL if no ancestor carries an
 * "obp-path" property.
 */
static char *
pathname_work_obp(dev_info_t *dip, char *path)
{
	char *bp;
	char *obp_path;

	/*
	 * look up the "obp-path" property, return the path if it exists
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "obp-path", &obp_path) == DDI_PROP_SUCCESS) {
		(void) strcpy(path, obp_path);
		ddi_prop_free(obp_path);
		return (path);
	}

	/*
	 * stop at root, no obp path
	 */
	if (dip == ddi_root_node()) {
		return (NULL);
	}

	/* recurse: our path is the parent's obp path plus one component */
	obp_path = pathname_work_obp(ddi_get_parent(dip), path);
	if (obp_path == NULL)
		return (NULL);

	/*
	 * append our component to parent's obp path
	 *
	 * NOTE(review): *(bp - 1) assumes the parent's path is non-empty;
	 * an empty "obp-path" property on an ancestor would make bp - 1
	 * read before the buffer — confirm such properties cannot occur.
	 */
	bp = path + strlen(path);
	if (*(bp - 1) != '/')
		(void) strcat(bp++, "/");
	(void) ddi_deviname(dip, bp);
	return (path);
}
6756 
6757 /*
6758  * return the 'obp-path' based path for the given node, or NULL if the node
6759  * does not have a different obp path. NOTE: Unlike ddi_pathname, this
6760  * function can't be called from interrupt context (since we need to
6761  * lookup a string property).
6762  */
char *
ddi_pathname_obp(dev_info_t *dip, char *path)
{
	/* property lookups may sleep, so never call at interrupt level */
	ASSERT(!servicing_interrupt());
	if (dip == NULL || path == NULL)
		return (NULL);

	/* split work into a separate function to aid debugging */
	return (pathname_work_obp(dip, path));
}
6773 
6774 int
6775 ddi_pathname_obp_set(dev_info_t *dip, char *component)
6776 {
6777 	dev_info_t *pdip;
6778 	char *obp_path = NULL;
6779 	int rc = DDI_FAILURE;
6780 
6781 	if (dip == NULL)
6782 		return (DDI_FAILURE);
6783 
6784 	obp_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
6785 
6786 	pdip = ddi_get_parent(dip);
6787 
6788 	if (ddi_pathname_obp(pdip, obp_path) == NULL) {
6789 		(void) ddi_pathname(pdip, obp_path);
6790 	}
6791 
6792 	if (component) {
6793 		(void) strncat(obp_path, "/", MAXPATHLEN);
6794 		(void) strncat(obp_path, component, MAXPATHLEN);
6795 	}
6796 	rc = ndi_prop_update_string(DDI_DEV_T_NONE, dip, "obp-path",
6797 	    obp_path);
6798 
6799 	if (obp_path)
6800 		kmem_free(obp_path, MAXPATHLEN);
6801 
6802 	return (rc);
6803 }
6804 
6805 /*
6806  * Given a dev_t, return the pathname of the corresponding device in the
6807  * buffer pointed at by "path."  The buffer is assumed to be large enough
6808  * to hold the pathname of the device (MAXPATHLEN).
6809  *
6810  * The pathname of a device is the pathname of the devinfo node to which
6811  * the device "belongs," concatenated with the character ':' and the name
6812  * of the minor node corresponding to the dev_t.  If spec_type is 0 then
6813  * just the pathname of the devinfo node is returned without driving attach
6814  * of that node.  For a non-zero spec_type, an attach is performed and a
6815  * search of the minor list occurs.
6816  *
6817  * It is possible that the path associated with the dev_t is not
6818  * currently available in the devinfo tree.  In order to have a
6819  * dev_t, a device must have been discovered before, which means
6820  * that the path is always in the instance tree.  The one exception
6821  * to this is if the dev_t is associated with a pseudo driver, in
6822  * which case the device must exist on the pseudo branch of the
6823  * devinfo tree as a result of parsing .conf files.
6824  */
int
ddi_dev_pathname(dev_t devt, int spec_type, char *path)
{
	int		circ;
	major_t		major = getmajor(devt);
	int		instance;
	dev_info_t	*dip;
	char		*minorname;
	char		*drvname;

	/* reject majors beyond the configured device table */
	if (major >= devcnt)
		goto fail;
	if (major == clone_major) {
		/* clone has no minor nodes, manufacture the path here */
		if ((drvname = ddi_major_to_name(getminor(devt))) == NULL)
			goto fail;

		(void) snprintf(path, MAXPATHLEN, "%s:%s", CLONE_PATH, drvname);
		return (DDI_SUCCESS);
	}

	/* extract instance from devt (getinfo(9E) DDI_INFO_DEVT2INSTANCE). */
	if ((instance = dev_to_instance(devt)) == -1)
		goto fail;

	/* reconstruct the path given the major/instance */
	if (e_ddi_majorinstance_to_path(major, instance, path) != DDI_SUCCESS)
		goto fail;

	/* if spec_type given we must drive attach and search minor nodes */
	if ((spec_type == S_IFCHR) || (spec_type == S_IFBLK)) {
		/* attach the path so we can search minors */
		if ((dip = e_ddi_hold_devi_by_path(path, 0)) == NULL)
			goto fail;

		/* Add minorname to path. */
		/* hold the node busy so the minor list stays stable */
		ndi_devi_enter(dip, &circ);
		minorname = i_ddi_devtspectype_to_minorname(dip,
		    devt, spec_type);
		if (minorname) {
			(void) strcat(path, ":");
			(void) strcat(path, minorname);
		}
		ndi_devi_exit(dip, circ);
		ddi_release_devi(dip);
		if (minorname == NULL)
			goto fail;
	}
	ASSERT(strlen(path) < MAXPATHLEN);
	return (DDI_SUCCESS);

fail:	*path = 0;
	return (DDI_FAILURE);
}
6879 
6880 /*
6881  * Given a major number and an instance, return the path.
6882  * This interface does NOT drive attach.
6883  */
int
e_ddi_majorinstance_to_path(major_t major, int instance, char *path)
{
	struct devnames *dnp;
	dev_info_t	*dip;

	/* reject invalid major numbers and unknown instances */
	if ((major >= devcnt) || (instance == -1)) {
		*path = 0;
		return (DDI_FAILURE);
	}

	/* look for the major/instance in the instance tree */
	if (e_ddi_instance_majorinstance_to_path(major, instance,
	    path) == DDI_SUCCESS) {
		ASSERT(strlen(path) < MAXPATHLEN);
		return (DDI_SUCCESS);
	}

	/*
	 * Not in instance tree, find the instance on the per driver list and
	 * construct path to instance via ddi_pathname(). This is how paths
	 * down the 'pseudo' branch are constructed.
	 */
	dnp = &(devnamesp[major]);
	LOCK_DEV_OPS(&(dnp->dn_lock));
	for (dip = dnp->dn_head; dip;
	    dip = (dev_info_t *)DEVI(dip)->devi_next) {
		/* Skip if instance does not match. */
		if (DEVI(dip)->devi_instance != instance)
			continue;

		/*
		 * An ndi_hold_devi() does not prevent DS_INITIALIZED->DS_BOUND
		 * node demotion, so it is not an effective way of ensuring
		 * that the ddi_pathname result has a unit-address.  Instead,
		 * we reverify the node state after calling ddi_pathname().
		 */
		if (i_ddi_node_state(dip) >= DS_INITIALIZED) {
			(void) ddi_pathname(dip, path);
			/* demoted while we built the path; keep looking */
			if (i_ddi_node_state(dip) < DS_INITIALIZED)
				continue;
			UNLOCK_DEV_OPS(&(dnp->dn_lock));
			ASSERT(strlen(path) < MAXPATHLEN);
			return (DDI_SUCCESS);
		}
	}
	UNLOCK_DEV_OPS(&(dnp->dn_lock));

	/* can't reconstruct the path */
	*path = 0;
	return (DDI_FAILURE);
}
6936 
6937 #define	GLD_DRIVER_PPA "SUNW,gld_v0_ppa"
6938 
6939 /*
6940  * Given the dip for a network interface return the ppa for that interface.
6941  *
6942  * In all cases except GLD v0 drivers, the ppa == instance.
6943  * In the case of GLD v0 drivers, the ppa is equal to the attach order.
6944  * So for these drivers when the attach routine calls gld_register(),
6945  * the GLD framework creates an integer property called "gld_driver_ppa"
6946  * that can be queried here.
6947  *
6948  * The only time this function is used is when a system is booting over nfs.
6949  * In this case the system has to resolve the pathname of the boot device
 * to its ppa.
6951  */
int
i_ddi_devi_get_ppa(dev_info_t *dip)
{
	/* default to the instance number when no GLD v0 ppa property exists */
	return (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    GLD_DRIVER_PPA, ddi_get_instance(dip)));
}
6959 
6960 /*
6961  * i_ddi_devi_set_ppa() should only be called from gld_register()
6962  * and only for GLD v0 drivers
6963  */
void
i_ddi_devi_set_ppa(dev_info_t *dip, int ppa)
{
	/* integer property later consumed by i_ddi_devi_get_ppa() */
	(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, GLD_DRIVER_PPA, ppa);
}
6969 
6970 
6971 /*
6972  * Private DDI Console bell functions.
6973  */
/*
 * Ring the console bell for the given duration, if a bell handler has
 * been registered via ddi_set_console_bell(); otherwise a no-op.
 */
void
ddi_ring_console_bell(clock_t duration)
{
	if (ddi_console_bell_func != NULL)
		(*ddi_console_bell_func)(duration);
}
6980 
/*
 * Register (or clear, with NULL) the console bell handler used by
 * ddi_ring_console_bell().
 */
void
ddi_set_console_bell(void (*bellfunc)(clock_t duration))
{
	ddi_console_bell_func = bellfunc;
}
6986 
6987 int
6988 ddi_dma_alloc_handle(dev_info_t *dip, ddi_dma_attr_t *attr,
6989 	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
6990 {
6991 	int (*funcp)() = ddi_dma_allochdl;
6992 	ddi_dma_attr_t dma_attr;
6993 	struct bus_ops *bop;
6994 
6995 	if (attr == (ddi_dma_attr_t *)0)
6996 		return (DDI_DMA_BADATTR);
6997 
6998 	dma_attr = *attr;
6999 
7000 	bop = DEVI(dip)->devi_ops->devo_bus_ops;
7001 	if (bop && bop->bus_dma_allochdl)
7002 		funcp = bop->bus_dma_allochdl;
7003 
7004 	return ((*funcp)(dip, dip, &dma_attr, waitfp, arg, handlep));
7005 }
7006 
/*
 * Free a DMA handle allocated by ddi_dma_alloc_handle().  The HD
 * macro (defined elsewhere in this file) derives the dip arguments
 * from the handle itself.
 */
void
ddi_dma_free_handle(ddi_dma_handle_t *handlep)
{
	ddi_dma_handle_t h = *handlep;
	(void) ddi_dma_freehdl(HD, HD, h);
}
7013 
7014 static uintptr_t dma_mem_list_id = 0;
7015 
7016 
7017 int
7018 ddi_dma_mem_alloc(ddi_dma_handle_t handle, size_t length,
7019 	ddi_device_acc_attr_t *accattrp, uint_t flags,
7020 	int (*waitfp)(caddr_t), caddr_t arg, caddr_t *kaddrp,
7021 	size_t *real_length, ddi_acc_handle_t *handlep)
7022 {
7023 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7024 	dev_info_t *dip = hp->dmai_rdip;
7025 	ddi_acc_hdl_t *ap;
7026 	ddi_dma_attr_t *attrp = &hp->dmai_attr;
7027 	uint_t sleepflag, xfermodes;
7028 	int (*fp)(caddr_t);
7029 	int rval;
7030 
7031 	if (waitfp == DDI_DMA_SLEEP)
7032 		fp = (int (*)())KM_SLEEP;
7033 	else if (waitfp == DDI_DMA_DONTWAIT)
7034 		fp = (int (*)())KM_NOSLEEP;
7035 	else
7036 		fp = waitfp;
7037 	*handlep = impl_acc_hdl_alloc(fp, arg);
7038 	if (*handlep == NULL)
7039 		return (DDI_FAILURE);
7040 
7041 	/* check if the cache attributes are supported */
7042 	if (i_ddi_check_cache_attr(flags) == B_FALSE)
7043 		return (DDI_FAILURE);
7044 
7045 	/*
7046 	 * Transfer the meaningful bits to xfermodes.
7047 	 * Double-check if the 3rd party driver correctly sets the bits.
7048 	 * If not, set DDI_DMA_STREAMING to keep compatibility.
7049 	 */
7050 	xfermodes = flags & (DDI_DMA_CONSISTENT | DDI_DMA_STREAMING);
7051 	if (xfermodes == 0) {
7052 		xfermodes = DDI_DMA_STREAMING;
7053 	}
7054 
7055 	/*
7056 	 * initialize the common elements of data access handle
7057 	 */
7058 	ap = impl_acc_hdl_get(*handlep);
7059 	ap->ah_vers = VERS_ACCHDL;
7060 	ap->ah_dip = dip;
7061 	ap->ah_offset = 0;
7062 	ap->ah_len = 0;
7063 	ap->ah_xfermodes = flags;
7064 	ap->ah_acc = *accattrp;
7065 
7066 	sleepflag = ((waitfp == DDI_DMA_SLEEP) ? 1 : 0);
7067 	if (xfermodes == DDI_DMA_CONSISTENT) {
7068 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7069 		    flags, accattrp, kaddrp, NULL, ap);
7070 		*real_length = length;
7071 	} else {
7072 		rval = i_ddi_mem_alloc(dip, attrp, length, sleepflag,
7073 		    flags, accattrp, kaddrp, real_length, ap);
7074 	}
7075 	if (rval == DDI_SUCCESS) {
7076 		ap->ah_len = (off_t)(*real_length);
7077 		ap->ah_addr = *kaddrp;
7078 	} else {
7079 		impl_acc_hdl_free(*handlep);
7080 		*handlep = (ddi_acc_handle_t)NULL;
7081 		if (waitfp != DDI_DMA_SLEEP && waitfp != DDI_DMA_DONTWAIT) {
7082 			ddi_set_callback(waitfp, arg, &dma_mem_list_id);
7083 		}
7084 		rval = DDI_FAILURE;
7085 	}
7086 	return (rval);
7087 }
7088 
/*
 * Free DMA memory allocated by ddi_dma_mem_alloc() and release its
 * access handle, then run any pending allocation-retry callbacks.
 */
void
ddi_dma_mem_free(ddi_acc_handle_t *handlep)
{
	ddi_acc_hdl_t *ap;

	ap = impl_acc_hdl_get(*handlep);
	ASSERT(ap);

	i_ddi_mem_free((caddr_t)ap->ah_addr, ap);

	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;

	/* give waiters queued by ddi_dma_mem_alloc() another chance */
	if (dma_mem_list_id != 0) {
		ddi_run_callback(&dma_mem_list_id);
	}
}
7109 
/*
 * Bind the memory described by buf(9S) structure 'bp' to a DMA handle:
 * build a ddi_dma_req describing the buffer's memory object and
 * dispatch to the parent nexus bind function cached in the devinfo.
 */
int
ddi_dma_buf_bind_handle(ddi_dma_handle_t handle, struct buf *bp,
	uint_t flags, int (*waitfp)(caddr_t), caddr_t arg,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = (uint_t)bp->b_bcount;

	if (bp->b_flags & B_PAGEIO) {
		/* paged I/O: describe the buffer as a page list */
		dmareq.dmar_object.dmao_type = DMA_OTYP_PAGES;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_pp = bp->b_pages;
		dmareq.dmar_object.dmao_obj.pp_obj.pp_offset =
		    (uint_t)(((uintptr_t)bp->b_un.b_addr) & MMU_PAGEOFFSET);
	} else {
		dmareq.dmar_object.dmao_obj.virt_obj.v_addr = bp->b_un.b_addr;
		if (bp->b_flags & B_SHADOW) {
			/* shadow list accompanies the virtual address */
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv =
			    bp->b_shadow;
			dmareq.dmar_object.dmao_type = DMA_OTYP_BUFVADDR;
		} else {
			dmareq.dmar_object.dmao_type =
			    (bp->b_flags & (B_PHYS | B_REMAPPED)) ?
			    DMA_OTYP_BUFVADDR : DMA_OTYP_VADDR;
			dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;
		}

		/*
		 * If the buffer has no proc pointer, or the proc
		 * struct has the kernel address space, or the buffer has
		 * been marked B_REMAPPED (meaning that it is now
		 * mapped into the kernel's address space), then
		 * the address space is kas (kernel address space).
		 */
		if ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas) ||
		    (bp->b_flags & B_REMAPPED)) {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as = 0;
		} else {
			dmareq.dmar_object.dmao_obj.virt_obj.v_as =
			    bp->b_proc->p_as;
		}
	}

	/* dispatch to the bind function cached on the devinfo node */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
7164 
/*
 * Bind a virtual address range [addr, addr+len) in address space 'as'
 * to a DMA handle.  Zero-length requests are rejected with
 * DDI_DMA_NOMAPPING.
 */
int
ddi_dma_addr_bind_handle(ddi_dma_handle_t handle, struct as *as,
	caddr_t addr, size_t len, uint_t flags, int (*waitfp)(caddr_t),
	caddr_t arg, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	dev_info_t *hdip, *dip;
	struct ddi_dma_req dmareq;
	int (*funcp)();

	if (len == (uint_t)0) {
		return (DDI_DMA_NOMAPPING);
	}
	/* describe the range as a plain virtual-address object */
	dmareq.dmar_flags = flags;
	dmareq.dmar_fp = waitfp;
	dmareq.dmar_arg = arg;
	dmareq.dmar_object.dmao_size = len;
	dmareq.dmar_object.dmao_type = DMA_OTYP_VADDR;
	dmareq.dmar_object.dmao_obj.virt_obj.v_as = as;
	dmareq.dmar_object.dmao_obj.virt_obj.v_addr = addr;
	dmareq.dmar_object.dmao_obj.virt_obj.v_priv = NULL;

	/* dispatch to the bind function cached on the devinfo node */
	dip = hp->dmai_rdip;
	hdip = (dev_info_t *)DEVI(dip)->devi_bus_dma_bindhdl;
	funcp = DEVI(dip)->devi_bus_dma_bindfunc;
	return ((*funcp)(hdip, dip, handle, &dmareq, cookiep, ccountp));
}
7192 
/*
 * Copy the next DMA cookie from the handle's cookie list into
 * *cookiep and advance the handle's internal cookie cursor.  The
 * ASSERT catches callers that request more cookies than were bound.
 */
void
ddi_dma_nextcookie(ddi_dma_handle_t handle, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	ddi_dma_cookie_t *cp;

	cp = hp->dmai_cookie;
	ASSERT(cp);

	/* copy field-by-field rather than by struct assignment */
	cookiep->dmac_notused = cp->dmac_notused;
	cookiep->dmac_type = cp->dmac_type;
	cookiep->dmac_address = cp->dmac_address;
	cookiep->dmac_size = cp->dmac_size;
	hp->dmai_cookie++;
}
7208 
7209 int
7210 ddi_dma_numwin(ddi_dma_handle_t handle, uint_t *nwinp)
7211 {
7212 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7213 	if ((hp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
7214 		return (DDI_FAILURE);
7215 	} else {
7216 		*nwinp = hp->dmai_nwin;
7217 		return (DDI_SUCCESS);
7218 	}
7219 }
7220 
/*
 * Activate DMA window 'win' of handle 'h', routing through the parent
 * bus_dma_win operation when provided (HD derives the dip from 'h').
 */
int
ddi_dma_getwin(ddi_dma_handle_t h, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int (*funcp)() = ddi_dma_win;
	struct bus_ops *bop;

	bop = DEVI(HD)->devi_ops->devo_bus_ops;
	if (bop && bop->bus_dma_win)
		funcp = bop->bus_dma_win;

	return ((*funcp)(HD, HD, h, win, offp, lenp, cookiep, ccountp));
}
7234 
/*
 * Request 64-bit SBus transfers for handle 'h' with the given burst
 * sizes, via the DDI_DMA_SET_SBUS64 mctl operation.
 */
int
ddi_dma_set_sbus64(ddi_dma_handle_t h, ulong_t burstsizes)
{
	return (ddi_dma_mctl(HD, HD, h, DDI_DMA_SET_SBUS64, 0,
	    &burstsizes, 0, 0));
}
7241 
/*
 * Default fault-check callback: report the handle's recorded fault
 * state (maintained by i_ddi_dma_set_fault()/i_ddi_dma_clr_fault()).
 */
int
i_ddi_dma_fault_check(ddi_dma_impl_t *hp)
{
	return (hp->dmai_fault);
}
7247 
7248 int
7249 ddi_check_dma_handle(ddi_dma_handle_t handle)
7250 {
7251 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7252 	int (*check)(ddi_dma_impl_t *);
7253 
7254 	if ((check = hp->dmai_fault_check) == NULL)
7255 		check = i_ddi_dma_fault_check;
7256 
7257 	return (((*check)(hp) == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
7258 }
7259 
7260 void
7261 i_ddi_dma_set_fault(ddi_dma_handle_t handle)
7262 {
7263 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7264 	void (*notify)(ddi_dma_impl_t *);
7265 
7266 	if (!hp->dmai_fault) {
7267 		hp->dmai_fault = 1;
7268 		if ((notify = hp->dmai_fault_notify) != NULL)
7269 			(*notify)(hp);
7270 	}
7271 }
7272 
7273 void
7274 i_ddi_dma_clr_fault(ddi_dma_handle_t handle)
7275 {
7276 	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
7277 	void (*notify)(ddi_dma_impl_t *);
7278 
7279 	if (hp->dmai_fault) {
7280 		hp->dmai_fault = 0;
7281 		if ((notify = hp->dmai_fault_notify) != NULL)
7282 			(*notify)(hp);
7283 	}
7284 }
7285 
7286 /*
7287  * register mapping routines.
7288  */
/*
 * Map register set 'rnumber' of 'dip' for kernel access.  On success
 * *addrp holds the mapped kernel virtual address and *handle a data
 * access handle; on failure the handle is freed and NULLed.
 */
int
ddi_regs_map_setup(dev_info_t *dip, uint_t rnumber, caddr_t *addrp,
	offset_t offset, offset_t len, ddi_device_acc_attr_t *accattrp,
	ddi_acc_handle_t *handle)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;

	/*
	 * Allocate and initialize the common elements of data access handle.
	 */
	*handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handle);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = rnumber;
	hp->ah_offset = offset;
	hp->ah_len = len;
	hp->ah_acc = *accattrp;

	/*
	 * Set up the mapping request and call to parent.
	 */
	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;
	result = ddi_map(dip, &mr, offset, len, addrp);

	/*
	 * check for end result
	 */
	if (result != DDI_SUCCESS) {
		/* mapping failed: do not leak the access handle */
		impl_acc_hdl_free(*handle);
		*handle = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
7334 
/*
 * Tear down a register mapping created by ddi_regs_map_setup():
 * ask the parent to unmap the registers, then free the access handle.
 */
void
ddi_regs_map_free(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	/* mirror the original mapping request, but as an unmap */
	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_RNUMBER;
	mr.map_obj.rnumber = hp->ah_rnumber;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	/*
	 * Call my parent to unmap my regs.
	 */
	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);
	/*
	 * free the handle
	 */
	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
7363 
/*
 * Zero-fill a device region through an access handle.  'bytecount'
 * must be a multiple of the transfer size 'dev_datasz'; 'dev_advcnt'
 * is the per-write pointer advance, in units of the transfer size.
 */
int
ddi_device_zero(ddi_acc_handle_t handle, caddr_t dev_addr, size_t bytecount,
	ssize_t dev_advcnt, uint_t dev_datasz)
{
	uint8_t *b;
	uint16_t *w;
	uint32_t *l;
	uint64_t *ll;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		for (b = (uint8_t *)dev_addr;
		    bytecount != 0; bytecount -= 1, b += dev_advcnt)
			ddi_put8(handle, b, 0);
		break;
	case DDI_DATA_SZ02_ACC:
		for (w = (uint16_t *)dev_addr;
		    bytecount != 0; bytecount -= 2, w += dev_advcnt)
			ddi_put16(handle, w, 0);
		break;
	case DDI_DATA_SZ04_ACC:
		for (l = (uint32_t *)dev_addr;
		    bytecount != 0; bytecount -= 4, l += dev_advcnt)
			ddi_put32(handle, l, 0);
		break;
	case DDI_DATA_SZ08_ACC:
		for (ll = (uint64_t *)dev_addr;
		    bytecount != 0; bytecount -= 8, ll += dev_advcnt)
			ddi_put64(handle, ll, 0x0ll);
		break;
	default:
		/* unknown transfer size */
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7403 
/*
 * Copy 'bytecount' bytes between two device regions through their
 * access handles.  'bytecount' must be a multiple of 'dev_datasz';
 * the advance counts are in units of the transfer size.
 */
int
ddi_device_copy(
	ddi_acc_handle_t src_handle, caddr_t src_addr, ssize_t src_advcnt,
	ddi_acc_handle_t dest_handle, caddr_t dest_addr, ssize_t dest_advcnt,
	size_t bytecount, uint_t dev_datasz)
{
	uint8_t *b_src, *b_dst;
	uint16_t *w_src, *w_dst;
	uint32_t *l_src, *l_dst;
	uint64_t *ll_src, *ll_dst;

	/* check for total byte count is multiple of data transfer size */
	if (bytecount != ((bytecount / dev_datasz) * dev_datasz))
		return (DDI_FAILURE);

	switch (dev_datasz) {
	case DDI_DATA_SZ01_ACC:
		b_src = (uint8_t *)src_addr;
		b_dst = (uint8_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 1) {
			ddi_put8(dest_handle, b_dst,
			    ddi_get8(src_handle, b_src));
			b_dst += dest_advcnt;
			b_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ02_ACC:
		w_src = (uint16_t *)src_addr;
		w_dst = (uint16_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 2) {
			ddi_put16(dest_handle, w_dst,
			    ddi_get16(src_handle, w_src));
			w_dst += dest_advcnt;
			w_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ04_ACC:
		l_src = (uint32_t *)src_addr;
		l_dst = (uint32_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 4) {
			ddi_put32(dest_handle, l_dst,
			    ddi_get32(src_handle, l_src));
			l_dst += dest_advcnt;
			l_src += src_advcnt;
		}
		break;
	case DDI_DATA_SZ08_ACC:
		ll_src = (uint64_t *)src_addr;
		ll_dst = (uint64_t *)dest_addr;

		for (; bytecount != 0; bytecount -= 8) {
			ddi_put64(dest_handle, ll_dst,
			    ddi_get64(src_handle, ll_src));
			ll_dst += dest_advcnt;
			ll_src += src_advcnt;
		}
		break;
	default:
		/* unknown transfer size */
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
7469 
/*
 * Byte-swap helper macros: swap16/swap32/swap64 reverse the byte
 * order of 16-, 32- and 64-bit values, built up pairwise from swap16.
 */
#define	swap16(value)  \
	((((value) & 0xff) << 8) | ((value) >> 8))

#define	swap32(value)	\
	(((uint32_t)swap16((uint16_t)((value) & 0xffff)) << 16) | \
	(uint32_t)swap16((uint16_t)((value) >> 16)))

#define	swap64(value)	\
	(((uint64_t)swap32((uint32_t)((value) & 0xffffffff)) \
	    << 32) | \
	(uint64_t)swap32((uint32_t)((value) >> 32)))
7481 
/*
 * Reverse the byte order of a 16-bit value.
 */
uint16_t
ddi_swap16(uint16_t value)
{
	return ((uint16_t)(((value & 0x00ff) << 8) |
	    ((value & 0xff00) >> 8)));
}
7487 
/*
 * Reverse the byte order of a 32-bit value.
 */
uint32_t
ddi_swap32(uint32_t value)
{
	return (((value & 0x000000ffU) << 24) |
	    ((value & 0x0000ff00U) << 8) |
	    ((value & 0x00ff0000U) >> 8) |
	    ((value & 0xff000000U) >> 24));
}
7493 
/*
 * Reverse the byte order of a 64-bit value.
 */
uint64_t
ddi_swap64(uint64_t value)
{
	uint64_t result = 0;
	int i;

	/* peel bytes off the low end, pushing each onto the result */
	for (i = 0; i < 8; i++) {
		result = (result << 8) | (value & 0xff);
		value >>= 8;
	}
	return (result);
}
7499 
7500 /*
7501  * Convert a binding name to a driver name.
7502  * A binding name is the name used to determine the driver for a
7503  * device - it may be either an alias for the driver or the name
7504  * of the driver itself.
7505  */
7506 char *
7507 i_binding_to_drv_name(char *bname)
7508 {
7509 	major_t major_no;
7510 
7511 	ASSERT(bname != NULL);
7512 
7513 	if ((major_no = ddi_name_to_major(bname)) == -1)
7514 		return (NULL);
7515 	return (ddi_major_to_name(major_no));
7516 }
7517 
7518 /*
7519  * Search for minor name that has specified dev_t and spec_type.
7520  * If spec_type is zero then any dev_t match works.  Since we
7521  * are returning a pointer to the minor name string, we require the
7522  * caller to do the locking.
7523  */
7524 char *
7525 i_ddi_devtspectype_to_minorname(dev_info_t *dip, dev_t dev, int spec_type)
7526 {
7527 	struct ddi_minor_data	*dmdp;
7528 
7529 	/*
7530 	 * The did layered driver currently intentionally returns a
7531 	 * devinfo ptr for an underlying sd instance based on a did
7532 	 * dev_t. In this case it is not an error.
7533 	 *
7534 	 * The did layered driver is associated with Sun Cluster.
7535 	 */
7536 	ASSERT((ddi_driver_major(dip) == getmajor(dev)) ||
7537 	    (strcmp(ddi_major_to_name(getmajor(dev)), "did") == 0));
7538 
7539 	ASSERT(DEVI_BUSY_OWNED(dip));
7540 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7541 		if (((dmdp->type == DDM_MINOR) ||
7542 		    (dmdp->type == DDM_INTERNAL_PATH) ||
7543 		    (dmdp->type == DDM_DEFAULT)) &&
7544 		    (dmdp->ddm_dev == dev) &&
7545 		    ((((spec_type & (S_IFCHR|S_IFBLK))) == 0) ||
7546 		    (dmdp->ddm_spec_type == spec_type)))
7547 			return (dmdp->ddm_name);
7548 	}
7549 
7550 	return (NULL);
7551 }
7552 
7553 /*
7554  * Find the devt and spectype of the specified minor_name.
7555  * Return DDI_FAILURE if minor_name not found. Since we are
7556  * returning everything via arguments we can do the locking.
7557  */
7558 int
7559 i_ddi_minorname_to_devtspectype(dev_info_t *dip, char *minor_name,
7560 	dev_t *devtp, int *spectypep)
7561 {
7562 	int			circ;
7563 	struct ddi_minor_data	*dmdp;
7564 
7565 	/* deal with clone minor nodes */
7566 	if (dip == clone_dip) {
7567 		major_t	major;
7568 		/*
7569 		 * Make sure minor_name is a STREAMS driver.
7570 		 * We load the driver but don't attach to any instances.
7571 		 */
7572 
7573 		major = ddi_name_to_major(minor_name);
7574 		if (major == DDI_MAJOR_T_NONE)
7575 			return (DDI_FAILURE);
7576 
7577 		if (ddi_hold_driver(major) == NULL)
7578 			return (DDI_FAILURE);
7579 
7580 		if (STREAMSTAB(major) == NULL) {
7581 			ddi_rele_driver(major);
7582 			return (DDI_FAILURE);
7583 		}
7584 		ddi_rele_driver(major);
7585 
7586 		if (devtp)
7587 			*devtp = makedevice(clone_major, (minor_t)major);
7588 
7589 		if (spectypep)
7590 			*spectypep = S_IFCHR;
7591 
7592 		return (DDI_SUCCESS);
7593 	}
7594 
7595 	ndi_devi_enter(dip, &circ);
7596 	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
7597 		if (((dmdp->type != DDM_MINOR) &&
7598 		    (dmdp->type != DDM_INTERNAL_PATH) &&
7599 		    (dmdp->type != DDM_DEFAULT)) ||
7600 		    strcmp(minor_name, dmdp->ddm_name))
7601 			continue;
7602 
7603 		if (devtp)
7604 			*devtp = dmdp->ddm_dev;
7605 
7606 		if (spectypep)
7607 			*spectypep = dmdp->ddm_spec_type;
7608 
7609 		ndi_devi_exit(dip, circ);
7610 		return (DDI_SUCCESS);
7611 	}
7612 	ndi_devi_exit(dip, circ);
7613 
7614 	return (DDI_FAILURE);
7615 }
7616 
/* hostid string; source of the host id embedded in DEVID_FAB devids */
extern char	hw_serial[];
static kmutex_t devid_gen_mutex;	/* protects devid_gen_number */
static short	devid_gen_number;	/* generation # for fabricated devids */
7620 
#ifdef DEBUG

/*
 * Fault-injection tunables: when set, i_ddi_devid_register() deliberately
 * corrupts the devid (or its driver-name hint) — either unconditionally or
 * only for the named major number — for devid validation testing.
 */
static int	devid_register_corrupt = 0;
static int	devid_register_corrupt_major = 0;
static int	devid_register_corrupt_hint = 0;
static int	devid_register_corrupt_hint_major = 0;

/* when nonzero, trace devid-layer dev_t lists via DDI_DEBUG_DEVID_DEVTS */
static int devid_lyr_debug = 0;

#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)		\
	if (devid_lyr_debug)					\
		ddi_debug_devid_devts(msg, ndevs, devs)

#else

/* non-DEBUG kernels compile the tracing away */
#define	DDI_DEBUG_DEVID_DEVTS(msg, ndevs, devs)

#endif /* DEBUG */
7639 
7640 
7641 #ifdef	DEBUG
7642 
7643 static void
7644 ddi_debug_devid_devts(char *msg, int ndevs, dev_t *devs)
7645 {
7646 	int i;
7647 
7648 	cmn_err(CE_CONT, "%s:\n", msg);
7649 	for (i = 0; i < ndevs; i++) {
7650 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7651 	}
7652 }
7653 
7654 static void
7655 ddi_debug_devid_paths(char *msg, int npaths, char **paths)
7656 {
7657 	int i;
7658 
7659 	cmn_err(CE_CONT, "%s:\n", msg);
7660 	for (i = 0; i < npaths; i++) {
7661 		cmn_err(CE_CONT, "    %s\n", paths[i]);
7662 	}
7663 }
7664 
7665 static void
7666 ddi_debug_devid_devts_per_path(char *path, int ndevs, dev_t *devs)
7667 {
7668 	int i;
7669 
7670 	cmn_err(CE_CONT, "dev_ts per path %s\n", path);
7671 	for (i = 0; i < ndevs; i++) {
7672 		cmn_err(CE_CONT, "    0x%lx\n", devs[i]);
7673 	}
7674 }
7675 
7676 #endif	/* DEBUG */
7677 
7678 /*
7679  * Register device id into DDI framework.
7680  * Must be called when device is attached.
7681  */
7682 static int
7683 i_ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7684 {
7685 	impl_devid_t	*i_devid = (impl_devid_t *)devid;
7686 	size_t		driver_len;
7687 	const char	*driver_name;
7688 	char		*devid_str;
7689 	major_t		major;
7690 
7691 	if ((dip == NULL) ||
7692 	    ((major = ddi_driver_major(dip)) == DDI_MAJOR_T_NONE))
7693 		return (DDI_FAILURE);
7694 
7695 	/* verify that the devid is valid */
7696 	if (ddi_devid_valid(devid) != DDI_SUCCESS)
7697 		return (DDI_FAILURE);
7698 
7699 	/* Updating driver name hint in devid */
7700 	driver_name = ddi_driver_name(dip);
7701 	driver_len = strlen(driver_name);
7702 	if (driver_len > DEVID_HINT_SIZE) {
7703 		/* Pick up last four characters of driver name */
7704 		driver_name += driver_len - DEVID_HINT_SIZE;
7705 		driver_len = DEVID_HINT_SIZE;
7706 	}
7707 	bzero(i_devid->did_driver, DEVID_HINT_SIZE);
7708 	bcopy(driver_name, i_devid->did_driver, driver_len);
7709 
7710 #ifdef DEBUG
7711 	/* Corrupt the devid for testing. */
7712 	if (devid_register_corrupt)
7713 		i_devid->did_id[0] += devid_register_corrupt;
7714 	if (devid_register_corrupt_major &&
7715 	    (major == devid_register_corrupt_major))
7716 		i_devid->did_id[0] += 1;
7717 	if (devid_register_corrupt_hint)
7718 		i_devid->did_driver[0] += devid_register_corrupt_hint;
7719 	if (devid_register_corrupt_hint_major &&
7720 	    (major == devid_register_corrupt_hint_major))
7721 		i_devid->did_driver[0] += 1;
7722 #endif /* DEBUG */
7723 
7724 	/* encode the devid as a string */
7725 	if ((devid_str = ddi_devid_str_encode(devid, NULL)) == NULL)
7726 		return (DDI_FAILURE);
7727 
7728 	/* add string as a string property */
7729 	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
7730 	    DEVID_PROP_NAME, devid_str) != DDI_SUCCESS) {
7731 		cmn_err(CE_WARN, "%s%d: devid property update failed",
7732 		    ddi_driver_name(dip), ddi_get_instance(dip));
7733 		ddi_devid_str_free(devid_str);
7734 		return (DDI_FAILURE);
7735 	}
7736 
7737 	/* keep pointer to devid string for interrupt context fma code */
7738 	if (DEVI(dip)->devi_devid_str)
7739 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7740 	DEVI(dip)->devi_devid_str = devid_str;
7741 	return (DDI_SUCCESS);
7742 }
7743 
7744 int
7745 ddi_devid_register(dev_info_t *dip, ddi_devid_t devid)
7746 {
7747 	int rval;
7748 
7749 	rval = i_ddi_devid_register(dip, devid);
7750 	if (rval == DDI_SUCCESS) {
7751 		/*
7752 		 * Register devid in devid-to-path cache
7753 		 */
7754 		if (e_devid_cache_register(dip, devid) == DDI_SUCCESS) {
7755 			mutex_enter(&DEVI(dip)->devi_lock);
7756 			DEVI(dip)->devi_flags |= DEVI_REGISTERED_DEVID;
7757 			mutex_exit(&DEVI(dip)->devi_lock);
7758 		} else {
7759 			cmn_err(CE_WARN, "%s%d: failed to cache devid",
7760 			    ddi_driver_name(dip), ddi_get_instance(dip));
7761 		}
7762 	} else {
7763 		cmn_err(CE_WARN, "%s%d: failed to register devid",
7764 		    ddi_driver_name(dip), ddi_get_instance(dip));
7765 	}
7766 	return (rval);
7767 }
7768 
7769 /*
7770  * Remove (unregister) device id from DDI framework.
7771  * Must be called when device is detached.
7772  */
7773 static void
7774 i_ddi_devid_unregister(dev_info_t *dip)
7775 {
7776 	if (DEVI(dip)->devi_devid_str) {
7777 		ddi_devid_str_free(DEVI(dip)->devi_devid_str);
7778 		DEVI(dip)->devi_devid_str = NULL;
7779 	}
7780 
7781 	/* remove the devid property */
7782 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, DEVID_PROP_NAME);
7783 }
7784 
7785 void
7786 ddi_devid_unregister(dev_info_t *dip)
7787 {
7788 	mutex_enter(&DEVI(dip)->devi_lock);
7789 	DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
7790 	mutex_exit(&DEVI(dip)->devi_lock);
7791 	e_devid_cache_unregister(dip);
7792 	i_ddi_devid_unregister(dip);
7793 }
7794 
7795 /*
7796  * Allocate and initialize a device id.
7797  */
7798 int
7799 ddi_devid_init(
7800 	dev_info_t	*dip,
7801 	ushort_t	devid_type,
7802 	ushort_t	nbytes,
7803 	void		*id,
7804 	ddi_devid_t	*ret_devid)
7805 {
7806 	impl_devid_t	*i_devid;
7807 	int		sz = sizeof (*i_devid) + nbytes - sizeof (char);
7808 	int		driver_len;
7809 	const char	*driver_name;
7810 
7811 	switch (devid_type) {
7812 	case DEVID_SCSI3_WWN:
7813 		/*FALLTHRU*/
7814 	case DEVID_SCSI_SERIAL:
7815 		/*FALLTHRU*/
7816 	case DEVID_ATA_SERIAL:
7817 		/*FALLTHRU*/
7818 	case DEVID_ENCAP:
7819 		if (nbytes == 0)
7820 			return (DDI_FAILURE);
7821 		if (id == NULL)
7822 			return (DDI_FAILURE);
7823 		break;
7824 	case DEVID_FAB:
7825 		if (nbytes != 0)
7826 			return (DDI_FAILURE);
7827 		if (id != NULL)
7828 			return (DDI_FAILURE);
7829 		nbytes = sizeof (int) +
7830 		    sizeof (struct timeval32) + sizeof (short);
7831 		sz += nbytes;
7832 		break;
7833 	default:
7834 		return (DDI_FAILURE);
7835 	}
7836 
7837 	if ((i_devid = kmem_zalloc(sz, KM_SLEEP)) == NULL)
7838 		return (DDI_FAILURE);
7839 
7840 	i_devid->did_magic_hi = DEVID_MAGIC_MSB;
7841 	i_devid->did_magic_lo = DEVID_MAGIC_LSB;
7842 	i_devid->did_rev_hi = DEVID_REV_MSB;
7843 	i_devid->did_rev_lo = DEVID_REV_LSB;
7844 	DEVID_FORMTYPE(i_devid, devid_type);
7845 	DEVID_FORMLEN(i_devid, nbytes);
7846 
7847 	/* Fill in driver name hint */
7848 	driver_name = ddi_driver_name(dip);
7849 	driver_len = strlen(driver_name);
7850 	if (driver_len > DEVID_HINT_SIZE) {
7851 		/* Pick up last four characters of driver name */
7852 		driver_name += driver_len - DEVID_HINT_SIZE;
7853 		driver_len = DEVID_HINT_SIZE;
7854 	}
7855 
7856 	bcopy(driver_name, i_devid->did_driver, driver_len);
7857 
7858 	/* Fill in id field */
7859 	if (devid_type == DEVID_FAB) {
7860 		char		*cp;
7861 		int		hostid;
7862 		char		*hostid_cp = &hw_serial[0];
7863 		struct timeval32 timestamp32;
7864 		int		i;
7865 		int		*ip;
7866 		short		gen;
7867 
7868 		/* increase the generation number */
7869 		mutex_enter(&devid_gen_mutex);
7870 		gen = devid_gen_number++;
7871 		mutex_exit(&devid_gen_mutex);
7872 
7873 		cp = i_devid->did_id;
7874 
7875 		/* Fill in host id (big-endian byte ordering) */
7876 		hostid = stoi(&hostid_cp);
7877 		*cp++ = hibyte(hiword(hostid));
7878 		*cp++ = lobyte(hiword(hostid));
7879 		*cp++ = hibyte(loword(hostid));
7880 		*cp++ = lobyte(loword(hostid));
7881 
7882 		/*
7883 		 * Fill in timestamp (big-endian byte ordering)
7884 		 *
7885 		 * (Note that the format may have to be changed
7886 		 * before 2038 comes around, though it's arguably
7887 		 * unique enough as it is..)
7888 		 */
7889 		uniqtime32(&timestamp32);
7890 		ip = (int *)&timestamp32;
7891 		for (i = 0;
7892 		    i < sizeof (timestamp32) / sizeof (int); i++, ip++) {
7893 			int	val;
7894 			val = *ip;
7895 			*cp++ = hibyte(hiword(val));
7896 			*cp++ = lobyte(hiword(val));
7897 			*cp++ = hibyte(loword(val));
7898 			*cp++ = lobyte(loword(val));
7899 		}
7900 
7901 		/* fill in the generation number */
7902 		*cp++ = hibyte(gen);
7903 		*cp++ = lobyte(gen);
7904 	} else
7905 		bcopy(id, i_devid->did_id, nbytes);
7906 
7907 	/* return device id */
7908 	*ret_devid = (ddi_devid_t)i_devid;
7909 	return (DDI_SUCCESS);
7910 }
7911 
/*
 * Return the device id of dip in *ret_devid, without regard to a
 * particular dev_t (DDI_DEV_T_ANY lookup).
 */
int
ddi_devid_get(dev_info_t *dip, ddi_devid_t *ret_devid)
{
	return (i_ddi_devi_get_devid(DDI_DEV_T_ANY, dip, ret_devid));
}
7917 
7918 int
7919 i_ddi_devi_get_devid(dev_t dev, dev_info_t *dip, ddi_devid_t *ret_devid)
7920 {
7921 	char		*devidstr;
7922 
7923 	ASSERT(dev != DDI_DEV_T_NONE);
7924 
7925 	/* look up the property, devt specific first */
7926 	if (ddi_prop_lookup_string(dev, dip, DDI_PROP_DONTPASS,
7927 	    DEVID_PROP_NAME, &devidstr) != DDI_PROP_SUCCESS) {
7928 		if ((dev == DDI_DEV_T_ANY) ||
7929 		    (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
7930 		    DDI_PROP_DONTPASS, DEVID_PROP_NAME, &devidstr) !=
7931 		    DDI_PROP_SUCCESS)) {
7932 			return (DDI_FAILURE);
7933 		}
7934 	}
7935 
7936 	/* convert to binary form */
7937 	if (ddi_devid_str_decode(devidstr, ret_devid, NULL) == -1) {
7938 		ddi_prop_free(devidstr);
7939 		return (DDI_FAILURE);
7940 	}
7941 	ddi_prop_free(devidstr);
7942 	return (DDI_SUCCESS);
7943 }
7944 
7945 /*
7946  * Return a copy of the device id for dev_t
7947  */
7948 int
7949 ddi_lyr_get_devid(dev_t dev, ddi_devid_t *ret_devid)
7950 {
7951 	dev_info_t	*dip;
7952 	int		rval;
7953 
7954 	/* get the dip */
7955 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL)
7956 		return (DDI_FAILURE);
7957 
7958 	rval = i_ddi_devi_get_devid(dev, dip, ret_devid);
7959 
7960 	ddi_release_devi(dip);		/* e_ddi_hold_devi_by_dev() */
7961 	return (rval);
7962 }
7963 
7964 /*
7965  * Return a copy of the minor name for dev_t and spec_type
7966  */
7967 int
7968 ddi_lyr_get_minor_name(dev_t dev, int spec_type, char **minor_name)
7969 {
7970 	char		*buf;
7971 	int		circ;
7972 	dev_info_t	*dip;
7973 	char		*nm;
7974 	int		rval;
7975 
7976 	if ((dip = e_ddi_hold_devi_by_dev(dev, 0)) == NULL) {
7977 		*minor_name = NULL;
7978 		return (DDI_FAILURE);
7979 	}
7980 
7981 	/* Find the minor name and copy into max size buf */
7982 	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
7983 	ndi_devi_enter(dip, &circ);
7984 	nm = i_ddi_devtspectype_to_minorname(dip, dev, spec_type);
7985 	if (nm)
7986 		(void) strcpy(buf, nm);
7987 	ndi_devi_exit(dip, circ);
7988 	ddi_release_devi(dip);	/* e_ddi_hold_devi_by_dev() */
7989 
7990 	if (nm) {
7991 		/* duplicate into min size buf for return result */
7992 		*minor_name = i_ddi_strdup(buf, KM_SLEEP);
7993 		rval = DDI_SUCCESS;
7994 	} else {
7995 		*minor_name = NULL;
7996 		rval = DDI_FAILURE;
7997 	}
7998 
7999 	/* free max size buf and return */
8000 	kmem_free(buf, MAXNAMELEN);
8001 	return (rval);
8002 }
8003 
8004 int
8005 ddi_lyr_devid_to_devlist(
8006 	ddi_devid_t	devid,
8007 	char		*minor_name,
8008 	int		*retndevs,
8009 	dev_t		**retdevs)
8010 {
8011 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
8012 
8013 	if (e_devid_cache_to_devt_list(devid, minor_name,
8014 	    retndevs, retdevs) == DDI_SUCCESS) {
8015 		ASSERT(*retndevs > 0);
8016 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8017 		    *retndevs, *retdevs);
8018 		return (DDI_SUCCESS);
8019 	}
8020 
8021 	if (e_ddi_devid_discovery(devid) == DDI_FAILURE) {
8022 		return (DDI_FAILURE);
8023 	}
8024 
8025 	if (e_devid_cache_to_devt_list(devid, minor_name,
8026 	    retndevs, retdevs) == DDI_SUCCESS) {
8027 		ASSERT(*retndevs > 0);
8028 		DDI_DEBUG_DEVID_DEVTS("ddi_lyr_devid_to_devlist",
8029 		    *retndevs, *retdevs);
8030 		return (DDI_SUCCESS);
8031 	}
8032 
8033 	return (DDI_FAILURE);
8034 }
8035 
/*
 * Free a dev_t list returned by ddi_lyr_devid_to_devlist(); ndevs must
 * be the count that call returned.
 */
void
ddi_lyr_free_devlist(dev_t *devlist, int ndevs)
{
	kmem_free(devlist, sizeof (dev_t) * ndevs);
}
8041 
8042 /*
8043  * Note: This will need to be fixed if we ever allow processes to
8044  * have more than one data model per exec.
8045  */
8046 model_t
8047 ddi_mmap_get_model(void)
8048 {
8049 	return (get_udatamodel());
8050 }
8051 
model_t
ddi_model_convert_from(model_t model)
{
	/*
	 * Mask down to the model selector bits, then strip the
	 * DDI_MODEL_NATIVE flag from the result.
	 */
	return ((model & DDI_MODEL_MASK) & ~DDI_MODEL_NATIVE);
}
8057 
8058 /*
8059  * ddi interfaces managing storage and retrieval of eventcookies.
8060  */
8061 
8062 /*
8063  * Invoke bus nexus driver's implementation of the
8064  * (*bus_remove_eventcall)() interface to remove a registered
8065  * callback handler for "event".
8066  */
8067 int
8068 ddi_remove_event_handler(ddi_callback_id_t id)
8069 {
8070 	ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)id;
8071 	dev_info_t *ddip;
8072 
8073 	ASSERT(cb);
8074 	if (!cb) {
8075 		return (DDI_FAILURE);
8076 	}
8077 
8078 	ddip = NDI_EVENT_DDIP(cb->ndi_evtcb_cookie);
8079 	return (ndi_busop_remove_eventcall(ddip, id));
8080 }
8081 
8082 /*
8083  * Invoke bus nexus driver's implementation of the
8084  * (*bus_add_eventcall)() interface to register a callback handler
8085  * for "event".
8086  */
8087 int
8088 ddi_add_event_handler(dev_info_t *dip, ddi_eventcookie_t event,
8089     void (*handler)(dev_info_t *, ddi_eventcookie_t, void *, void *),
8090     void *arg, ddi_callback_id_t *id)
8091 {
8092 	return (ndi_busop_add_eventcall(dip, dip, event, handler, arg, id));
8093 }
8094 
8095 
8096 /*
8097  * Return a handle for event "name" by calling up the device tree
8098  * hierarchy via  (*bus_get_eventcookie)() interface until claimed
8099  * by a bus nexus or top of dev_info tree is reached.
8100  */
8101 int
8102 ddi_get_eventcookie(dev_info_t *dip, char *name,
8103     ddi_eventcookie_t *event_cookiep)
8104 {
8105 	return (ndi_busop_get_eventcookie(dip, dip,
8106 	    name, event_cookiep));
8107 }
8108 
8109 /*
8110  * This procedure is provided as the general callback function when
8111  * umem_lockmemory calls as_add_callback for long term memory locking.
8112  * When as_unmap, as_setprot, or as_free encounter segments which have
8113  * locked memory, this callback will be invoked.
8114  */
8115 void
8116 umem_lock_undo(struct as *as, void *arg, uint_t event)
8117 {
8118 	_NOTE(ARGUNUSED(as, event))
8119 	struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)arg;
8120 
8121 	/*
8122 	 * Call the cleanup function.  Decrement the cookie reference
8123 	 * count, if it goes to zero, return the memory for the cookie.
8124 	 * The i_ddi_umem_unlock for this cookie may or may not have been
8125 	 * called already.  It is the responsibility of the caller of
8126 	 * umem_lockmemory to handle the case of the cleanup routine
8127 	 * being called after a ddi_umem_unlock for the cookie
8128 	 * was called.
8129 	 */
8130 
8131 	(*cp->callbacks.cbo_umem_lock_cleanup)((ddi_umem_cookie_t)cp);
8132 
8133 	/* remove the cookie if reference goes to zero */
8134 	if (atomic_add_long_nv((ulong_t *)(&(cp->cook_refcnt)), -1) == 0) {
8135 		kmem_free(cp, sizeof (struct ddi_umem_cookie));
8136 	}
8137 }
8138 
8139 /*
8140  * The following two Consolidation Private routines provide generic
8141  * interfaces to increase/decrease the amount of device-locked memory.
8142  *
8143  * To keep project_rele and project_hold consistent, i_ddi_decr_locked_memory()
8144  * must be called every time i_ddi_incr_locked_memory() is called.
8145  */
8146 int
8147 /* ARGSUSED */
8148 i_ddi_incr_locked_memory(proc_t *procp, rctl_qty_t inc)
8149 {
8150 	ASSERT(procp != NULL);
8151 	mutex_enter(&procp->p_lock);
8152 	if (rctl_incr_locked_mem(procp, NULL, inc, 1)) {
8153 		mutex_exit(&procp->p_lock);
8154 		return (ENOMEM);
8155 	}
8156 	mutex_exit(&procp->p_lock);
8157 	return (0);
8158 }
8159 
8160 /*
8161  * To keep project_rele and project_hold consistent, i_ddi_incr_locked_memory()
8162  * must be called every time i_ddi_decr_locked_memory() is called.
8163  */
8164 /* ARGSUSED */
8165 void
8166 i_ddi_decr_locked_memory(proc_t *procp, rctl_qty_t dec)
8167 {
8168 	ASSERT(procp != NULL);
8169 	mutex_enter(&procp->p_lock);
8170 	rctl_decr_locked_mem(procp, NULL, dec, 1);
8171 	mutex_exit(&procp->p_lock);
8172 }
8173 
8174 /*
8175  * This routine checks if the max-locked-memory resource ctl is
8176  * exceeded, if not increments it, grabs a hold on the project.
8177  * Returns 0 if successful otherwise returns error code
8178  */
8179 static int
8180 umem_incr_devlockmem(struct ddi_umem_cookie *cookie)
8181 {
8182 	proc_t		*procp;
8183 	int		ret;
8184 
8185 	ASSERT(cookie);
8186 	procp = cookie->procp;
8187 	ASSERT(procp);
8188 
8189 	if ((ret = i_ddi_incr_locked_memory(procp,
8190 	    cookie->size)) != 0) {
8191 		return (ret);
8192 	}
8193 	return (0);
8194 }
8195 
8196 /*
8197  * Decrements the max-locked-memory resource ctl and releases
8198  * the hold on the project that was acquired during umem_incr_devlockmem
8199  */
8200 static void
8201 umem_decr_devlockmem(struct ddi_umem_cookie *cookie)
8202 {
8203 	proc_t		*proc;
8204 
8205 	proc = (proc_t *)cookie->procp;
8206 	if (!proc)
8207 		return;
8208 
8209 	i_ddi_decr_locked_memory(proc, cookie->size);
8210 }
8211 
8212 /*
8213  * A consolidation private function which is essentially equivalent to
8214  * ddi_umem_lock but with the addition of arguments ops_vector and procp.
8215  * A call to as_add_callback is done if DDI_UMEMLOCK_LONGTERM is set, and
8216  * the ops_vector is valid.
8217  *
8218  * Lock the virtual address range in the current process and create a
8219  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8220  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8221  * to user space.
8222  *
8223  * Note: The resource control accounting currently uses a full charge model
8224  * in other words attempts to lock the same/overlapping areas of memory
8225  * will deduct the full size of the buffer from the projects running
8226  * counter for the device locked memory.
8227  *
8228  * addr, size should be PAGESIZE aligned
8229  *
8230  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8231  *	identifies whether the locked memory will be read or written or both
8232  *      DDI_UMEMLOCK_LONGTERM  must be set when the locking will
8233  * be maintained for an indefinitely long period (essentially permanent),
8234  * rather than for what would be required for a typical I/O completion.
8235  * When DDI_UMEMLOCK_LONGTERM is set, umem_lockmemory will return EFAULT
8236  * if the memory pertains to a regular file which is mapped MAP_SHARED.
8237  * This is to prevent a deadlock if a file truncation is attempted after
8238  * after the locking is done.
8239  *
8240  * Returns 0 on success
8241  *	EINVAL - for invalid parameters
8242  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8243  *	ENOMEM - is returned if the current request to lock memory exceeds
8244  *		*.max-locked-memory resource control value.
8245  *      EFAULT - memory pertains to a regular file mapped shared and
8246  *		and DDI_UMEMLOCK_LONGTERM flag is set
8247  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8248  */
int
umem_lockmemory(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie,
		struct umem_callback_ops *ops_vector,
		proc_t *procp)
{
	int	error;
	struct ddi_umem_cookie *p;
	void	(*driver_callback)() = NULL;
	struct as *as = procp->p_as;
	struct seg		*seg;
	vnode_t			*vp;

	*cookie = NULL;		/* in case of any error return */

	/* These are the only three valid flags */
	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE |
	    DDI_UMEMLOCK_LONGTERM)) != 0)
		return (EINVAL);

	/* At least one (can be both) of the two access flags must be set */
	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0)
		return (EINVAL);

	/* addr and len must be page-aligned */
	if (((uintptr_t)addr & PAGEOFFSET) != 0)
		return (EINVAL);

	if ((len & PAGEOFFSET) != 0)
		return (EINVAL);

	/*
	 * For longterm locking a driver callback must be specified; if
	 * not longterm then a callback is optional.
	 */
	if (ops_vector != NULL) {
		if (ops_vector->cbo_umem_callback_version !=
		    UMEM_CALLBACK_VERSION)
			return (EINVAL);
		else
			driver_callback = ops_vector->cbo_umem_lock_cleanup;
	}
	if ((driver_callback == NULL) && (flags & DDI_UMEMLOCK_LONGTERM))
		return (EINVAL);

	/*
	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
	 * be called on first ddi_umem_lock or umem_lockmemory call.
	 */
	if (ddi_umem_unlock_thread == NULL)
		i_ddi_umem_unlock_thread_start();

	/* Allocate memory for the cookie */
	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);

	/* Convert the flags to seg_rw type */
	if (flags & DDI_UMEMLOCK_WRITE) {
		p->s_flags = S_WRITE;
	} else {
		p->s_flags = S_READ;
	}

	/* Store procp in cookie for later iosetup/unlock */
	p->procp = (void *)procp;

	/*
	 * Store the struct as pointer in cookie for later use by
	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
	 * is called after relvm is called.
	 */
	p->asp = as;

	/*
	 * The size field is needed for lockmem accounting.
	 */
	p->size = len;

	/* charge the locked-memory rctl before locking any pages */
	if (umem_incr_devlockmem(p) != 0) {
		/*
		 * The requested memory cannot be locked
		 */
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (ENOMEM);
	}

	/* Lock the pages corresponding to addr, len in memory */
	error = as_pagelock(as, &(p->pparray), addr, len, p->s_flags);
	if (error != 0) {
		/* undo the rctl charge and discard the cookie */
		umem_decr_devlockmem(p);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
		*cookie = (ddi_umem_cookie_t)NULL;
		return (error);
	}

	/*
	 * For longterm locking the addr must pertain to a seg_vn segment
	 * or a seg_spt segment.
	 * If the segment pertains to a regular file, it cannot be
	 * mapped MAP_SHARED.
	 * This is to prevent a deadlock if a file truncation is attempted
	 * after the locking is done.
	 * Doing this after as_pagelock guarantees persistence of the as; if
	 * an unacceptable segment is found, the cleanup includes calling
	 * as_pageunlock before returning EFAULT.
	 */
	if (flags & DDI_UMEMLOCK_LONGTERM) {
		extern  struct seg_ops segspt_shmops;
		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
		for (seg = as_segat(as, addr); ; seg = AS_SEGNEXT(as, seg)) {
			if (seg == NULL || seg->s_base > addr + len)
				break;
			if (((seg->s_ops != &segvn_ops) &&
			    (seg->s_ops != &segspt_shmops)) ||
			    ((SEGOP_GETVP(seg, addr, &vp) == 0 &&
			    vp != NULL && vp->v_type == VREG) &&
			    (SEGOP_GETTYPE(seg, addr) & MAP_SHARED))) {
				as_pageunlock(as, p->pparray,
				    addr, len, p->s_flags);
				AS_LOCK_EXIT(as, &as->a_lock);
				umem_decr_devlockmem(p);
				kmem_free(p, sizeof (struct ddi_umem_cookie));
				*cookie = (ddi_umem_cookie_t)NULL;
				return (EFAULT);
			}
		}
		AS_LOCK_EXIT(as, &as->a_lock);
	}


	/* Initialize the fields in the ddi_umem_cookie */
	p->cvaddr = addr;
	p->type = UMEM_LOCKED;
	if (driver_callback != NULL) {
		/* i_ddi_umem_unlock and umem_lock_undo may need the cookie */
		p->cook_refcnt = 2;
		p->callbacks = *ops_vector;
	} else {
		/* only i_ddi_umem_unlock needs the cookie */
		p->cook_refcnt = 1;
	}

	*cookie = (ddi_umem_cookie_t)p;

	/*
	 * If a driver callback was specified, add an entry to the
	 * as struct callback list. The as_pagelock above guarantees
	 * the persistence of as.
	 */
	if (driver_callback) {
		error = as_add_callback(as, umem_lock_undo, p, AS_ALL_EVENT,
		    addr, len, KM_SLEEP);
		if (error != 0) {
			as_pageunlock(as, p->pparray,
			    addr, len, p->s_flags);
			umem_decr_devlockmem(p);
			kmem_free(p, sizeof (struct ddi_umem_cookie));
			*cookie = (ddi_umem_cookie_t)NULL;
		}
	}
	return (error);
}
8410 
8411 /*
8412  * Unlock the pages locked by ddi_umem_lock or umem_lockmemory and free
8413  * the cookie.  Called from i_ddi_umem_unlock_thread.
8414  */
8415 
static void
i_ddi_umem_unlock(struct ddi_umem_cookie *p)
{
	uint_t	rc;

	/*
	 * There is no way to determine whether a callback to
	 * umem_lock_undo was registered via as_add_callback.
	 * (i.e. umem_lockmemory was called with DDI_MEMLOCK_LONGTERM and
	 * a valid callback function structure.)  as_delete_callback
	 * is called to delete a possible registered callback.  If the
	 * return from as_delete_callbacks is AS_CALLBACK_DELETED, it
	 * indicates that there was a callback registered, and that it was
	 * successfully deleted.  Thus, the cookie reference count
	 * will never be decremented by umem_lock_undo.  Just return the
	 * memory for the cookie, since both users of the cookie are done.
	 * A return of AS_CALLBACK_NOTFOUND indicates a callback was
	 * never registered.  A return of AS_CALLBACK_DELETE_DEFERRED
	 * indicates that callback processing is taking place and
	 * umem_lock_undo is, or will be, executing, and thus decrementing
	 * the cookie reference count when it is complete.
	 *
	 * This needs to be done before as_pageunlock so that the
	 * persistence of as is guaranteed because of the locked pages.
	 *
	 */
	rc = as_delete_callback(p->asp, p);


	/*
	 * The proc->p_as will be stale if i_ddi_umem_unlock is called
	 * after relvm is called so use p->asp.
	 */
	as_pageunlock(p->asp, p->pparray, p->cvaddr, p->size, p->s_flags);

	/*
	 * Now that we have unlocked the memory decrement the
	 * *.max-locked-memory rctl
	 */
	umem_decr_devlockmem(p);

	if (rc == AS_CALLBACK_DELETED) {
		/* umem_lock_undo will not happen, return the cookie memory */
		ASSERT(p->cook_refcnt == 2);
		kmem_free(p, sizeof (struct ddi_umem_cookie));
	} else {
		/*
		 * umem_lock_undo may happen if as_delete_callback returned
		 * AS_CALLBACK_DELETE_DEFERRED.  In that case, decrement the
		 * reference count, atomically, and return the cookie
		 * memory if the reference count goes to zero.  The only
		 * other value for rc is AS_CALLBACK_NOTFOUND.  In that
		 * case, just return the cookie memory.
		 */
		if ((rc != AS_CALLBACK_DELETE_DEFERRED) ||
		    (atomic_add_long_nv((ulong_t *)(&(p->cook_refcnt)), -1)
		    == 0)) {
			kmem_free(p, sizeof (struct ddi_umem_cookie));
		}
	}
}
8477 
8478 /*
8479  * i_ddi_umem_unlock_thread - deferred ddi_umem_unlock list handler.
8480  *
8481  * Call i_ddi_umem_unlock for entries in the ddi_umem_unlock list
8482  * until it is empty.  Then, wait for more to be added.  This thread is awoken
8483  * via calls to ddi_umem_unlock.
8484  */
8485 
static void
i_ddi_umem_unlock_thread(void)
{
	struct ddi_umem_cookie	*ret_cookie;
	callb_cpr_t	cprinfo;

	/* process the ddi_umem_unlock list */
	CALLB_CPR_INIT(&cprinfo, &ddi_umem_unlock_mutex,
	    callb_generic_cpr, "unlock_thread");
	for (;;) {
		mutex_enter(&ddi_umem_unlock_mutex);
		if (ddi_umem_unlock_head != NULL) {	/* list not empty */
			ret_cookie = ddi_umem_unlock_head;
			/* take it off the list */
			if ((ddi_umem_unlock_head =
			    ddi_umem_unlock_head->unl_forw) == NULL) {
				ddi_umem_unlock_tail = NULL;
			}
			mutex_exit(&ddi_umem_unlock_mutex);
			/* unlock the pages in this cookie */
			(void) i_ddi_umem_unlock(ret_cookie);
		} else {   /* list is empty, wait for next ddi_umem_unlock */
			/* mark ourselves CPR-safe while blocked on the cv */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&ddi_umem_unlock_cv, &ddi_umem_unlock_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &ddi_umem_unlock_mutex);
			mutex_exit(&ddi_umem_unlock_mutex);
		}
	}
	/* ddi_umem_unlock_thread does not exit */
	/* NOTREACHED */
}
8517 
8518 /*
8519  * Start the thread that will process the ddi_umem_unlock list if it is
8520  * not already started (i_ddi_umem_unlock_thread).
8521  */
8522 static void
8523 i_ddi_umem_unlock_thread_start(void)
8524 {
8525 	mutex_enter(&ddi_umem_unlock_mutex);
8526 	if (ddi_umem_unlock_thread == NULL) {
8527 		ddi_umem_unlock_thread = thread_create(NULL, 0,
8528 		    i_ddi_umem_unlock_thread, NULL, 0, &p0,
8529 		    TS_RUN, minclsyspri);
8530 	}
8531 	mutex_exit(&ddi_umem_unlock_mutex);
8532 }
8533 
8534 /*
8535  * Lock the virtual address range in the current process and create a
8536  * ddi_umem_cookie (of type UMEM_LOCKED). This can be used to pass to
8537  * ddi_umem_iosetup to create a buf or do devmap_umem_setup/remap to export
8538  * to user space.
8539  *
8540  * Note: The resource control accounting currently uses a full charge model
8541  * in other words attempts to lock the same/overlapping areas of memory
8542  * will deduct the full size of the buffer from the projects running
8543  * counter for the device locked memory. This applies to umem_lockmemory too.
8544  *
8545  * addr, size should be PAGESIZE aligned
8546  * flags - DDI_UMEMLOCK_READ, DDI_UMEMLOCK_WRITE or both
8547  *	identifies whether the locked memory will be read or written or both
8548  *
8549  * Returns 0 on success
8550  *	EINVAL - for invalid parameters
8551  *	EPERM, ENOMEM and other error codes returned by as_pagelock
8552  *	ENOMEM - is returned if the current request to lock memory exceeds
8553  *		*.max-locked-memory resource control value.
8554  *	EAGAIN - could not start the ddi_umem_unlock list processing thread
8555  */
8556 int
8557 ddi_umem_lock(caddr_t addr, size_t len, int flags, ddi_umem_cookie_t *cookie)
8558 {
8559 	int	error;
8560 	struct ddi_umem_cookie *p;
8561 
8562 	*cookie = NULL;		/* in case of any error return */
8563 
8564 	/* These are the only two valid flags */
8565 	if ((flags & ~(DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) != 0) {
8566 		return (EINVAL);
8567 	}
8568 
8569 	/* At least one of the two flags (or both) must be set */
8570 	if ((flags & (DDI_UMEMLOCK_READ | DDI_UMEMLOCK_WRITE)) == 0) {
8571 		return (EINVAL);
8572 	}
8573 
8574 	/* addr and len must be page-aligned */
8575 	if (((uintptr_t)addr & PAGEOFFSET) != 0) {
8576 		return (EINVAL);
8577 	}
8578 
8579 	if ((len & PAGEOFFSET) != 0) {
8580 		return (EINVAL);
8581 	}
8582 
8583 	/*
8584 	 * Call i_ddi_umem_unlock_thread_start if necessary.  It will
8585 	 * be called on first ddi_umem_lock or umem_lockmemory call.
8586 	 */
8587 	if (ddi_umem_unlock_thread == NULL)
8588 		i_ddi_umem_unlock_thread_start();
8589 
8590 	/* Allocate memory for the cookie */
8591 	p = kmem_zalloc(sizeof (struct ddi_umem_cookie), KM_SLEEP);
8592 
8593 	/* Convert the flags to seg_rw type */
8594 	if (flags & DDI_UMEMLOCK_WRITE) {
8595 		p->s_flags = S_WRITE;
8596 	} else {
8597 		p->s_flags = S_READ;
8598 	}
8599 
8600 	/* Store curproc in cookie for later iosetup/unlock */
8601 	p->procp = (void *)curproc;
8602 
8603 	/*
8604 	 * Store the struct as pointer in cookie for later use by
8605 	 * ddi_umem_unlock.  The proc->p_as will be stale if ddi_umem_unlock
8606 	 * is called after relvm is called.
8607 	 */
8608 	p->asp = curproc->p_as;
8609 	/*
8610 	 * The size field is needed for lockmem accounting.
8611 	 */
8612 	p->size = len;
8613 
8614 	if (umem_incr_devlockmem(p) != 0) {
8615 		/*
8616 		 * The requested memory cannot be locked
8617 		 */
8618 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8619 		*cookie = (ddi_umem_cookie_t)NULL;
8620 		return (ENOMEM);
8621 	}
8622 
8623 	/* Lock the pages corresponding to addr, len in memory */
8624 	error = as_pagelock(((proc_t *)p->procp)->p_as, &(p->pparray),
8625 	    addr, len, p->s_flags);
8626 	if (error != 0) {
8627 		umem_decr_devlockmem(p);
8628 		kmem_free(p, sizeof (struct ddi_umem_cookie));
8629 		*cookie = (ddi_umem_cookie_t)NULL;
8630 		return (error);
8631 	}
8632 
8633 	/* Initialize the fields in the ddi_umem_cookie */
8634 	p->cvaddr = addr;
8635 	p->type = UMEM_LOCKED;
8636 	p->cook_refcnt = 1;
8637 
8638 	*cookie = (ddi_umem_cookie_t)p;
8639 	return (error);
8640 }
8641 
8642 /*
8643  * Add the cookie to the ddi_umem_unlock list.  Pages will be
8644  * unlocked by i_ddi_umem_unlock_thread.
8645  */
8646 
8647 void
8648 ddi_umem_unlock(ddi_umem_cookie_t cookie)
8649 {
8650 	struct ddi_umem_cookie	*p = (struct ddi_umem_cookie *)cookie;
8651 
8652 	ASSERT(p->type == UMEM_LOCKED);
8653 	ASSERT(CPU_ON_INTR(CPU) == 0); /* cannot be high level */
8654 	ASSERT(ddi_umem_unlock_thread != NULL);
8655 
8656 	p->unl_forw = (struct ddi_umem_cookie *)NULL;	/* end of list */
8657 	/*
8658 	 * Queue the unlock request and notify i_ddi_umem_unlock thread
8659 	 * if it's called in the interrupt context. Otherwise, unlock pages
8660 	 * immediately.
8661 	 */
8662 	if (servicing_interrupt()) {
8663 		/* queue the unlock request and notify the thread */
8664 		mutex_enter(&ddi_umem_unlock_mutex);
8665 		if (ddi_umem_unlock_head == NULL) {
8666 			ddi_umem_unlock_head = ddi_umem_unlock_tail = p;
8667 			cv_broadcast(&ddi_umem_unlock_cv);
8668 		} else {
8669 			ddi_umem_unlock_tail->unl_forw = p;
8670 			ddi_umem_unlock_tail = p;
8671 		}
8672 		mutex_exit(&ddi_umem_unlock_mutex);
8673 	} else {
8674 		/* unlock the pages right away */
8675 		(void) i_ddi_umem_unlock(p);
8676 	}
8677 }
8678 
8679 /*
8680  * Create a buf structure from a ddi_umem_cookie
8681  * cookie - is a ddi_umem_cookie for from ddi_umem_lock and ddi_umem_alloc
8682  *		(only UMEM_LOCKED & KMEM_NON_PAGEABLE types supported)
8683  * off, len - identifies the portion of the memory represented by the cookie
8684  *		that the buf points to.
8685  *	NOTE: off, len need to follow the alignment/size restrictions of the
8686  *		device (dev) that this buf will be passed to. Some devices
8687  *		will accept unrestricted alignment/size, whereas others (such as
8688  *		st) require some block-size alignment/size. It is the caller's
8689  *		responsibility to ensure that the alignment/size restrictions
8690  *		are met (we cannot assert as we do not know the restrictions)
8691  *
8692  * direction - is one of B_READ or B_WRITE and needs to be compatible with
8693  *		the flags used in ddi_umem_lock
8694  *
8695  * The following three arguments are used to initialize fields in the
8696  * buf structure and are uninterpreted by this routine.
8697  *
8698  * dev
8699  * blkno
8700  * iodone
8701  *
8702  * sleepflag - is one of DDI_UMEM_SLEEP or DDI_UMEM_NOSLEEP
8703  *
8704  * Returns a buf structure pointer on success (to be freed by freerbuf)
8705  *	NULL on any parameter error or memory alloc failure
8706  *
8707  */
8708 struct buf *
8709 ddi_umem_iosetup(ddi_umem_cookie_t cookie, off_t off, size_t len,
8710 	int direction, dev_t dev, daddr_t blkno,
8711 	int (*iodone)(struct buf *), int sleepflag)
8712 {
8713 	struct ddi_umem_cookie *p = (struct ddi_umem_cookie *)cookie;
8714 	struct buf *bp;
8715 
8716 	/*
8717 	 * check for valid cookie offset, len
8718 	 */
8719 	if ((off + len) > p->size) {
8720 		return (NULL);
8721 	}
8722 
8723 	if (len > p->size) {
8724 		return (NULL);
8725 	}
8726 
8727 	/* direction has to be one of B_READ or B_WRITE */
8728 	if ((direction != B_READ) && (direction != B_WRITE)) {
8729 		return (NULL);
8730 	}
8731 
8732 	/* These are the only two valid sleepflags */
8733 	if ((sleepflag != DDI_UMEM_SLEEP) && (sleepflag != DDI_UMEM_NOSLEEP)) {
8734 		return (NULL);
8735 	}
8736 
8737 	/*
8738 	 * Only cookies of type UMEM_LOCKED and KMEM_NON_PAGEABLE are supported
8739 	 */
8740 	if ((p->type != UMEM_LOCKED) && (p->type != KMEM_NON_PAGEABLE)) {
8741 		return (NULL);
8742 	}
8743 
8744 	/* If type is KMEM_NON_PAGEABLE procp is NULL */
8745 	ASSERT((p->type == KMEM_NON_PAGEABLE) ?
8746 	    (p->procp == NULL) : (p->procp != NULL));
8747 
8748 	bp = kmem_alloc(sizeof (struct buf), sleepflag);
8749 	if (bp == NULL) {
8750 		return (NULL);
8751 	}
8752 	bioinit(bp);
8753 
8754 	bp->b_flags = B_BUSY | B_PHYS | direction;
8755 	bp->b_edev = dev;
8756 	bp->b_lblkno = blkno;
8757 	bp->b_iodone = iodone;
8758 	bp->b_bcount = len;
8759 	bp->b_proc = (proc_t *)p->procp;
8760 	ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8761 	bp->b_un.b_addr = (caddr_t)((uintptr_t)(p->cvaddr) + off);
8762 	if (p->pparray != NULL) {
8763 		bp->b_flags |= B_SHADOW;
8764 		ASSERT(((uintptr_t)(p->cvaddr) & PAGEOFFSET) == 0);
8765 		bp->b_shadow = p->pparray + btop(off);
8766 	}
8767 	return (bp);
8768 }
8769 
8770 /*
8771  * Fault-handling and related routines
8772  */
8773 
8774 ddi_devstate_t
8775 ddi_get_devstate(dev_info_t *dip)
8776 {
8777 	if (DEVI_IS_DEVICE_OFFLINE(dip))
8778 		return (DDI_DEVSTATE_OFFLINE);
8779 	else if (DEVI_IS_DEVICE_DOWN(dip) || DEVI_IS_BUS_DOWN(dip))
8780 		return (DDI_DEVSTATE_DOWN);
8781 	else if (DEVI_IS_BUS_QUIESCED(dip))
8782 		return (DDI_DEVSTATE_QUIESCED);
8783 	else if (DEVI_IS_DEVICE_DEGRADED(dip))
8784 		return (DDI_DEVSTATE_DEGRADED);
8785 	else
8786 		return (DDI_DEVSTATE_UP);
8787 }
8788 
8789 void
8790 ddi_dev_report_fault(dev_info_t *dip, ddi_fault_impact_t impact,
8791 	ddi_fault_location_t location, const char *message)
8792 {
8793 	struct ddi_fault_event_data fd;
8794 	ddi_eventcookie_t ec;
8795 
8796 	/*
8797 	 * Assemble all the information into a fault-event-data structure
8798 	 */
8799 	fd.f_dip = dip;
8800 	fd.f_impact = impact;
8801 	fd.f_location = location;
8802 	fd.f_message = message;
8803 	fd.f_oldstate = ddi_get_devstate(dip);
8804 
8805 	/*
8806 	 * Get eventcookie from defining parent.
8807 	 */
8808 	if (ddi_get_eventcookie(dip, DDI_DEVI_FAULT_EVENT, &ec) !=
8809 	    DDI_SUCCESS)
8810 		return;
8811 
8812 	(void) ndi_post_event(dip, dip, ec, &fd);
8813 }
8814 
/*
 * Return the device-class string cached on the devinfo node
 * (may be NULL if no class has been set).
 */
char *
i_ddi_devi_class(dev_info_t *dip)
{
	return (DEVI(dip)->devi_device_class);
}
8820 
8821 int
8822 i_ddi_set_devi_class(dev_info_t *dip, char *devi_class, int flag)
8823 {
8824 	struct dev_info *devi = DEVI(dip);
8825 
8826 	mutex_enter(&devi->devi_lock);
8827 
8828 	if (devi->devi_device_class)
8829 		kmem_free(devi->devi_device_class,
8830 		    strlen(devi->devi_device_class) + 1);
8831 
8832 	if ((devi->devi_device_class = i_ddi_strdup(devi_class, flag))
8833 	    != NULL) {
8834 		mutex_exit(&devi->devi_lock);
8835 		return (DDI_SUCCESS);
8836 	}
8837 
8838 	mutex_exit(&devi->devi_lock);
8839 
8840 	return (DDI_FAILURE);
8841 }
8842 
8843 
8844 /*
8845  * Task Queues DDI interfaces.
8846  */
8847 
8848 /* ARGSUSED */
8849 ddi_taskq_t *
8850 ddi_taskq_create(dev_info_t *dip, const char *name, int nthreads,
8851     pri_t pri, uint_t cflags)
8852 {
8853 	char full_name[TASKQ_NAMELEN];
8854 	const char *tq_name;
8855 	int nodeid = 0;
8856 
8857 	if (dip == NULL)
8858 		tq_name = name;
8859 	else {
8860 		nodeid = ddi_get_instance(dip);
8861 
8862 		if (name == NULL)
8863 			name = "tq";
8864 
8865 		(void) snprintf(full_name, sizeof (full_name), "%s_%s",
8866 		    ddi_driver_name(dip), name);
8867 
8868 		tq_name = full_name;
8869 	}
8870 
8871 	return ((ddi_taskq_t *)taskq_create_instance(tq_name, nodeid, nthreads,
8872 	    pri == TASKQ_DEFAULTPRI ? minclsyspri : pri,
8873 	    nthreads, INT_MAX, TASKQ_PREPOPULATE));
8874 }
8875 
/*
 * Destroy a task queue previously created by ddi_taskq_create.
 * ddi_taskq_t is an opaque handle for the underlying taskq_t.
 */
void
ddi_taskq_destroy(ddi_taskq_t *tq)
{
	taskq_destroy((taskq_t *)tq);
}
8881 
8882 int
8883 ddi_taskq_dispatch(ddi_taskq_t *tq, void (* func)(void *),
8884     void *arg, uint_t dflags)
8885 {
8886 	taskqid_t id = taskq_dispatch((taskq_t *)tq, func, arg,
8887 	    dflags == DDI_SLEEP ? TQ_SLEEP : TQ_NOSLEEP);
8888 
8889 	return (id != 0 ? DDI_SUCCESS : DDI_FAILURE);
8890 }
8891 
/*
 * Wait for the task queue to drain; thin wrapper around taskq_wait().
 */
void
ddi_taskq_wait(ddi_taskq_t *tq)
{
	taskq_wait((taskq_t *)tq);
}
8897 
/*
 * Suspend the task queue; thin wrapper around taskq_suspend().
 */
void
ddi_taskq_suspend(ddi_taskq_t *tq)
{
	taskq_suspend((taskq_t *)tq);
}
8903 
/*
 * Report whether the task queue is currently suspended; thin wrapper
 * around taskq_suspended().
 */
boolean_t
ddi_taskq_suspended(ddi_taskq_t *tq)
{
	return (taskq_suspended((taskq_t *)tq));
}
8909 
/*
 * Resume a previously suspended task queue; thin wrapper around
 * taskq_resume().
 */
void
ddi_taskq_resume(ddi_taskq_t *tq)
{
	taskq_resume((taskq_t *)tq);
}
8915 
8916 int
8917 ddi_parse(
8918 	const char	*ifname,
8919 	char		*alnum,
8920 	uint_t		*nump)
8921 {
8922 	const char	*p;
8923 	int		l;
8924 	ulong_t		num;
8925 	boolean_t	nonum = B_TRUE;
8926 	char		c;
8927 
8928 	l = strlen(ifname);
8929 	for (p = ifname + l; p != ifname; l--) {
8930 		c = *--p;
8931 		if (!isdigit(c)) {
8932 			(void) strlcpy(alnum, ifname, l + 1);
8933 			if (ddi_strtoul(p + 1, NULL, 10, &num) != 0)
8934 				return (DDI_FAILURE);
8935 			break;
8936 		}
8937 		nonum = B_FALSE;
8938 	}
8939 	if (l == 0 || nonum)
8940 		return (DDI_FAILURE);
8941 
8942 	*nump = num;
8943 	return (DDI_SUCCESS);
8944 }
8945 
8946 /*
8947  * Default initialization function for drivers that don't need to quiesce.
8948  */
/* ARGSUSED */
int
ddi_quiesce_not_needed(dev_info_t *dip)
{
	/* Nothing to quiesce; always report success. */
	return (DDI_SUCCESS);
}
8955 
8956 /*
8957  * Initialization function for drivers that should implement quiesce()
8958  * but haven't yet.
8959  */
/* ARGSUSED */
int
ddi_quiesce_not_supported(dev_info_t *dip)
{
	/* Driver has not implemented quiesce; always report failure. */
	return (DDI_FAILURE);
}
8966 
8967 /*
8968  * Generic DDI callback interfaces.
8969  */
8970 
8971 int
8972 ddi_cb_register(dev_info_t *dip, ddi_cb_flags_t flags, ddi_cb_func_t cbfunc,
8973     void *arg1, void *arg2, ddi_cb_handle_t *ret_hdlp)
8974 {
8975 	ddi_cb_t	*cbp;
8976 
8977 	ASSERT(dip != NULL);
8978 	ASSERT(DDI_CB_FLAG_VALID(flags));
8979 	ASSERT(cbfunc != NULL);
8980 	ASSERT(ret_hdlp != NULL);
8981 
8982 	/* Sanity check the context */
8983 	ASSERT(!servicing_interrupt());
8984 	if (servicing_interrupt())
8985 		return (DDI_FAILURE);
8986 
8987 	/* Validate parameters */
8988 	if ((dip == NULL) || !DDI_CB_FLAG_VALID(flags) ||
8989 	    (cbfunc == NULL) || (ret_hdlp == NULL))
8990 		return (DDI_EINVAL);
8991 
8992 	/* Check for previous registration */
8993 	if (DEVI(dip)->devi_cb_p != NULL)
8994 		return (DDI_EALREADY);
8995 
8996 	/* Allocate and initialize callback */
8997 	cbp = kmem_zalloc(sizeof (ddi_cb_t), KM_SLEEP);
8998 	cbp->cb_dip = dip;
8999 	cbp->cb_func = cbfunc;
9000 	cbp->cb_arg1 = arg1;
9001 	cbp->cb_arg2 = arg2;
9002 	cbp->cb_flags = flags;
9003 	DEVI(dip)->devi_cb_p = cbp;
9004 
9005 	/* If adding an IRM callback, notify IRM */
9006 	if (flags & DDI_CB_FLAG_INTR)
9007 		i_ddi_irm_set_cb(dip, B_TRUE);
9008 
9009 	*ret_hdlp = (ddi_cb_handle_t)&(DEVI(dip)->devi_cb_p);
9010 	return (DDI_SUCCESS);
9011 }
9012 
9013 int
9014 ddi_cb_unregister(ddi_cb_handle_t hdl)
9015 {
9016 	ddi_cb_t	*cbp;
9017 	dev_info_t	*dip;
9018 
9019 	ASSERT(hdl != NULL);
9020 
9021 	/* Sanity check the context */
9022 	ASSERT(!servicing_interrupt());
9023 	if (servicing_interrupt())
9024 		return (DDI_FAILURE);
9025 
9026 	/* Validate parameters */
9027 	if ((hdl == NULL) || ((cbp = *(ddi_cb_t **)hdl) == NULL) ||
9028 	    ((dip = cbp->cb_dip) == NULL))
9029 		return (DDI_EINVAL);
9030 
9031 	/* If removing an IRM callback, notify IRM */
9032 	if (cbp->cb_flags & DDI_CB_FLAG_INTR)
9033 		i_ddi_irm_set_cb(dip, B_FALSE);
9034 
9035 	/* Destroy the callback */
9036 	kmem_free(cbp, sizeof (ddi_cb_t));
9037 	DEVI(dip)->devi_cb_p = NULL;
9038 
9039 	return (DDI_SUCCESS);
9040 }
9041