xref: /titanic_50/usr/src/uts/sun4/os/ddi_impl.c (revision d2d5cf7c5d909b74a88d499283e24750a9a52c5d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * sun4 specific DDI implementation
31  */
32 #include <sys/cpuvar.h>
33 #include <sys/ddi_subrdefs.h>
34 #include <sys/machsystm.h>
35 #include <sys/sunndi.h>
36 #include <sys/sysmacros.h>
37 #include <sys/ontrap.h>
38 #include <vm/seg_kmem.h>
39 #include <sys/membar.h>
40 #include <sys/dditypes.h>
41 #include <sys/ndifm.h>
42 #include <sys/fm/io/ddi.h>
43 #include <sys/ivintr.h>
44 #include <sys/bootconf.h>
45 #include <sys/conf.h>
46 #include <sys/ethernet.h>
47 #include <sys/idprom.h>
48 #include <sys/promif.h>
49 #include <sys/prom_plat.h>
50 #include <sys/systeminfo.h>
51 #include <sys/fpu/fpusystm.h>
52 #include <sys/vm.h>
53 #include <sys/fs/dv_node.h>
54 #include <sys/fs/snode.h>
55 #include <sys/ddi_isa.h>
56 #include <sys/modhash.h>
57 #include <sys/modctl.h>
58 #include <sys/sunldi_impl.h>
59 
60 dev_info_t *get_intr_parent(dev_info_t *, dev_info_t *,
61     ddi_intr_handle_impl_t *);
62 #pragma weak get_intr_parent
63 
64 int process_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
65     ddi_intr_handle_impl_t *, void *);
66 #pragma weak process_intr_ops
67 
68 void cells_1275_copy(prop_1275_cell_t *, prop_1275_cell_t *, int32_t);
69     prop_1275_cell_t *cells_1275_cmp(prop_1275_cell_t *, prop_1275_cell_t *,
70     int32_t len);
71 #pragma weak cells_1275_copy
72 
73 /*
74  * Wrapper for ddi_prop_lookup_int_array().
75  * This is handy because it returns the prop length in
76  * bytes which is what most of the callers require.
77  */
78 
79 static int
80 get_prop_int_array(dev_info_t *di, char *pname, int **pval, uint_t *plen)
81 {
82 	int ret;
83 
84 	if ((ret = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, di,
85 	    DDI_PROP_DONTPASS, pname, pval, plen)) == DDI_PROP_SUCCESS) {
86 		*plen = (*plen) * (uint_t)sizeof (int);
87 	}
88 	return (ret);
89 }
90 
91 /*
92  * SECTION: DDI Node Configuration
93  */
94 
95 /*
96  * init_regspec_64:
97  *
98  * If the parent #size-cells is 2, convert the upa-style or
99  * safari-style reg property from 2-size cells to 1 size cell
100  * format, ignoring the size_hi, which must be zero for devices.
101  * (It won't be zero in the memory list properties in the memory
102  * nodes, but that doesn't matter here.)
103  */
struct ddi_parent_private_data *
init_regspec_64(dev_info_t *dip)
{
	struct ddi_parent_private_data *pd;	/* parent data to fix up */
	dev_info_t *parent;
	int size_cells;

	/*
	 * If there are no "reg"s in the child node, return.
	 */
	pd = ddi_get_parent_data(dip);
	if ((pd == NULL) || (pd->par_nreg == 0)) {
		return (pd);
	}
	parent = ddi_get_parent(dip);

	/* Default #size-cells is 1; 2 marks upa/safari-style "reg". */
	size_cells = ddi_prop_get_int(DDI_DEV_T_ANY, parent,
	    DDI_PROP_DONTPASS, "#size-cells", 1);

	if (size_cells != 1)  {

		int n, j;
		struct regspec *irp;
		/* Shape of one "reg" entry when #size-cells == 2. */
		struct reg_64 {
			uint_t addr_hi, addr_lo, size_hi, size_lo;
		};
		struct reg_64 *r64_rp;
		struct regspec *rp;
		uint_t len = 0;
		int *reg_prop;

		ASSERT(size_cells == 2);

		/*
		 * We already looked the property up once before if
		 * pd is non-NULL.
		 */
		(void) ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, OBP_REG, &reg_prop, &len);
		ASSERT(len != 0);

		/* n = number of 4-int reg_64 entries in the property. */
		n = sizeof (struct reg_64) / sizeof (int);
		n = len / n;

		/*
		 * We're allocating a buffer the size of the PROM's property,
		 * but we're only using a smaller portion when we assign it
		 * to a regspec.  We do this so that in the
		 * impl_ddi_sunbus_removechild function, we will
		 * always free the right amount of memory.
		 */
		irp = rp = (struct regspec *)reg_prop;
		r64_rp = (struct reg_64 *)pd->par_reg;

		/*
		 * Repack each 2-size-cell entry from the old array into a
		 * 1-size-cell regspec, dropping size_hi (asserted zero for
		 * device nodes, per the block comment above).
		 */
		for (j = 0; j < n; ++j, ++rp, ++r64_rp) {
			ASSERT(r64_rp->size_hi == 0);
			rp->regspec_bustype = r64_rp->addr_hi;
			rp->regspec_addr = r64_rp->addr_lo;
			rp->regspec_size = r64_rp->size_lo;
		}

		/* Release the old array and install the converted one. */
		ddi_prop_free((void *)pd->par_reg);
		pd->par_nreg = n;
		pd->par_reg = irp;
	}
	return (pd);
}
171 
172 /*
173  * Create a ddi_parent_private_data structure from the ddi properties of
174  * the dev_info node.
175  *
176  * The "reg" is required if the driver wishes to create mappings on behalf
177  * of the device. The "reg" property is assumed to be a list of at least
178  * one triplet
179  *
180  *	<bustype, address, size>*1
181  *
182  * The "interrupt" property is no longer part of parent private data on
 * sun4u. The interrupt parent may not be the device tree parent.
184  *
185  * The "ranges" property describes the mapping of child addresses to parent
186  * addresses.
187  *
188  * N.B. struct rangespec is defined for the following default values:
189  *			parent  child
190  *	#address-cells	2	2
191  *	#size-cells	1	1
192  * This function doesn't deal with non-default cells and will not create
193  * ranges in such cases.
194  */
195 void
196 make_ddi_ppd(dev_info_t *child, struct ddi_parent_private_data **ppd)
197 {
198 	struct ddi_parent_private_data *pdptr;
199 	int *reg_prop, *rng_prop;
200 	uint_t reg_len = 0, rng_len = 0;
201 	dev_info_t *parent;
202 	int parent_addr_cells, parent_size_cells;
203 	int child_addr_cells, child_size_cells;
204 
205 	*ppd = pdptr = kmem_zalloc(sizeof (*pdptr), KM_SLEEP);
206 
207 	/*
208 	 * root node has no parent private data, so *ppd should
209 	 * be initialized for naming to work properly.
210 	 */
211 	if ((parent = ddi_get_parent(child)) == NULL)
212 		return;
213 
214 	/*
215 	 * Set reg field of parent data from "reg" property
216 	 */
217 	if ((get_prop_int_array(child, OBP_REG, &reg_prop, &reg_len)
218 	    == DDI_PROP_SUCCESS) && (reg_len != 0)) {
219 		pdptr->par_nreg = (int)(reg_len / sizeof (struct regspec));
220 		pdptr->par_reg = (struct regspec *)reg_prop;
221 	}
222 
223 	/*
224 	 * "ranges" property ...
225 	 *
226 	 * This function does not handle cases where #address-cells != 2
227 	 * and * min(parent, child) #size-cells != 1 (see bugid 4211124).
228 	 *
229 	 * Nexus drivers with such exceptions (e.g. pci ranges)
230 	 * should either create a separate function for handling
231 	 * ranges or not use parent private data to store ranges.
232 	 */
233 
234 	/* root node has no ranges */
235 	if ((parent = ddi_get_parent(child)) == NULL)
236 		return;
237 
238 	child_addr_cells = ddi_prop_get_int(DDI_DEV_T_ANY, child,
239 	    DDI_PROP_DONTPASS, "#address-cells", 2);
240 	child_size_cells = ddi_prop_get_int(DDI_DEV_T_ANY, child,
241 	    DDI_PROP_DONTPASS, "#size-cells", 1);
242 	parent_addr_cells = ddi_prop_get_int(DDI_DEV_T_ANY, parent,
243 	    DDI_PROP_DONTPASS, "#address-cells", 2);
244 	parent_size_cells = ddi_prop_get_int(DDI_DEV_T_ANY, parent,
245 	    DDI_PROP_DONTPASS, "#size-cells", 1);
246 	if (child_addr_cells != 2 || parent_addr_cells != 2 ||
247 	    (child_size_cells != 1 && parent_size_cells != 1)) {
248 		NDI_CONFIG_DEBUG((CE_NOTE, "!ranges not made in parent data; "
249 		    "#address-cells or #size-cells have non-default value"));
250 		return;
251 	}
252 
253 	if (get_prop_int_array(child, OBP_RANGES, &rng_prop, &rng_len)
254 	    == DDI_PROP_SUCCESS) {
255 		pdptr->par_nrng = rng_len / (int)(sizeof (struct rangespec));
256 		pdptr->par_rng = (struct rangespec *)rng_prop;
257 	}
258 }
259 
260 /*
261  * Free ddi_parent_private_data structure
262  */
263 void
264 impl_free_ddi_ppd(dev_info_t *dip)
265 {
266 	struct ddi_parent_private_data *pdptr = ddi_get_parent_data(dip);
267 
268 	if (pdptr == NULL)
269 		return;
270 
271 	if (pdptr->par_nrng != 0)
272 		ddi_prop_free((void *)pdptr->par_rng);
273 
274 	if (pdptr->par_nreg != 0)
275 		ddi_prop_free((void *)pdptr->par_reg);
276 
277 	kmem_free(pdptr, sizeof (*pdptr));
278 	ddi_set_parent_data(dip, NULL);
279 }
280 
281 /*
282  * Name a child of sun busses based on the reg spec.
283  * Handles the following properties:
284  *
285  *	Property	value
286  *	Name		type
287  *
288  *	reg		register spec
289  *	interrupts	new (bus-oriented) interrupt spec
290  *	ranges		range spec
291  *
292  * This may be called multiple times, independent of
293  * initchild calls.
294  */
295 static int
296 impl_sunbus_name_child(dev_info_t *child, char *name, int namelen)
297 {
298 	struct ddi_parent_private_data *pdptr;
299 	struct regspec *rp;
300 
301 	/*
302 	 * Fill in parent-private data and this function returns to us
303 	 * an indication if it used "registers" to fill in the data.
304 	 */
305 	if (ddi_get_parent_data(child) == NULL) {
306 		make_ddi_ppd(child, &pdptr);
307 		ddi_set_parent_data(child, pdptr);
308 	}
309 
310 	/*
311 	 * No reg property, return null string as address
312 	 * (e.g. root node)
313 	 */
314 	name[0] = '\0';
315 	if (sparc_pd_getnreg(child) == 0) {
316 		return (DDI_SUCCESS);
317 	}
318 
319 	rp = sparc_pd_getreg(child, 0);
320 	(void) snprintf(name, namelen, "%x,%x",
321 	    rp->regspec_bustype, rp->regspec_addr);
322 	return (DDI_SUCCESS);
323 }
324 
325 
326 /*
327  * Called from the bus_ctl op of some drivers.
328  * to implement the DDI_CTLOPS_INITCHILD operation.
329  *
330  * NEW drivers should NOT use this function, but should declare
 * their own initchild/uninitchild handlers. (This function assumes
332  * the layout of the parent private data and the format of "reg",
333  * "ranges", "interrupts" properties and that #address-cells and
334  * #size-cells of the parent bus are defined to be default values.)
335  */
336 int
337 impl_ddi_sunbus_initchild(dev_info_t *child)
338 {
339 	char name[MAXNAMELEN];
340 
341 	(void) impl_sunbus_name_child(child, name, MAXNAMELEN);
342 	ddi_set_name_addr(child, name);
343 
344 	/*
345 	 * Try to merge .conf node. If successful, return failure to
346 	 * remove this child.
347 	 */
348 	if ((ndi_dev_is_persistent_node(child) == 0) &&
349 	    (ndi_merge_node(child, impl_sunbus_name_child) == DDI_SUCCESS)) {
350 		impl_ddi_sunbus_removechild(child);
351 		return (DDI_FAILURE);
352 	}
353 	return (DDI_SUCCESS);
354 }
355 
356 /*
357  * A better name for this function would be impl_ddi_sunbus_uninitchild()
358  * It does not remove the child, it uninitializes it, reclaiming the
359  * resources taken by impl_ddi_sunbus_initchild.
360  */
void
impl_ddi_sunbus_removechild(dev_info_t *dip)
{
	/* Release parent private data ("reg"/"ranges") and the name@addr. */
	impl_free_ddi_ppd(dip);
	ddi_set_name_addr(dip, NULL);
	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	impl_rem_dev_props(dip);
}
371 
372 /*
373  * SECTION: DDI Interrupt
374  */
375 
/*
 * cells_1275_copy: copy 'len' 1275 property cells from 'from' to 'to'.
 *
 * NOTE(review): the loop body does not index or advance either pointer,
 * so this writes from[0] into to[0] 'len' times rather than copying
 * 'len' distinct cells.  Every caller in this file passes a single-cell
 * destination (&hdlp->ih_vector, &intr), so "fixing" the loop to
 * to[i] = from[i] would overrun those destinations whenever len > 1 --
 * confirm the intended contract before changing this.
 */
void
cells_1275_copy(prop_1275_cell_t *from, prop_1275_cell_t *to, int32_t len)
{
	int i;
	for (i = 0; i < len; i++)
		*to = *from;
}
383 
384 prop_1275_cell_t *
385 cells_1275_cmp(prop_1275_cell_t *cell1, prop_1275_cell_t *cell2, int32_t len)
386 {
387 	prop_1275_cell_t *match_cell = 0;
388 	int32_t i;
389 
390 	for (i = 0; i < len; i++)
391 		if (cell1[i] != cell2[i]) {
392 			match_cell = &cell1[i];
393 			break;
394 		}
395 
396 	return (match_cell);
397 }
398 
399 /*
 * get_intr_parent() is a generic routine that processes a 1275 interrupt
401  * map (imap) property.  This function returns a dev_info_t structure
402  * which claims ownership of the interrupt domain.
403  * It also returns the new interrupt translation within this new domain.
404  * If an interrupt-parent or interrupt-map property are not found,
405  * then we fallback to using the device tree's parent.
406  *
407  * imap entry format:
408  * <reg>,<interrupt>,<phandle>,<translated interrupt>
409  * reg - The register specification in the interrupts domain
410  * interrupt - The interrupt specification
411  * phandle - PROM handle of the device that owns the xlated interrupt domain
 * translated interrupt - interrupt specifier in the parent's domain
413  * note: <reg>,<interrupt> - The reg and interrupt can be combined to create
414  *	a unique entry called a unit interrupt specifier.
415  *
416  * Here's the processing steps:
417  * step1 - If the interrupt-parent property exists, create the ispec and
418  *	return the dip of the interrupt parent.
419  * step2 - Extract the interrupt-map property and the interrupt-map-mask
420  *	If these don't exist, just return the device tree parent.
421  * step3 - build up the unit interrupt specifier to match against the
422  *	interrupt map property
423  * step4 - Scan the interrupt-map property until a match is found
424  * step4a - Extract the interrupt parent
425  * step4b - Compare the unit interrupt specifier
426  */
427 dev_info_t *
428 get_intr_parent(dev_info_t *pdip, dev_info_t *dip, ddi_intr_handle_impl_t *hdlp)
429 {
430 	prop_1275_cell_t *imap, *imap_mask, *scan, *reg_p, *match_req;
431 	int32_t imap_sz, imap_cells, imap_scan_cells, imap_mask_sz,
432 	    addr_cells, intr_cells, reg_len, i, j;
433 	int32_t match_found = 0;
434 	dev_info_t *intr_parent_dip = NULL;
435 	uint32_t *intr = &hdlp->ih_vector;
436 	uint32_t nodeid;
437 #ifdef DEBUG
438 	static int debug = 0;
439 #endif
440 
441 	/*
442 	 * step1
443 	 * If we have an interrupt-parent property, this property represents
444 	 * the nodeid of our interrupt parent.
445 	 */
446 	if ((nodeid = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
447 	    "interrupt-parent", -1)) != -1) {
448 		intr_parent_dip = e_ddi_nodeid_to_dip(nodeid);
449 		ASSERT(intr_parent_dip);
450 
451 		/*
452 		 * Attach the interrupt parent.
453 		 *
454 		 * N.B. e_ddi_nodeid_to_dip() isn't safe under DR.
455 		 *	Also, interrupt parent isn't held. This needs
456 		 *	to be revisited if DR-capable platforms implement
457 		 *	interrupt redirection.
458 		 */
459 		if (i_ddi_attach_node_hierarchy(intr_parent_dip)
460 		    != DDI_SUCCESS) {
461 			ndi_rele_devi(intr_parent_dip);
462 			return (NULL);
463 		}
464 
465 		return (intr_parent_dip);
466 	}
467 
468 	/*
469 	 * step2
470 	 * Get interrupt map structure from PROM property
471 	 */
472 	if (ddi_getlongprop(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
473 	    "interrupt-map", (caddr_t)&imap, &imap_sz)
474 	    != DDI_PROP_SUCCESS) {
475 		/*
476 		 * If we don't have an imap property, default to using the
477 		 * device tree.
478 		 */
479 
480 		ndi_hold_devi(pdip);
481 		return (pdip);
482 	}
483 
484 	/* Get the interrupt mask property */
485 	if (ddi_getlongprop(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
486 	    "interrupt-map-mask", (caddr_t)&imap_mask, &imap_mask_sz)
487 	    != DDI_PROP_SUCCESS) {
488 		/*
489 		 * If we don't find this property, we have to fail the request
490 		 * because the 1275 imap property wasn't defined correctly.
491 		 */
492 		ASSERT(intr_parent_dip == NULL);
493 		goto exit2;
494 	}
495 
496 	/* Get the address cell size */
497 	addr_cells = ddi_getprop(DDI_DEV_T_ANY, pdip, 0,
498 	    "#address-cells", 2);
499 
500 	/* Get the interrupts cell size */
501 	intr_cells = ddi_getprop(DDI_DEV_T_ANY, pdip, 0,
502 	    "#interrupt-cells", 1);
503 
504 	/*
505 	 * step3
506 	 * Now lets build up the unit interrupt specifier e.g. reg,intr
507 	 * and apply the imap mask.  match_req will hold this when we're
508 	 * through.
509 	 */
510 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
511 	    (caddr_t)&reg_p, &reg_len) != DDI_SUCCESS) {
512 		ASSERT(intr_parent_dip == NULL);
513 		goto exit3;
514 	}
515 
516 	match_req = kmem_alloc(CELLS_1275_TO_BYTES(addr_cells) +
517 	    CELLS_1275_TO_BYTES(intr_cells), KM_SLEEP);
518 
519 	for (i = 0; i < addr_cells; i++)
520 		match_req[i] = (reg_p[i] & imap_mask[i]);
521 
522 	for (j = 0; j < intr_cells; i++, j++)
523 		match_req[i] = (intr[j] & imap_mask[i]);
524 
525 	/* Calculate the imap size in cells */
526 	imap_cells = BYTES_TO_1275_CELLS(imap_sz);
527 
528 #ifdef DEBUG
529 	if (debug)
530 		prom_printf("reg cell size 0x%x, intr cell size 0x%x, "
531 		    "match_request 0x%p, imap 0x%p\n", addr_cells, intr_cells,
532 		    (void *)match_req, (void *)imap);
533 #endif
534 
535 	/*
536 	 * Scan the imap property looking for a match of the interrupt unit
537 	 * specifier.  This loop is rather complex since the data within the
538 	 * imap property may vary in size.
539 	 */
540 	for (scan = imap, imap_scan_cells = i = 0;
541 	    imap_scan_cells < imap_cells; scan += i, imap_scan_cells += i) {
542 		int new_intr_cells;
543 
544 		/* Set the index to the nodeid field */
545 		i = addr_cells + intr_cells;
546 
547 		/*
548 		 * step4a
549 		 * Translate the nodeid field to a dip
550 		 */
551 		ASSERT(intr_parent_dip == NULL);
552 		intr_parent_dip = e_ddi_nodeid_to_dip((uint_t)scan[i++]);
553 
554 		ASSERT(intr_parent_dip != 0);
555 #ifdef DEBUG
556 		if (debug)
557 			prom_printf("scan 0x%p\n", (void *)scan);
558 #endif
559 		/*
560 		 * The tmp_dip describes the new domain, get it's interrupt
561 		 * cell size
562 		 */
563 		new_intr_cells = ddi_getprop(DDI_DEV_T_ANY, intr_parent_dip, 0,
564 		    "#interrupts-cells", 1);
565 
566 		/*
567 		 * step4b
568 		 * See if we have a match on the interrupt unit specifier
569 		 */
570 		if (cells_1275_cmp(match_req, scan, addr_cells + intr_cells)
571 		    == 0) {
572 			uint32_t *intr;
573 
574 			match_found = 1;
575 
576 			/*
577 			 * If we have an imap parent whose not in our device
578 			 * tree path, we need to hold and install that driver.
579 			 */
580 			if (i_ddi_attach_node_hierarchy(intr_parent_dip)
581 			    != DDI_SUCCESS) {
582 				ndi_rele_devi(intr_parent_dip);
583 				intr_parent_dip = (dev_info_t *)NULL;
584 				goto exit4;
585 			}
586 
587 			/*
588 			 * We need to handcraft an ispec along with a bus
589 			 * interrupt value, so we can dup it into our
590 			 * standard ispec structure.
591 			 */
592 			/* Extract the translated interrupt information */
593 			intr = kmem_alloc(
594 			    CELLS_1275_TO_BYTES(new_intr_cells), KM_SLEEP);
595 
596 			for (j = 0; j < new_intr_cells; j++, i++)
597 				intr[j] = scan[i];
598 
599 			cells_1275_copy(intr, &hdlp->ih_vector, new_intr_cells);
600 
601 			kmem_free(intr, CELLS_1275_TO_BYTES(new_intr_cells));
602 
603 #ifdef DEBUG
604 			if (debug)
605 				prom_printf("dip 0x%p\n",
606 				    (void *)intr_parent_dip);
607 #endif
608 			break;
609 		} else {
610 #ifdef DEBUG
611 			if (debug)
612 				prom_printf("dip 0x%p\n",
613 				    (void *)intr_parent_dip);
614 #endif
615 			ndi_rele_devi(intr_parent_dip);
616 			intr_parent_dip = NULL;
617 			i += new_intr_cells;
618 		}
619 	}
620 
621 	/*
622 	 * If we haven't found our interrupt parent at this point, fallback
623 	 * to using the device tree.
624 	 */
625 	if (!match_found) {
626 		ndi_hold_devi(pdip);
627 		ASSERT(intr_parent_dip == NULL);
628 		intr_parent_dip = pdip;
629 	}
630 
631 	ASSERT(intr_parent_dip != NULL);
632 
633 exit4:
634 	kmem_free(reg_p, reg_len);
635 	kmem_free(match_req, CELLS_1275_TO_BYTES(addr_cells) +
636 	    CELLS_1275_TO_BYTES(intr_cells));
637 
638 exit3:
639 	kmem_free(imap_mask, imap_mask_sz);
640 
641 exit2:
642 	kmem_free(imap, imap_sz);
643 
644 	return (intr_parent_dip);
645 }
646 
647 /*
648  * process_intr_ops:
649  *
650  * Process the interrupt op via the interrupt parent.
651  */
652 int
653 process_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t op,
654     ddi_intr_handle_impl_t *hdlp, void *result)
655 {
656 	int		ret = DDI_FAILURE;
657 
658 	if (NEXUS_HAS_INTR_OP(pdip)) {
659 		ret = (*(DEVI(pdip)->devi_ops->devo_bus_ops->
660 		    bus_intr_op)) (pdip, rdip, op, hdlp, result);
661 	} else {
662 		cmn_err(CE_WARN, "Failed to process interrupt "
663 		    "for %s%d due to down-rev nexus driver %s%d",
664 		    ddi_get_name(rdip), ddi_get_instance(rdip),
665 		    ddi_get_name(pdip), ddi_get_instance(pdip));
666 	}
667 
668 	return (ret);
669 }
670 
/*ARGSUSED*/
uint_t
softlevel1(caddr_t arg)
{
	/* Run the soft interrupt dispatcher and claim the interrupt. */
	softint();
	return (1);
}
678 
679 /*
680  * indirection table, to save us some large switch statements
681  * NOTE: This must agree with "INTLEVEL_foo" constants in
682  *	<sys/avintr.h>
683  */
/* Single NULL entry; see maxautovec below for the derived count. */
struct autovec *const vectorlist[] = { 0 };

/*
 * This value is exported here for the functions in avintr.c
 * (the number of entries in vectorlist, computed at compile time).
 */
const uint_t maxautovec = (sizeof (vectorlist) / sizeof (vectorlist[0]));
690 
691 /*
692  * Check for machine specific interrupt levels which cannot be reassigned by
693  * settrap(), sun4u version.
694  *
695  * sun4u does not support V8 SPARC "fast trap" handlers.
696  */
/*ARGSUSED*/
int
exclude_settrap(int lvl)
{
	/* Every level is excluded: V8 "fast trap" handlers are unsupported. */
	return (1);
}
703 
704 /*
705  * Check for machine specific interrupt levels which cannot have interrupt
706  * handlers added. We allow levels 1 through 15; level 0 is nonsense.
707  */
/*ARGSUSED*/
int
exclude_level(int lvl)
{
	/* Levels 1 through 15 are usable; anything else is excluded. */
	return (!(lvl >= 1 && lvl <= 15));
}
714 
715 /*
716  * Wrapper functions used by New DDI interrupt framework.
717  */
718 
719 /*
720  * i_ddi_intr_ops:
721  */
int
i_ddi_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	dev_info_t	*pdip = ddi_get_parent(dip);
	int		ret = DDI_FAILURE;

	/*
	 * The following check is required to address
	 * one of the test case of ADDI test suite.
	 */
	if (pdip == NULL)
		return (DDI_FAILURE);

	/* Only FIXED interrupts get the 1275 imap translation below. */
	if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
		return (process_intr_ops(pdip, rdip, op, hdlp, result));

	/* Fill in vector and priority from PROM properties when unset. */
	if (hdlp->ih_vector == 0)
		hdlp->ih_vector = i_ddi_get_inum(rdip, hdlp->ih_inum);

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = i_ddi_get_intr_pri(rdip, hdlp->ih_inum);

	switch (op) {
	case DDI_INTROP_ADDISR:
	case DDI_INTROP_REMISR:
	case DDI_INTROP_ENABLE:
	case DDI_INTROP_DISABLE:
	case DDI_INTROP_BLOCKENABLE:
	case DDI_INTROP_BLOCKDISABLE:
		/*
		 * Try and determine our parent and possibly an interrupt
		 * translation. intr parent dip returned held
		 */
		if ((pdip = get_intr_parent(pdip, dip, hdlp)) == NULL)
			goto done;
	}

	ret = process_intr_ops(pdip, rdip, op, hdlp, result);

done:
	/*
	 * This second switch mirrors the one above: for exactly those
	 * ops, pdip was replaced by (and held via) get_intr_parent().
	 */
	switch (op) {
	case DDI_INTROP_ADDISR:
	case DDI_INTROP_REMISR:
	case DDI_INTROP_ENABLE:
	case DDI_INTROP_DISABLE:
	case DDI_INTROP_BLOCKENABLE:
	case DDI_INTROP_BLOCKDISABLE:
		/* Release hold acquired in get_intr_parent() */
		if (pdip)
			ndi_rele_devi(pdip);
	}

	/* Clear the (possibly translated) vector before returning. */
	hdlp->ih_vector = 0;

	return (ret);
}
779 
780 /*
781  * i_ddi_add_ivintr:
782  */
783 /*ARGSUSED*/
784 int
785 i_ddi_add_ivintr(ddi_intr_handle_impl_t *hdlp)
786 {
787 	/*
788 	 * If the PIL was set and is valid use it, otherwise
789 	 * default it to 1
790 	 */
791 	if ((hdlp->ih_pri < 1) || (hdlp->ih_pri > PIL_MAX))
792 		hdlp->ih_pri = 1;
793 
794 	VERIFY(add_ivintr(hdlp->ih_vector, hdlp->ih_pri,
795 	    (intrfunc)hdlp->ih_cb_func, hdlp->ih_cb_arg1,
796 	    hdlp->ih_cb_arg2, NULL) == 0);
797 
798 	return (DDI_SUCCESS);
799 }
800 
801 /*
802  * i_ddi_rem_ivintr:
803  */
/*ARGSUSED*/
void
i_ddi_rem_ivintr(ddi_intr_handle_impl_t *hdlp)
{
	/* Unregister the vector; rem_ivintr() returns 0 on success. */
	VERIFY(rem_ivintr(hdlp->ih_vector, hdlp->ih_pri) == 0);
}
810 
811 /*
812  * i_ddi_get_inum - Get the interrupt number property from the
813  * specified device. Note that this function is called only for
814  * the FIXED interrupt type.
815  */
816 uint32_t
817 i_ddi_get_inum(dev_info_t *dip, uint_t inumber)
818 {
819 	int32_t			intrlen, intr_cells, max_intrs;
820 	prop_1275_cell_t	*ip, intr_sz;
821 	uint32_t		intr = 0;
822 
823 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS |
824 	    DDI_PROP_CANSLEEP,
825 	    "interrupts", (caddr_t)&ip, &intrlen) == DDI_SUCCESS) {
826 
827 		intr_cells = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
828 		    "#interrupt-cells", 1);
829 
830 		/* adjust for number of bytes */
831 		intr_sz = CELLS_1275_TO_BYTES(intr_cells);
832 
833 		/* Calculate the number of interrupts */
834 		max_intrs = intrlen / intr_sz;
835 
836 		if (inumber < max_intrs) {
837 			prop_1275_cell_t *intrp = ip;
838 
839 			/* Index into interrupt property */
840 			intrp += (inumber * intr_cells);
841 
842 			cells_1275_copy(intrp, &intr, intr_cells);
843 		}
844 
845 		kmem_free(ip, intrlen);
846 	}
847 
848 	return (intr);
849 }
850 
851 /*
852  * i_ddi_get_intr_pri - Get the interrupt-priorities property from
853  * the specified device. Note that this function is called only for
854  * the FIXED interrupt type.
855  */
856 uint32_t
857 i_ddi_get_intr_pri(dev_info_t *dip, uint_t inumber)
858 {
859 	uint32_t	*intr_prio_p;
860 	uint32_t	pri = 0;
861 	int32_t		i;
862 
863 	/*
864 	 * Use the "interrupt-priorities" property to determine the
865 	 * the pil/ipl for the interrupt handler.
866 	 */
867 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
868 	    "interrupt-priorities", (caddr_t)&intr_prio_p,
869 	    &i) == DDI_SUCCESS) {
870 		if (inumber < (i / sizeof (int32_t)))
871 			pri = intr_prio_p[inumber];
872 		kmem_free(intr_prio_p, i);
873 	}
874 
875 	return (pri);
876 }
877 
878 int
879 i_ddi_get_intx_nintrs(dev_info_t *dip)
880 {
881 	int32_t intrlen;
882 	prop_1275_cell_t intr_sz;
883 	prop_1275_cell_t *ip;
884 	int32_t ret = 0;
885 
886 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS |
887 	    DDI_PROP_CANSLEEP,
888 	    "interrupts", (caddr_t)&ip, &intrlen) == DDI_SUCCESS) {
889 
890 		intr_sz = ddi_getprop(DDI_DEV_T_ANY, dip, 0,
891 		    "#interrupt-cells", 1);
892 		/* adjust for number of bytes */
893 		intr_sz = CELLS_1275_TO_BYTES(intr_sz);
894 
895 		ret = intrlen / intr_sz;
896 
897 		kmem_free(ip, intrlen);
898 	}
899 
900 	return (ret);
901 }
902 
903 /*
904  * i_ddi_add_softint - allocate and add a software interrupt.
905  *
906  * NOTE: All software interrupts that are registered through DDI
907  *	 should be triggered only on a single target or CPU.
908  */
909 int
910 i_ddi_add_softint(ddi_softint_hdl_impl_t *hdlp)
911 {
912 	if ((hdlp->ih_private = (void *)add_softintr(hdlp->ih_pri,
913 	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, SOFTINT_ST)) == NULL)
914 		return (DDI_FAILURE);
915 
916 	return (DDI_SUCCESS);
917 }
918 
919 /*
920  * i_ddi_remove_softint - remove and free a software interrupt.
921  */
void
i_ddi_remove_softint(ddi_softint_hdl_impl_t *hdlp)
{
	ASSERT(hdlp->ih_private != NULL);

	/* rem_softintr() returns 0 on success; only then drop the handle. */
	if (rem_softintr((uint64_t)hdlp->ih_private) == 0)
		hdlp->ih_private = NULL;
}
930 
931 /*
932  * i_ddi_trigger_softint - trigger a software interrupt.
933  */
934 int
935 i_ddi_trigger_softint(ddi_softint_hdl_impl_t *hdlp, void *arg2)
936 {
937 	int	ret;
938 
939 	ASSERT(hdlp->ih_private != NULL);
940 
941 	/* Update the second argument for the software interrupt */
942 	if ((ret = update_softint_arg2((uint64_t)hdlp->ih_private, arg2)) == 0)
943 		setsoftint((uint64_t)hdlp->ih_private);
944 
945 	return (ret ? DDI_EPENDING : DDI_SUCCESS);
946 }
947 
948 /*
949  * i_ddi_set_softint_pri - change software interrupt priority.
950  */
951 /* ARGSUSED */
952 int
953 i_ddi_set_softint_pri(ddi_softint_hdl_impl_t *hdlp, uint_t old_pri)
954 {
955 	int	ret;
956 
957 	ASSERT(hdlp->ih_private != NULL);
958 
959 	/* Update the interrupt priority for the software interrupt */
960 	ret = update_softint_pri((uint64_t)hdlp->ih_private, hdlp->ih_pri);
961 
962 	return (ret ? DDI_FAILURE : DDI_SUCCESS);
963 }
964 
/*ARGSUSED*/
void
i_ddi_alloc_intr_phdl(ddi_intr_handle_impl_t *hdlp)
{
	/* No platform-private interrupt handle data here; intentional no-op. */
}
970 
/*ARGSUSED*/
void
i_ddi_free_intr_phdl(ddi_intr_handle_impl_t *hdlp)
{
	/* Nothing allocated by i_ddi_alloc_intr_phdl(); intentional no-op. */
}
976 
977 /*
978  * SECTION: DDI Memory/DMA
979  */
980 
981 /* set HAT endianess attributes from ddi_device_acc_attr */
982 void
983 i_ddi_devacc_to_hatacc(ddi_device_acc_attr_t *devaccp, uint_t *hataccp)
984 {
985 	if (devaccp != NULL) {
986 		if (devaccp->devacc_attr_endian_flags == DDI_STRUCTURE_LE_ACC) {
987 			*hataccp &= ~HAT_ENDIAN_MASK;
988 			*hataccp |= HAT_STRUCTURE_LE;
989 		}
990 	}
991 }
992 
993 /*
994  * Check if the specified cache attribute is supported on the platform.
995  * This function must be called before i_ddi_cacheattr_to_hatacc().
996  */
997 boolean_t
998 i_ddi_check_cache_attr(uint_t flags)
999 {
1000 	/*
1001 	 * The cache attributes are mutually exclusive. Any combination of
1002 	 * the attributes leads to a failure.
1003 	 */
1004 	uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
1005 	if ((cache_attr != 0) && ((cache_attr & (cache_attr - 1)) != 0))
1006 		return (B_FALSE);
1007 
1008 	/*
1009 	 * On the sparc architecture, only IOMEM_DATA_CACHED is meaningful,
1010 	 * but others lead to a failure.
1011 	 */
1012 	if (cache_attr & IOMEM_DATA_CACHED)
1013 		return (B_TRUE);
1014 	else
1015 		return (B_FALSE);
1016 }
1017 
1018 /* set HAT cache attributes from the cache attributes */
void
i_ddi_cacheattr_to_hatacc(uint_t flags, uint_t *hataccp)
{
	uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
	static char *fname = "i_ddi_cacheattr_to_hatacc";
#if defined(lint)
	*hataccp = *hataccp;	/* silence lint; hataccp is otherwise unused */
#endif
	/*
	 * set HAT attrs according to the cache attrs.
	 */
	switch (cache_attr) {
	/*
	 * The cache coherency is always maintained on SPARC, and
	 * nothing is required.
	 */
	case IOMEM_DATA_CACHED:
		break;
	/*
	 * Both IOMEM_DATA_UC_WRITE_COMBINED and IOMEM_DATA_UNCACHED are
	 * not supported on SPARC -- this case must not occur because the
	 * cache attribute is scrutinized before this function is called.
	 */
	case IOMEM_DATA_UNCACHED:
	case IOMEM_DATA_UC_WR_COMBINE:
	default:
		cmn_err(CE_WARN, "%s: cache_attr=0x%x is ignored.",
		    fname, cache_attr);
	}
}
1049 
1050 static vmem_t *little_endian_arena;
1051 static vmem_t *big_endian_arena;
1052 
static void *
segkmem_alloc_le(vmem_t *vmp, size_t size, int flag)
{
	/* Arena backend: pages mapped with HAT_STRUCTURE_LE attributes. */
	return (segkmem_xalloc(vmp, NULL, size, flag, HAT_STRUCTURE_LE,
	    segkmem_page_create, NULL));
}
1059 
static void *
segkmem_alloc_be(vmem_t *vmp, size_t size, int flag)
{
	/* Arena backend: pages mapped with HAT_STRUCTURE_BE attributes. */
	return (segkmem_xalloc(vmp, NULL, size, flag, HAT_STRUCTURE_BE,
	    segkmem_page_create, NULL));
}
1066 
void
ka_init(void)
{
	/*
	 * Create the endian-specific arenas used by kalloca()/kfreea();
	 * both import from heap_arena with single-byte quantum.
	 */
	little_endian_arena = vmem_create("little_endian", NULL, 0, 1,
	    segkmem_alloc_le, segkmem_free, heap_arena, 0, VM_SLEEP);
	big_endian_arena = vmem_create("big_endian", NULL, 0, 1,
	    segkmem_alloc_be, segkmem_free, heap_arena, 0, VM_SLEEP);
}
1075 
1076 /*
1077  * Allocate from the system, aligned on a specific boundary.
1078  * The alignment, if non-zero, must be a power of 2.
1079  */
static void *
kalloca(size_t size, size_t align, int cansleep, uint_t endian_flags)
{
	size_t *addr, *raddr, rsize;
	size_t hdrsize = 4 * sizeof (size_t);	/* must be power of 2 */

	align = MAX(align, hdrsize);
	ASSERT((align & (align - 1)) == 0);

	/*
	 * We need to allocate
	 *    rsize = size + hdrsize + align - MIN(hdrsize, buffer_alignment)
	 * bytes to be sure we have enough freedom to satisfy the request.
	 * Since the buffer alignment depends on the request size, this is
	 * not straightforward to use directly.
	 *
	 * kmem guarantees that any allocation of a 64-byte multiple will be
	 * 64-byte aligned.  Since rounding up the request could add more
	 * than we save, we compute the size with and without alignment, and
	 * use the smaller of the two.
	 */
	rsize = size + hdrsize + align;

	/* The arena chosen fixes the endianness of the mapping (ka_init). */
	if (endian_flags == DDI_STRUCTURE_LE_ACC) {
		raddr = vmem_alloc(little_endian_arena, rsize,
		    cansleep ? VM_SLEEP : VM_NOSLEEP);
	} else {
		raddr = vmem_alloc(big_endian_arena, rsize,
		    cansleep ? VM_SLEEP : VM_NOSLEEP);
	}

	if (raddr == NULL)
		return (NULL);

	addr = (size_t *)P2ROUNDUP((uintptr_t)raddr + hdrsize, align);
	ASSERT((uintptr_t)addr + size - (uintptr_t)raddr <= rsize);

	/*
	 * Record bookkeeping in the words just below the aligned buffer:
	 *   addr[-3]  endianness flag, used by kfreea() to pick the arena
	 *   addr[-2]  raw vmem allocation address
	 *   addr[-1]  raw vmem allocation size
	 * (addr[-4] is unused; hdrsize reserves four words.)
	 */
	addr[-3] = (size_t)endian_flags;
	addr[-2] = (size_t)raddr;
	addr[-1] = rsize;

	return (addr);
}
1123 
/*
 * Free a buffer obtained from kalloca(), using the header words that
 * kalloca() stored immediately below the returned address.
 */
static void
kfreea(void *addr)
{
	size_t *saddr = addr;

	/* saddr[-3] = endian flags, saddr[-2] = raw addr, [-1] = raw size */
	if (saddr[-3] == DDI_STRUCTURE_LE_ACC)
		vmem_free(little_endian_arena, (void *)saddr[-2], saddr[-1]);
	else
		vmem_free(big_endian_arena, (void *)saddr[-2], saddr[-1]);
}
1134 
/*
 * Allocate kernel memory suitable for DMA according to the supplied
 * DMA attributes.  On success, *kaddrp holds the buffer, *real_length
 * (if non-NULL) the length rounded up to the alignment actually used,
 * and *handlep (if non-NULL) is initialized as an access handle.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
i_ddi_mem_alloc(dev_info_t *dip, ddi_dma_attr_t *attr,
    size_t length, int cansleep, int flags,
    ddi_device_acc_attr_t *accattrp,
    caddr_t *kaddrp, size_t *real_length, ddi_acc_hdl_t *handlep)
{
	caddr_t a;
	int iomin, align, streaming;
	uint_t endian_flags = DDI_NEVERSWAP_ACC;

#if defined(lint)
	*handlep = *handlep;
#endif

	/*
	 * Check legality of arguments
	 */
	if (length == 0 || kaddrp == NULL || attr == NULL) {
		return (DDI_FAILURE);
	}

	/* Alignment and minimum transfer must be non-zero powers of two. */
	if (attr->dma_attr_minxfer == 0 || attr->dma_attr_align == 0 ||
	    (attr->dma_attr_align & (attr->dma_attr_align - 1)) ||
	    (attr->dma_attr_minxfer & (attr->dma_attr_minxfer - 1))) {
		return (DDI_FAILURE);
	}

	/*
	 * check if a streaming sequential xfer is requested.
	 */
	streaming = (flags & DDI_DMA_STREAMING) ? 1 : 0;

	/*
	 * Drivers for 64-bit capable SBus devices will encode
	 * the burstsizes for 64-bit xfers in the upper 16-bits.
	 * For DMA alignment, we use the most restrictive
	 * alignment of 32-bit and 64-bit xfers.
	 */
	iomin = (attr->dma_attr_burstsizes & 0xffff) |
	    ((attr->dma_attr_burstsizes >> 16) & 0xffff);
	/*
	 * If a driver set burstsizes to 0, we give him byte alignment.
	 * Otherwise align at the burstsizes boundary.
	 */
	if (iomin == 0)
		iomin = 1;
	else
		iomin = 1 << (ddi_fls(iomin) - 1);
	iomin = maxbit(iomin, attr->dma_attr_minxfer);
	iomin = maxbit(iomin, attr->dma_attr_align);
	iomin = ddi_iomin(dip, iomin, streaming);
	if (iomin == 0)
		return (DDI_FAILURE);

	ASSERT((iomin & (iomin - 1)) == 0);
	ASSERT(iomin >= attr->dma_attr_minxfer);
	ASSERT(iomin >= attr->dma_attr_align);

	/* Round the length up to a multiple of the chosen alignment. */
	length = P2ROUNDUP(length, iomin);
	align = iomin;

	if (accattrp != NULL)
		endian_flags = accattrp->devacc_attr_endian_flags;

	a = kalloca(length, align, cansleep, endian_flags);
	if ((*kaddrp = a) == 0) {
		return (DDI_FAILURE);
	} else {
		if (real_length) {
			*real_length = length;
		}
		if (handlep) {
			/*
			 * assign handle information
			 */
			impl_acc_hdl_init(handlep);
		}
		return (DDI_SUCCESS);
	}
}
1215 
1216 /*
1217  * covert old DMA limits structure to DMA attribute structure
1218  * and continue
1219  */
/*
 * Compatibility shim: translate an old-style ddi_dma_lim_t into a
 * DMA_ATTR_V0 ddi_dma_attr_t and forward to i_ddi_mem_alloc().
 * Note that real_length is narrowed from size_t to uint_t here for
 * old callers.
 */
int
i_ddi_mem_alloc_lim(dev_info_t *dip, ddi_dma_lim_t *limits,
    size_t length, int cansleep, int streaming,
    ddi_device_acc_attr_t *accattrp, caddr_t *kaddrp,
    uint_t *real_length, ddi_acc_hdl_t *ap)
{
	ddi_dma_attr_t dma_attr, *attrp;
	size_t rlen;
	int ret;

	ASSERT(limits);
	attrp = &dma_attr;
	attrp->dma_attr_version = DMA_ATTR_V0;
	attrp->dma_attr_addr_lo = (uint64_t)limits->dlim_addr_lo;
	attrp->dma_attr_addr_hi = (uint64_t)limits->dlim_addr_hi;
	attrp->dma_attr_count_max = (uint64_t)-1;
	attrp->dma_attr_align = 1;
	attrp->dma_attr_burstsizes = (uint_t)limits->dlim_burstsizes;
	attrp->dma_attr_minxfer = (uint32_t)limits->dlim_minxfer;
	attrp->dma_attr_maxxfer = (uint64_t)-1;
	attrp->dma_attr_seg = (uint64_t)limits->dlim_cntr_max;
	attrp->dma_attr_sgllen = 1;
	attrp->dma_attr_granular = 1;
	attrp->dma_attr_flags = 0;

	ret = i_ddi_mem_alloc(dip, attrp, length, cansleep, streaming,
	    accattrp, kaddrp, &rlen, ap);
	if (ret == DDI_SUCCESS) {
		if (real_length)
			*real_length = (uint_t)rlen;
	}
	return (ret);
}
1253 
/*
 * Free memory obtained from i_ddi_mem_alloc(); the access handle,
 * if any, is not touched here.
 */
/* ARGSUSED */
void
i_ddi_mem_free(caddr_t kaddr, ddi_acc_hdl_t *ap)
{
	kfreea(kaddr);
}
1260 
1261 /*
1262  * SECTION: DDI Data Access
1263  */
1264 
1265 static uintptr_t impl_acc_hdl_id = 0;
1266 
1267 /*
1268  * access handle allocator
1269  */
1270 ddi_acc_hdl_t *
1271 impl_acc_hdl_get(ddi_acc_handle_t hdl)
1272 {
1273 	/*
1274 	 * Extract the access handle address from the DDI implemented
1275 	 * access handle
1276 	 */
1277 	return (&((ddi_acc_impl_t *)hdl)->ahi_common);
1278 }
1279 
/*
 * Allocate a data access handle (ddi_acc_impl_t) together with its
 * error status (ndi_err_t) and on_trap data.  'waitfp' follows the
 * DDI callback convention: the values KM_SLEEP/KM_NOSLEEP select the
 * kmem sleep flag; any other value is a driver callback that is
 * registered via ddi_set_callback() if allocation fails.
 * Returns NULL on allocation failure.
 */
ddi_acc_handle_t
impl_acc_hdl_alloc(int (*waitfp)(caddr_t), caddr_t arg)
{
	ddi_acc_impl_t *hp;
	on_trap_data_t *otp;
	int sleepflag;

	sleepflag = ((waitfp == (int (*)())KM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);

	/*
	 * Allocate and initialize the data access handle and error status.
	 */
	if ((hp = kmem_zalloc(sizeof (ddi_acc_impl_t), sleepflag)) == NULL)
		goto fail;
	if ((hp->ahi_err = (ndi_err_t *)kmem_zalloc(
	    sizeof (ndi_err_t), sleepflag)) == NULL) {
		kmem_free(hp, sizeof (ddi_acc_impl_t));
		goto fail;
	}
	if ((otp = (on_trap_data_t *)kmem_zalloc(
	    sizeof (on_trap_data_t), sleepflag)) == NULL) {
		kmem_free(hp->ahi_err, sizeof (ndi_err_t));
		kmem_free(hp, sizeof (ddi_acc_impl_t));
		goto fail;
	}
	hp->ahi_err->err_ontrap = otp;
	hp->ahi_common.ah_platform_private = (void *)hp;

	return ((ddi_acc_handle_t)hp);
fail:
	/* Schedule the caller's callback, if one was supplied. */
	if ((waitfp != (int (*)())KM_SLEEP) &&
	    (waitfp != (int (*)())KM_NOSLEEP))
		ddi_set_callback(waitfp, arg, &impl_acc_hdl_id);
	return (NULL);
}
1315 
/*
 * Free an access handle allocated by impl_acc_hdl_alloc(), including
 * its on_trap data and error status.  If allocation callbacks are
 * outstanding, run them now that memory has been freed.
 */
void
impl_acc_hdl_free(ddi_acc_handle_t handle)
{
	ddi_acc_impl_t *hp;

	/*
	 * The supplied (ddi_acc_handle_t) is actually a (ddi_acc_impl_t *),
	 * because that's what we allocated in impl_acc_hdl_alloc() above.
	 */
	hp = (ddi_acc_impl_t *)handle;
	if (hp) {
		kmem_free(hp->ahi_err->err_ontrap, sizeof (on_trap_data_t));
		kmem_free(hp->ahi_err, sizeof (ndi_err_t));
		kmem_free(hp, sizeof (ddi_acc_impl_t));
		if (impl_acc_hdl_id)
			ddi_run_callback(&impl_acc_hdl_id);
	}
}
1334 
/*
 * Fetch the pfn of page 'page_no' of a DMA mapping: for a single-page
 * mapping dmai_iopte holds the pfn directly, otherwise it points to an
 * array of pfns.
 */
#define	PCI_GET_MP_PFN(mp, page_no)	((mp)->dmai_ndvmapages == 1 ? \
	(pfn_t)(mp)->dmai_iopte:(((pfn_t *)(mp)->dmai_iopte)[page_no]))
1337 
1338 /*
1339  * Function called after a dma fault occurred to find out whether the
1340  * fault address is associated with a driver that is able to handle faults
1341  * and recover from faults.
1342  */
1343 /* ARGSUSED */
1344 int
1345 impl_dma_check(dev_info_t *dip, const void *handle, const void *addr,
1346     const void *not_used)
1347 {
1348 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1349 	pfn_t fault_pfn = mmu_btop(*(uint64_t *)addr);
1350 	pfn_t comp_pfn;
1351 
1352 	/*
1353 	 * The driver has to set DDI_DMA_FLAGERR to recover from dma faults.
1354 	 */
1355 	int page;
1356 
1357 	ASSERT(mp);
1358 	for (page = 0; page < mp->dmai_ndvmapages; page++) {
1359 		comp_pfn = PCI_GET_MP_PFN(mp, page);
1360 		if (fault_pfn == comp_pfn)
1361 			return (DDI_FM_NONFATAL);
1362 	}
1363 	return (DDI_FM_UNKNOWN);
1364 }
1365 
1366 /*
1367  * Function used to check if a given access handle owns the failing address.
1368  * Called by ndi_fmc_error, when we detect a PIO error.
1369  */
1370 /* ARGSUSED */
1371 static int
1372 impl_acc_check(dev_info_t *dip, const void *handle, const void *addr,
1373     const void *not_used)
1374 {
1375 	pfn_t pfn, fault_pfn;
1376 	ddi_acc_hdl_t *hp;
1377 
1378 	hp = impl_acc_hdl_get((ddi_acc_handle_t)handle);
1379 
1380 	ASSERT(hp);
1381 
1382 	if (addr != NULL) {
1383 		pfn = hp->ah_pfn;
1384 		fault_pfn = mmu_btop(*(uint64_t *)addr);
1385 		if (fault_pfn >= pfn && fault_pfn < (pfn + hp->ah_pnum))
1386 			return (DDI_FM_NONFATAL);
1387 	}
1388 	return (DDI_FM_UNKNOWN);
1389 }
1390 
1391 void
1392 impl_acc_err_init(ddi_acc_hdl_t *handlep)
1393 {
1394 	int fmcap;
1395 	ndi_err_t *errp;
1396 	on_trap_data_t *otp;
1397 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handlep;
1398 
1399 	fmcap = ddi_fm_capable(handlep->ah_dip);
1400 
1401 	if (handlep->ah_acc.devacc_attr_version < DDI_DEVICE_ATTR_V1 ||
1402 	    !DDI_FM_ACC_ERR_CAP(fmcap)) {
1403 		handlep->ah_acc.devacc_attr_access = DDI_DEFAULT_ACC;
1404 	} else if (DDI_FM_ACC_ERR_CAP(fmcap)) {
1405 		if (handlep->ah_acc.devacc_attr_access == DDI_DEFAULT_ACC) {
1406 			i_ddi_drv_ereport_post(handlep->ah_dip, DVR_EFMCAP,
1407 			    NULL, DDI_NOSLEEP);
1408 		} else {
1409 			errp = hp->ahi_err;
1410 			otp = (on_trap_data_t *)errp->err_ontrap;
1411 			otp->ot_handle = (void *)(hp);
1412 			otp->ot_prot = OT_DATA_ACCESS;
1413 			if (handlep->ah_acc.devacc_attr_access ==
1414 			    DDI_CAUTIOUS_ACC)
1415 				otp->ot_trampoline =
1416 				    (uintptr_t)&i_ddi_caut_trampoline;
1417 			else
1418 				otp->ot_trampoline =
1419 				    (uintptr_t)&i_ddi_prot_trampoline;
1420 			errp->err_status = DDI_FM_OK;
1421 			errp->err_expected = DDI_FM_ERR_UNEXPECTED;
1422 			errp->err_cf = impl_acc_check;
1423 		}
1424 	}
1425 }
1426 
/*
 * Fill in the access routine vectors of a handle.  8-bit accesses
 * never need swapping; for wider accesses, install the byte-swapping
 * routines when the handle asks for little-endian structure access
 * (SW swap on this big-endian platform), else the direct routines.
 * Finishes by initializing the legacy fault fields and FMA state.
 */
void
impl_acc_hdl_init(ddi_acc_hdl_t *handlep)
{
	ddi_acc_impl_t *hp;

	ASSERT(handlep);

	hp = (ddi_acc_impl_t *)handlep;

	/*
	 * check for SW byte-swapping
	 */
	hp->ahi_get8 = i_ddi_get8;
	hp->ahi_put8 = i_ddi_put8;
	hp->ahi_rep_get8 = i_ddi_rep_get8;
	hp->ahi_rep_put8 = i_ddi_rep_put8;
	if (handlep->ah_acc.devacc_attr_endian_flags & DDI_STRUCTURE_LE_ACC) {
		hp->ahi_get16 = i_ddi_swap_get16;
		hp->ahi_get32 = i_ddi_swap_get32;
		hp->ahi_get64 = i_ddi_swap_get64;
		hp->ahi_put16 = i_ddi_swap_put16;
		hp->ahi_put32 = i_ddi_swap_put32;
		hp->ahi_put64 = i_ddi_swap_put64;
		hp->ahi_rep_get16 = i_ddi_swap_rep_get16;
		hp->ahi_rep_get32 = i_ddi_swap_rep_get32;
		hp->ahi_rep_get64 = i_ddi_swap_rep_get64;
		hp->ahi_rep_put16 = i_ddi_swap_rep_put16;
		hp->ahi_rep_put32 = i_ddi_swap_rep_put32;
		hp->ahi_rep_put64 = i_ddi_swap_rep_put64;
	} else {
		hp->ahi_get16 = i_ddi_get16;
		hp->ahi_get32 = i_ddi_get32;
		hp->ahi_get64 = i_ddi_get64;
		hp->ahi_put16 = i_ddi_put16;
		hp->ahi_put32 = i_ddi_put32;
		hp->ahi_put64 = i_ddi_put64;
		hp->ahi_rep_get16 = i_ddi_rep_get16;
		hp->ahi_rep_get32 = i_ddi_rep_get32;
		hp->ahi_rep_get64 = i_ddi_rep_get64;
		hp->ahi_rep_put16 = i_ddi_rep_put16;
		hp->ahi_rep_put32 = i_ddi_rep_put32;
		hp->ahi_rep_put64 = i_ddi_rep_put64;
	}

	/* Legacy fault flags and support */
	hp->ahi_fault_check = i_ddi_acc_fault_check;
	hp->ahi_fault_notify = i_ddi_acc_fault_notify;
	hp->ahi_fault = 0;
	impl_acc_err_init(handlep);
}
1477 
1478 void
1479 i_ddi_acc_set_fault(ddi_acc_handle_t handle)
1480 {
1481 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
1482 
1483 	if (!hp->ahi_fault) {
1484 		hp->ahi_fault = 1;
1485 			(*hp->ahi_fault_notify)(hp);
1486 	}
1487 }
1488 
1489 void
1490 i_ddi_acc_clr_fault(ddi_acc_handle_t handle)
1491 {
1492 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
1493 
1494 	if (hp->ahi_fault) {
1495 		hp->ahi_fault = 0;
1496 			(*hp->ahi_fault_notify)(hp);
1497 	}
1498 }
1499 
/*
 * Default fault-notify callback installed by impl_acc_hdl_init();
 * intentionally a no-op.
 */
/* ARGSUSED */
void
i_ddi_acc_fault_notify(ddi_acc_impl_t *hp)
{
	/* Default version, does nothing */
}
1506 
1507 /*
1508  * SECTION: Misc functions
1509  */
1510 
1511 /*
1512  * instance wrappers
1513  */
/*ARGSUSED*/
uint_t
impl_assign_instance(dev_info_t *dip)
{
	/* No platform-assigned instance numbers on sun4. */
	return ((uint_t)-1);
}
1520 
/*ARGSUSED*/
int
impl_keep_instance(dev_info_t *dip)
{
	/* Instance persistence is not handled at this layer on sun4. */
	return (DDI_FAILURE);
}
1527 
/*ARGSUSED*/
int
impl_free_instance(dev_info_t *dip)
{
	/* Nothing to release; see impl_assign_instance() above. */
	return (DDI_FAILURE);
}
1534 
/*ARGSUSED*/
int
impl_check_cpu(dev_info_t *devi)
{
	/* All cpus are acceptable on this platform. */
	return (DDI_SUCCESS);
}
1541 
1542 
/*
 * Framebuffer bindings whose device mappings must not be copied
 * directly; consulted by e_ddi_copyfromdev()/e_ddi_copytodev().
 */
static const char *nocopydevs[] = {
	"SUNW,ffb",
	"SUNW,afb",
	NULL	/* list terminator */
};
1548 
1549 /*
1550  * Perform a copy from a memory mapped device (whose devinfo pointer is devi)
1551  * separately mapped at devaddr in the kernel to a kernel buffer at kaddr.
1552  */
1553 /*ARGSUSED*/
1554 int
1555 e_ddi_copyfromdev(dev_info_t *devi,
1556     off_t off, const void *devaddr, void *kaddr, size_t len)
1557 {
1558 	const char **argv;
1559 
1560 	for (argv = nocopydevs; *argv; argv++)
1561 		if (strcmp(ddi_binding_name(devi), *argv) == 0) {
1562 			bzero(kaddr, len);
1563 			return (0);
1564 		}
1565 
1566 	bcopy(devaddr, kaddr, len);
1567 	return (0);
1568 }
1569 
1570 /*
1571  * Perform a copy to a memory mapped device (whose devinfo pointer is devi)
1572  * separately mapped at devaddr in the kernel from a kernel buffer at kaddr.
1573  */
1574 /*ARGSUSED*/
1575 int
1576 e_ddi_copytodev(dev_info_t *devi,
1577     off_t off, const void *kaddr, void *devaddr, size_t len)
1578 {
1579 	const char **argv;
1580 
1581 	for (argv = nocopydevs; *argv; argv++)
1582 		if (strcmp(ddi_binding_name(devi), *argv) == 0)
1583 			return (1);
1584 
1585 	bcopy(kaddr, devaddr, len);
1586 	return (0);
1587 }
1588 
1589 /*
1590  * Boot Configuration
1591  */
1592 idprom_t idprom;
1593 
1594 /*
1595  * Configure the hardware on the system.
1596  * Called before the rootfs is mounted
1597  */
1598 void
1599 configure(void)
1600 {
1601 	extern void i_ddi_init_root();
1602 
1603 	/* We better have released boot by this time! */
1604 	ASSERT(!bootops);
1605 
1606 	/*
1607 	 * Determine whether or not to use the fpu, V9 SPARC cpus
1608 	 * always have one. Could check for existence of a fp queue,
1609 	 * Ultra I, II and IIa do not have a fp queue.
1610 	 */
1611 	if (fpu_exists)
1612 		fpu_probe();
1613 	else
1614 		cmn_err(CE_CONT, "FPU not in use\n");
1615 
1616 #if 0 /* XXXQ - not necessary for sun4u */
1617 	/*
1618 	 * This following line fixes bugid 1041296; we need to do a
1619 	 * prom_nextnode(0) because this call ALSO patches the DMA+
1620 	 * bug in Campus-B and Phoenix. The prom uncaches the traptable
1621 	 * page as a side-effect of devr_next(0) (which prom_nextnode calls),
1622 	 * so this *must* be executed early on. (XXX This is untrue for sun4u)
1623 	 */
1624 	(void) prom_nextnode((pnode_t)0);
1625 #endif
1626 
1627 	/*
1628 	 * Initialize devices on the machine.
1629 	 * Uses configuration tree built by the PROMs to determine what
1630 	 * is present, and builds a tree of prototype dev_info nodes
1631 	 * corresponding to the hardware which identified itself.
1632 	 */
1633 	i_ddi_init_root();
1634 
1635 #ifdef	DDI_PROP_DEBUG
1636 	(void) ddi_prop_debug(1);	/* Enable property debugging */
1637 #endif	/* DDI_PROP_DEBUG */
1638 }
1639 
1640 /*
1641  * The "status" property indicates the operational status of a device.
1642  * If this property is present, the value is a string indicating the
1643  * status of the device as follows:
1644  *
1645  *	"okay"		operational.
1646  *	"disabled"	not operational, but might become operational.
1647  *	"fail"		not operational because a fault has been detected,
1648  *			and it is unlikely that the device will become
1649  *			operational without repair. no additional details
1650  *			are available.
1651  *	"fail-xxx"	not operational because a fault has been detected,
1652  *			and it is unlikely that the device will become
1653  *			operational without repair. "xxx" is additional
1654  *			human-readable information about the particular
1655  *			fault condition that was detected.
1656  *
1657  * The absence of this property means that the operational status is
1658  * unknown or okay.
1659  *
1660  * This routine checks the status property of the specified device node
1661  * and returns 0 if the operational status indicates failure, and 1 otherwise.
1662  *
 * The property may exist on plug-in cards that existed before IEEE 1275-1994.
1664  * And, in that case, the property may not even be a string. So we carefully
1665  * check for the value "fail", in the beginning of the string, noting
1666  * the property length.
1667  */
1668 int
1669 status_okay(int id, char *buf, int buflen)
1670 {
1671 	char status_buf[OBP_MAXPROPNAME];
1672 	char *bufp = buf;
1673 	int len = buflen;
1674 	int proplen;
1675 	static const char *status = "status";
1676 	static const char *fail = "fail";
1677 	size_t fail_len = strlen(fail);
1678 
1679 	/*
1680 	 * Get the proplen ... if it's smaller than "fail",
1681 	 * or doesn't exist ... then we don't care, since
1682 	 * the value can't begin with the char string "fail".
1683 	 *
1684 	 * NB: proplen, if it's a string, includes the NULL in the
1685 	 * the size of the property, and fail_len does not.
1686 	 */
1687 	proplen = prom_getproplen((pnode_t)id, (caddr_t)status);
1688 	if (proplen <= fail_len)	/* nonexistent or uninteresting len */
1689 		return (1);
1690 
1691 	/*
1692 	 * if a buffer was provided, use it
1693 	 */
1694 	if ((buf == (char *)NULL) || (buflen <= 0)) {
1695 		bufp = status_buf;
1696 		len = sizeof (status_buf);
1697 	}
1698 	*bufp = (char)0;
1699 
1700 	/*
1701 	 * Get the property into the buffer, to the extent of the buffer,
1702 	 * and in case the buffer is smaller than the property size,
1703 	 * NULL terminate the buffer. (This handles the case where
1704 	 * a buffer was passed in and the caller wants to print the
1705 	 * value, but the buffer was too small).
1706 	 */
1707 	(void) prom_bounded_getprop((pnode_t)id, (caddr_t)status,
1708 	    (caddr_t)bufp, len);
1709 	*(bufp + len - 1) = (char)0;
1710 
1711 	/*
1712 	 * If the value begins with the char string "fail",
1713 	 * then it means the node is failed. We don't care
1714 	 * about any other values. We assume the node is ok
1715 	 * although it might be 'disabled'.
1716 	 */
1717 	if (strncmp(bufp, fail, fail_len) == 0)
1718 		return (0);
1719 
1720 	return (1);
1721 }
1722 
1723 
1724 /*
1725  * We set the cpu type from the idprom, if we can.
1726  * Note that we just read out the contents of it, for the most part.
1727  */
1728 void
1729 setcputype(void)
1730 {
1731 	/*
1732 	 * We cache the idprom info early on so that we don't
1733 	 * rummage through the NVRAM unnecessarily later.
1734 	 */
1735 	(void) prom_getidprom((caddr_t)&idprom, sizeof (idprom));
1736 }
1737 
1738 /*
1739  *  Here is where we actually infer meanings to the members of idprom_t
1740  */
1741 void
1742 parse_idprom(void)
1743 {
1744 	if (idprom.id_format == IDFORM_1) {
1745 		uint_t i;
1746 
1747 		(void) localetheraddr((struct ether_addr *)idprom.id_ether,
1748 		    (struct ether_addr *)NULL);
1749 
1750 		i = idprom.id_machine << 24;
1751 		i = i + idprom.id_serial;
1752 		numtos((ulong_t)i, hw_serial);
1753 	} else
1754 		prom_printf("Invalid format code in IDprom.\n");
1755 }
1756 
1757 /*
1758  * Allow for implementation specific correction of PROM property values.
1759  */
1760 /*ARGSUSED*/
1761 void
1762 impl_fix_props(dev_info_t *dip, dev_info_t *ch_dip, char *name, int len,
1763     caddr_t buffer)
1764 {
1765 	/*
1766 	 * There are no adjustments needed in this implementation.
1767 	 */
1768 }
1769 
1770 /*
1771  * The following functions ready a cautious request to go up to the nexus
1772  * driver.  It is up to the nexus driver to decide how to process the request.
1773  * It may choose to call i_ddi_do_caut_get/put in this file, or do it
1774  * differently.
1775  */
1776 
1777 static void
1778 i_ddi_caut_getput_ctlops(
1779     ddi_acc_impl_t *hp, uint64_t host_addr, uint64_t dev_addr, size_t size,
1780     size_t repcount, uint_t flags, ddi_ctl_enum_t cmd)
1781 {
1782 	peekpoke_ctlops_t	cautacc_ctlops_arg;
1783 
1784 	cautacc_ctlops_arg.size = size;
1785 	cautacc_ctlops_arg.dev_addr = dev_addr;
1786 	cautacc_ctlops_arg.host_addr = host_addr;
1787 	cautacc_ctlops_arg.handle = (ddi_acc_handle_t)hp;
1788 	cautacc_ctlops_arg.repcount = repcount;
1789 	cautacc_ctlops_arg.flags = flags;
1790 
1791 	(void) ddi_ctlops(hp->ahi_common.ah_dip, hp->ahi_common.ah_dip, cmd,
1792 	    &cautacc_ctlops_arg, NULL);
1793 }
1794 
/*
 * Cautious single-value PIO reads: each routes a DDI_CTLOPS_PEEK of the
 * appropriate width through i_ddi_caut_getput_ctlops() and returns the
 * value read into the local.
 */
uint8_t
i_ddi_caut_get8(ddi_acc_impl_t *hp, uint8_t *addr)
{
	uint8_t value;
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint8_t), 1, 0, DDI_CTLOPS_PEEK);

	return (value);
}

uint16_t
i_ddi_caut_get16(ddi_acc_impl_t *hp, uint16_t *addr)
{
	uint16_t value;
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint16_t), 1, 0, DDI_CTLOPS_PEEK);

	return (value);
}

uint32_t
i_ddi_caut_get32(ddi_acc_impl_t *hp, uint32_t *addr)
{
	uint32_t value;
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint32_t), 1, 0, DDI_CTLOPS_PEEK);

	return (value);
}

uint64_t
i_ddi_caut_get64(ddi_acc_impl_t *hp, uint64_t *addr)
{
	uint64_t value;
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint64_t), 1, 0, DDI_CTLOPS_PEEK);

	return (value);
}
1834 
/*
 * Cautious single-value PIO writes: each routes a DDI_CTLOPS_POKE of
 * the appropriate width through i_ddi_caut_getput_ctlops().
 */
void
i_ddi_caut_put8(ddi_acc_impl_t *hp, uint8_t *addr, uint8_t value)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint8_t), 1, 0, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_put16(ddi_acc_impl_t *hp, uint16_t *addr, uint16_t value)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint16_t), 1, 0, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_put32(ddi_acc_impl_t *hp, uint32_t *addr, uint32_t value)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint32_t), 1, 0, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_put64(ddi_acc_impl_t *hp, uint64_t *addr, uint64_t value)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)&value, (uint64_t)addr,
	    sizeof (uint64_t), 1, 0, DDI_CTLOPS_POKE);
}
1862 
/*
 * Cautious repeated PIO reads: DDI_CTLOPS_PEEK with a repcount and the
 * caller's flags (e.g. device-address increment behavior).
 */
void
i_ddi_caut_rep_get8(ddi_acc_impl_t *hp, uint8_t *host_addr, uint8_t *dev_addr,
	size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint8_t), repcount, flags, DDI_CTLOPS_PEEK);
}

void
i_ddi_caut_rep_get16(ddi_acc_impl_t *hp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint16_t), repcount, flags, DDI_CTLOPS_PEEK);
}

void
i_ddi_caut_rep_get32(ddi_acc_impl_t *hp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint32_t), repcount, flags, DDI_CTLOPS_PEEK);
}

void
i_ddi_caut_rep_get64(ddi_acc_impl_t *hp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint64_t), repcount, flags, DDI_CTLOPS_PEEK);
}
1894 
/*
 * Cautious repeated PIO writes: DDI_CTLOPS_POKE with a repcount and
 * the caller's flags.
 */
void
i_ddi_caut_rep_put8(ddi_acc_impl_t *hp, uint8_t *host_addr, uint8_t *dev_addr,
	size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint8_t), repcount, flags, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_rep_put16(ddi_acc_impl_t *hp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint16_t), repcount, flags, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_rep_put32(ddi_acc_impl_t *hp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint32_t), repcount, flags, DDI_CTLOPS_POKE);
}

void
i_ddi_caut_rep_put64(ddi_acc_impl_t *hp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	i_ddi_caut_getput_ctlops(hp, (uint64_t)host_addr, (uint64_t)dev_addr,
	    sizeof (uint64_t), repcount, flags, DDI_CTLOPS_POKE);
}
1926 
1927 /*
1928  * This is called only to process peek/poke when the DIP is NULL.
1929  * Assume that this is for memory, as nexi take care of device safe accesses.
1930  */
1931 int
1932 peekpoke_mem(ddi_ctl_enum_t cmd, peekpoke_ctlops_t *in_args)
1933 {
1934 	int err = DDI_SUCCESS;
1935 	on_trap_data_t otd;
1936 
1937 	/* Set up protected environment. */
1938 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1939 		uintptr_t tramp = otd.ot_trampoline;
1940 
1941 		if (cmd == DDI_CTLOPS_POKE) {
1942 			otd.ot_trampoline = (uintptr_t)&poke_fault;
1943 			err = do_poke(in_args->size, (void *)in_args->dev_addr,
1944 			    (void *)in_args->host_addr);
1945 		} else {
1946 			otd.ot_trampoline = (uintptr_t)&peek_fault;
1947 			err = do_peek(in_args->size, (void *)in_args->dev_addr,
1948 			    (void *)in_args->host_addr);
1949 		}
1950 		otd.ot_trampoline = tramp;
1951 	} else
1952 		err = DDI_FAILURE;
1953 
1954 	/* Take down protected environment. */
1955 	no_trap();
1956 
1957 	return (err);
1958 }
1959 
1960 /*
1961  * Platform independent DR routines
1962  */
1963 
1964 static int
1965 ndi2errno(int n)
1966 {
1967 	int err = 0;
1968 
1969 	switch (n) {
1970 		case NDI_NOMEM:
1971 			err = ENOMEM;
1972 			break;
1973 		case NDI_BUSY:
1974 			err = EBUSY;
1975 			break;
1976 		case NDI_FAULT:
1977 			err = EFAULT;
1978 			break;
1979 		case NDI_FAILURE:
1980 			err = EIO;
1981 			break;
1982 		case NDI_SUCCESS:
1983 			break;
1984 		case NDI_BADHANDLE:
1985 		default:
1986 			err = EINVAL;
1987 			break;
1988 	}
1989 	return (err);
1990 }
1991 
1992 /*
1993  * Prom tree node list
1994  */
1995 struct ptnode {
1996 	pnode_t		nodeid;
1997 	struct ptnode	*next;
1998 };
1999 
2000 /*
2001  * Prom tree walk arg
2002  */
2003 struct pta {
2004 	dev_info_t	*pdip;
2005 	devi_branch_t	*bp;
2006 	uint_t		flags;
2007 	dev_info_t	*fdip;
2008 	struct ptnode	*head;
2009 };
2010 
/*
 * Recursively visit 'nodeid' and its subtree, appending every node for
 * which the branch select callback returns DDI_SUCCESS to the tail of
 * ap->head.  When DEVI_BRANCH_CHILD is set, only the node itself is
 * considered and its children are not descended into.
 */
static void
visit_node(pnode_t nodeid, struct pta *ap)
{
	struct ptnode	**nextp;
	int		(*select)(pnode_t, void *, uint_t);

	ASSERT(nodeid != OBP_NONODE && nodeid != OBP_BADNODE);

	select = ap->bp->create.prom_branch_select;

	ASSERT(select);

	if (select(nodeid, ap->bp->arg, 0) == DDI_SUCCESS) {

		/* Walk to the tail of the list and append a new entry. */
		for (nextp = &ap->head; *nextp; nextp = &(*nextp)->next)
			;

		*nextp = kmem_zalloc(sizeof (struct ptnode), KM_SLEEP);

		(*nextp)->nodeid = nodeid;
	}

	if ((ap->flags & DEVI_BRANCH_CHILD) == DEVI_BRANCH_CHILD)
		return;

	/* Recurse over all children of this node. */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}
}
2042 
2043 /*
2044  * NOTE: The caller of this function must check for device contracts
2045  * or LDI callbacks against this dip before setting the dip offline.
2046  */
2047 static int
2048 set_infant_dip_offline(dev_info_t *dip, void *arg)
2049 {
2050 	char	*path = (char *)arg;
2051 
2052 	ASSERT(dip);
2053 	ASSERT(arg);
2054 
2055 	if (i_ddi_node_state(dip) >= DS_ATTACHED) {
2056 		(void) ddi_pathname(dip, path);
2057 		cmn_err(CE_WARN, "Attempt to set offline flag on attached "
2058 		    "node: %s", path);
2059 		return (DDI_FAILURE);
2060 	}
2061 
2062 	mutex_enter(&(DEVI(dip)->devi_lock));
2063 	if (!DEVI_IS_DEVICE_OFFLINE(dip))
2064 		DEVI_SET_DEVICE_OFFLINE(dip);
2065 	mutex_exit(&(DEVI(dip)->devi_lock));
2066 
2067 	return (DDI_SUCCESS);
2068 }
2069 
/* Walk state shared with dip_set_offline(). */
typedef struct result {
	char	*path;		/* scratch pathname buffer */
	int	result;		/* overall DDI_SUCCESS/DDI_FAILURE */
} result_t;
2074 
/*
 * ddi_walk_devs() callback: notify offline consumers of 'dip' and, if
 * none object, mark the (not-yet-attached) dip offline.  Updates
 * resp->result and decides whether the walk continues.
 */
static int
dip_set_offline(dev_info_t *dip, void *arg)
{
	int end;
	result_t *resp = (result_t *)arg;

	ASSERT(dip);
	ASSERT(resp);

	/*
	 * We stop the walk if e_ddi_offline_notify() returns
	 * failure, because this implies that one or more consumers
	 * (either LDI or contract based) has blocked the offline.
	 * So there is no point in continuing the walk
	 */
	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
		resp->result = DDI_FAILURE;
		return (DDI_WALK_TERMINATE);
	}

	/*
	 * If set_infant_dip_offline() returns failure, it implies
	 * that we failed to set a particular dip offline. This
	 * does not imply that the offline as a whole should fail.
	 * We want to do the best we can, so we continue the walk.
	 */
	if (set_infant_dip_offline(dip, resp->path) == DDI_SUCCESS)
		end = DDI_SUCCESS;
	else
		end = DDI_FAILURE;

	e_ddi_offline_finalize(dip, end);

	return (DDI_WALK_CONTINUE);
}
2110 
2111 /*
2112  * The call to e_ddi_offline_notify() exists for the
2113  * unlikely error case that a branch we are trying to
2114  * create already exists and has device contracts or LDI
2115  * event callbacks against it.
2116  *
2117  * We allow create to succeed for such branches only if
2118  * no constraints block the offline.
2119  */
2120 static int
2121 branch_set_offline(dev_info_t *dip, char *path)
2122 {
2123 	int		circ;
2124 	int		end;
2125 	result_t	res;
2126 
2127 
2128 	if (e_ddi_offline_notify(dip) == DDI_FAILURE) {
2129 		return (DDI_FAILURE);
2130 	}
2131 
2132 	if (set_infant_dip_offline(dip, path) == DDI_SUCCESS)
2133 		end = DDI_SUCCESS;
2134 	else
2135 		end = DDI_FAILURE;
2136 
2137 	e_ddi_offline_finalize(dip, end);
2138 
2139 	if (end == DDI_FAILURE)
2140 		return (DDI_FAILURE);
2141 
2142 	res.result = DDI_SUCCESS;
2143 	res.path = path;
2144 
2145 	ndi_devi_enter(dip, &circ);
2146 	ddi_walk_devs(ddi_get_child(dip), dip_set_offline, &res);
2147 	ndi_devi_exit(dip, circ);
2148 
2149 	return (res.result);
2150 }
2151 
/*
 * PROM-branch creation worker, run under prom_tree_access() so the
 * PROM device tree is stable during the walk (has_changed is unused).
 * visit_node() (defined elsewhere in this file) is expected to queue
 * selected child PROM nodes on ap->head; each queued node is turned
 * into a held, offline devinfo branch and optionally configured.
 *
 * Returns 0 on success or an errno value. A per-branch failure sets
 * EIO but the walk continues for the remaining branches.
 */
/*ARGSUSED*/
static int
create_prom_branch(void *arg, int has_changed)
{
	int		circ;
	int		exists, rv;
	pnode_t		nodeid;
	struct ptnode	*tnp;
	dev_info_t	*dip;
	struct pta	*ap = arg;
	devi_branch_t	*bp;
	char		*path;

	ASSERT(ap);
	ASSERT(ap->fdip == NULL);
	ASSERT(ap->pdip && ndi_dev_is_prom_node(ap->pdip));

	bp = ap->bp;

	nodeid = ddi_get_nodeid(ap->pdip);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE) {
		cmn_err(CE_WARN, "create_prom_branch: invalid "
		    "nodeid: 0x%x", nodeid);
		return (EINVAL);
	}

	ap->head = NULL;

	/* Visit each child PROM node; visit_node() builds ap->head */
	nodeid = prom_childnode(nodeid);
	while (nodeid != OBP_NONODE && nodeid != OBP_BADNODE) {
		visit_node(nodeid, ap);
		nodeid = prom_nextnode(nodeid);
	}

	if (ap->head == NULL)
		return (ENODEV);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	rv = 0;
	while ((tnp = ap->head) != NULL) {
		ap->head = tnp->next;

		ndi_devi_enter(ap->pdip, &circ);

		/*
		 * Check if the branch already exists.
		 */
		exists = 0;
		dip = e_ddi_nodeid_to_dip(tnp->nodeid);
		if (dip != NULL) {
			exists = 1;

			/* Parent is held busy, so release hold */
			ndi_rele_devi(dip);
#ifdef	DEBUG
			cmn_err(CE_WARN, "create_prom_branch: dip(%p) exists"
			    " for nodeid 0x%x", (void *)dip, tnp->nodeid);
#endif
		} else {
			dip = i_ddi_create_branch(ap->pdip, tnp->nodeid);
		}

		kmem_free(tnp, sizeof (struct ptnode));

		/*
		 * Hold the branch if it is not already held
		 */
		if (dip && !exists) {
			e_ddi_branch_hold(dip);
		}

		ASSERT(dip == NULL || e_ddi_branch_held(dip));

		/*
		 * Set all dips in the newly created branch offline so that
		 * only a "configure" operation can attach
		 * the branch
		 */
		if (dip == NULL || branch_set_offline(dip, path)
		    == DDI_FAILURE) {
			ndi_devi_exit(ap->pdip, circ);
			/* Record failure but keep processing other branches */
			rv = EIO;
			continue;
		}

		ASSERT(ddi_get_parent(dip) == ap->pdip);

		ndi_devi_exit(ap->pdip, circ);

		if (ap->flags & DEVI_BRANCH_CONFIGURE) {
			int error = e_ddi_branch_configure(dip, &ap->fdip, 0);
			/* Preserve only the first configure error */
			if (error && rv == 0)
				rv = error;
		}

		/*
		 * Invoke devi_branch_callback() (if it exists) only for
		 * newly created branches
		 */
		if (bp->devi_branch_callback && !exists)
			bp->devi_branch_callback(dip, bp->arg, 0);
	}

	kmem_free(path, MAXPATHLEN);

	return (rv);
}
2259 
/*
 * Recursively create a branch of self-identifying (SID) devinfo nodes
 * under pdip, driven by the caller-supplied sid_branch_create callback.
 * When rdipp is non-NULL this invocation creates the branch root and
 * returns it through *rdipp; recursive calls pass rdipp == NULL.
 *
 * The callback's DDI_WALK_* return value steers construction:
 * CONTINUE/PRUNESIB create children, TERMINATE and PRUNECHILD do not.
 * The value returned by this function is the DDI_WALK_* code the
 * *parent* invocation should act on (ERROR, PRUNESIB or CONTINUE).
 */
static int
sid_node_create(dev_info_t *pdip, devi_branch_t *bp, dev_info_t **rdipp)
{
	int			rv, circ, len;
	int			i, flags, ret;
	dev_info_t		*dip;
	char			*nbuf;
	char			*path;
	static const char	*noname = "<none>";

	ASSERT(pdip);
	ASSERT(DEVI_BUSY_OWNED(pdip));

	flags = 0;

	/*
	 * Creating the root of a branch ?
	 */
	if (rdipp) {
		*rdipp = NULL;
		flags = DEVI_BRANCH_ROOT;
	}

	/* Allocate a proto node; the callback sets its properties */
	ndi_devi_alloc_sleep(pdip, (char *)noname, DEVI_SID_NODEID, &dip);
	rv = bp->create.sid_branch_create(dip, bp->arg, flags);

	nbuf = kmem_alloc(OBP_MAXDRVNAME, KM_SLEEP);

	if (rv == DDI_WALK_ERROR) {
		cmn_err(CE_WARN, "e_ddi_branch_create: Error setting"
		    " properties on devinfo node %p",  (void *)dip);
		goto fail;
	}

	/* The callback must have set a "name" property on the node */
	len = OBP_MAXDRVNAME;
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "name", nbuf, &len)
	    != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: devinfo node %p has"
		    "no name property", (void *)dip);
		goto fail;
	}

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	if (ndi_devi_set_nodename(dip, nbuf, 0) != NDI_SUCCESS) {
		cmn_err(CE_WARN, "e_ddi_branch_create: cannot set name (%s)"
		    " for devinfo node %p", nbuf, (void *)dip);
		goto fail;
	}

	kmem_free(nbuf, OBP_MAXDRVNAME);

	/*
	 * Ignore bind failures just like boot does
	 */
	(void) ndi_devi_bind_driver(dip, 0);

	switch (rv) {
	case DDI_WALK_CONTINUE:
	case DDI_WALK_PRUNESIB:
		ndi_devi_enter(dip, &circ);

		/* Create children of dip until one returns non-CONTINUE */
		i = DDI_WALK_CONTINUE;
		for (; i == DDI_WALK_CONTINUE; ) {
			i = sid_node_create(dip, bp, NULL);
		}

		ASSERT(i == DDI_WALK_ERROR || i == DDI_WALK_PRUNESIB);
		if (i == DDI_WALK_ERROR)
			rv = i;
		/*
		 * If PRUNESIB stop creating siblings
		 * of dip's child. Subsequent walk behavior
		 * is determined by rv returned by dip.
		 */

		ndi_devi_exit(dip, circ);
		break;
	case DDI_WALK_TERMINATE:
		/*
		 * Don't create children and ask our parent
		 * to not create siblings either.
		 */
		rv = DDI_WALK_PRUNESIB;
		break;
	case DDI_WALK_PRUNECHILD:
		/*
		 * Don't create children, but ask parent to continue
		 * with siblings.
		 */
		rv = DDI_WALK_CONTINUE;
		break;
	default:
		ASSERT(0);
		break;
	}

	if (rdipp)
		*rdipp = dip;

	/*
	 * Set device offline - only the "configure" op should cause an attach.
	 * Note that it is safe to set the dip offline without checking
	 * for either device contract or layered driver (LDI) based constraints
	 * since there cannot be any contracts or LDI opens of this device.
	 * This is because this node is a newly created dip with the parent busy
	 * held, so no other thread can come in and attach this dip. A dip that
	 * has never been attached cannot have contracts since by definition
	 * a device contract (an agreement between a process and a device minor
	 * node) can only be created against a device that has minor nodes
	 * i.e is attached. Similarly an LDI open will only succeed if the
	 * dip is attached. We assert below that the dip is not attached.
	 */
	ASSERT(i_ddi_node_state(dip) < DS_ATTACHED);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = set_infant_dip_offline(dip, path);
	ASSERT(ret == DDI_SUCCESS);
	kmem_free(path, MAXPATHLEN);

	return (rv);
fail:
	/* Unwind: free the proto node and the name buffer */
	(void) ndi_devi_free(dip);
	kmem_free(nbuf, OBP_MAXDRVNAME);
	return (DDI_WALK_ERROR);
}
2385 
2386 static int
2387 create_sid_branch(
2388 	dev_info_t	*pdip,
2389 	devi_branch_t	*bp,
2390 	dev_info_t	**dipp,
2391 	uint_t		flags)
2392 {
2393 	int		rv = 0, state = DDI_WALK_CONTINUE;
2394 	dev_info_t	*rdip;
2395 
2396 	while (state == DDI_WALK_CONTINUE) {
2397 		int	circ;
2398 
2399 		ndi_devi_enter(pdip, &circ);
2400 
2401 		state = sid_node_create(pdip, bp, &rdip);
2402 		if (rdip == NULL) {
2403 			ndi_devi_exit(pdip, circ);
2404 			ASSERT(state == DDI_WALK_ERROR);
2405 			break;
2406 		}
2407 
2408 		e_ddi_branch_hold(rdip);
2409 
2410 		ndi_devi_exit(pdip, circ);
2411 
2412 		if (flags & DEVI_BRANCH_CONFIGURE) {
2413 			int error = e_ddi_branch_configure(rdip, dipp, 0);
2414 			if (error && rv == 0)
2415 				rv = error;
2416 		}
2417 
2418 		/*
2419 		 * devi_branch_callback() is optional
2420 		 */
2421 		if (bp->devi_branch_callback)
2422 			bp->devi_branch_callback(rdip, bp->arg, 0);
2423 	}
2424 
2425 	ASSERT(state == DDI_WALK_ERROR || state == DDI_WALK_PRUNESIB);
2426 
2427 	return (state == DDI_WALK_ERROR ? EIO : rv);
2428 }
2429 
2430 int
2431 e_ddi_branch_create(
2432 	dev_info_t	*pdip,
2433 	devi_branch_t	*bp,
2434 	dev_info_t	**dipp,
2435 	uint_t		flags)
2436 {
2437 	int prom_devi, sid_devi, error;
2438 
2439 	if (pdip == NULL || bp == NULL || bp->type == 0)
2440 		return (EINVAL);
2441 
2442 	prom_devi = (bp->type == DEVI_BRANCH_PROM) ? 1 : 0;
2443 	sid_devi = (bp->type == DEVI_BRANCH_SID) ? 1 : 0;
2444 
2445 	if (prom_devi && bp->create.prom_branch_select == NULL)
2446 		return (EINVAL);
2447 	else if (sid_devi && bp->create.sid_branch_create == NULL)
2448 		return (EINVAL);
2449 	else if (!prom_devi && !sid_devi)
2450 		return (EINVAL);
2451 
2452 	if (flags & DEVI_BRANCH_EVENT)
2453 		return (EINVAL);
2454 
2455 	if (prom_devi) {
2456 		struct pta pta = {0};
2457 
2458 		pta.pdip = pdip;
2459 		pta.bp = bp;
2460 		pta.flags = flags;
2461 
2462 		error = prom_tree_access(create_prom_branch, &pta, NULL);
2463 
2464 		if (dipp)
2465 			*dipp = pta.fdip;
2466 		else if (pta.fdip)
2467 			ndi_rele_devi(pta.fdip);
2468 	} else {
2469 		error = create_sid_branch(pdip, bp, dipp, flags);
2470 	}
2471 
2472 	return (error);
2473 }
2474 
2475 int
2476 e_ddi_branch_configure(dev_info_t *rdip, dev_info_t **dipp, uint_t flags)
2477 {
2478 	int		circ, rv;
2479 	char		*devnm;
2480 	dev_info_t	*pdip;
2481 
2482 	if (dipp)
2483 		*dipp = NULL;
2484 
2485 	if (rdip == NULL || flags != 0 || (flags & DEVI_BRANCH_EVENT))
2486 		return (EINVAL);
2487 
2488 	pdip = ddi_get_parent(rdip);
2489 
2490 	ndi_devi_enter(pdip, &circ);
2491 
2492 	if (!e_ddi_branch_held(rdip)) {
2493 		ndi_devi_exit(pdip, circ);
2494 		cmn_err(CE_WARN, "e_ddi_branch_configure: "
2495 		    "dip(%p) not held", (void *)rdip);
2496 		return (EINVAL);
2497 	}
2498 
2499 	if (i_ddi_node_state(rdip) < DS_INITIALIZED) {
2500 		/*
2501 		 * First attempt to bind a driver. If we fail, return
2502 		 * success (On some platforms, dips for some device
2503 		 * types (CPUs) may not have a driver)
2504 		 */
2505 		if (ndi_devi_bind_driver(rdip, 0) != NDI_SUCCESS) {
2506 			ndi_devi_exit(pdip, circ);
2507 			return (0);
2508 		}
2509 
2510 		if (ddi_initchild(pdip, rdip) != DDI_SUCCESS) {
2511 			rv = NDI_FAILURE;
2512 			goto out;
2513 		}
2514 	}
2515 
2516 	ASSERT(i_ddi_node_state(rdip) >= DS_INITIALIZED);
2517 
2518 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
2519 
2520 	(void) ddi_deviname(rdip, devnm);
2521 
2522 	if ((rv = ndi_devi_config_one(pdip, devnm+1, &rdip,
2523 	    NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) {
2524 		/* release hold from ndi_devi_config_one() */
2525 		ndi_rele_devi(rdip);
2526 	}
2527 
2528 	kmem_free(devnm, MAXNAMELEN + 1);
2529 out:
2530 	if (rv != NDI_SUCCESS && dipp) {
2531 		ndi_hold_devi(rdip);
2532 		*dipp = rdip;
2533 	}
2534 	ndi_devi_exit(pdip, circ);
2535 	return (ndi2errno(rv));
2536 }
2537 
2538 void
2539 e_ddi_branch_hold(dev_info_t *rdip)
2540 {
2541 	if (e_ddi_branch_held(rdip)) {
2542 		cmn_err(CE_WARN, "e_ddi_branch_hold: branch already held");
2543 		return;
2544 	}
2545 
2546 	mutex_enter(&DEVI(rdip)->devi_lock);
2547 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) == 0) {
2548 		DEVI(rdip)->devi_flags |= DEVI_BRANCH_HELD;
2549 		DEVI(rdip)->devi_ref++;
2550 	}
2551 	ASSERT(DEVI(rdip)->devi_ref > 0);
2552 	mutex_exit(&DEVI(rdip)->devi_lock);
2553 }
2554 
2555 int
2556 e_ddi_branch_held(dev_info_t *rdip)
2557 {
2558 	int rv = 0;
2559 
2560 	mutex_enter(&DEVI(rdip)->devi_lock);
2561 	if ((DEVI(rdip)->devi_flags & DEVI_BRANCH_HELD) &&
2562 	    DEVI(rdip)->devi_ref > 0) {
2563 		rv = 1;
2564 	}
2565 	mutex_exit(&DEVI(rdip)->devi_lock);
2566 
2567 	return (rv);
2568 }
2569 void
2570 e_ddi_branch_rele(dev_info_t *rdip)
2571 {
2572 	mutex_enter(&DEVI(rdip)->devi_lock);
2573 	DEVI(rdip)->devi_flags &= ~DEVI_BRANCH_HELD;
2574 	DEVI(rdip)->devi_ref--;
2575 	mutex_exit(&DEVI(rdip)->devi_lock);
2576 }
2577 
2578 int
2579 e_ddi_branch_unconfigure(
2580 	dev_info_t *rdip,
2581 	dev_info_t **dipp,
2582 	uint_t flags)
2583 {
2584 	int	circ, rv;
2585 	int	destroy;
2586 	char	*devnm;
2587 	uint_t	nflags;
2588 	dev_info_t *pdip;
2589 
2590 	if (dipp)
2591 		*dipp = NULL;
2592 
2593 	if (rdip == NULL)
2594 		return (EINVAL);
2595 
2596 	pdip = ddi_get_parent(rdip);
2597 
2598 	ASSERT(pdip);
2599 
2600 	/*
2601 	 * Check if caller holds pdip busy - can cause deadlocks during
2602 	 * devfs_clean()
2603 	 */
2604 	if (DEVI_BUSY_OWNED(pdip)) {
2605 		cmn_err(CE_WARN, "e_ddi_branch_unconfigure: failed: parent"
2606 		    " devinfo node(%p) is busy held", (void *)pdip);
2607 		return (EINVAL);
2608 	}
2609 
2610 	destroy = (flags & DEVI_BRANCH_DESTROY) ? 1 : 0;
2611 
2612 	devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
2613 
2614 	ndi_devi_enter(pdip, &circ);
2615 	(void) ddi_deviname(rdip, devnm);
2616 	ndi_devi_exit(pdip, circ);
2617 
2618 	/*
2619 	 * ddi_deviname() returns a component name with / prepended.
2620 	 */
2621 	(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
2622 
2623 	ndi_devi_enter(pdip, &circ);
2624 
2625 	/*
2626 	 * Recreate device name as it may have changed state (init/uninit)
2627 	 * when parent busy lock was dropped for devfs_clean()
2628 	 */
2629 	(void) ddi_deviname(rdip, devnm);
2630 
2631 	if (!e_ddi_branch_held(rdip)) {
2632 		kmem_free(devnm, MAXNAMELEN + 1);
2633 		ndi_devi_exit(pdip, circ);
2634 		cmn_err(CE_WARN, "e_ddi_%s_branch: dip(%p) not held",
2635 		    destroy ? "destroy" : "unconfigure", (void *)rdip);
2636 		return (EINVAL);
2637 	}
2638 
2639 	/*
2640 	 * Release hold on the branch. This is ok since we are holding the
2641 	 * parent busy. If rdip is not removed, we must do a hold on the
2642 	 * branch before returning.
2643 	 */
2644 	e_ddi_branch_rele(rdip);
2645 
2646 	nflags = NDI_DEVI_OFFLINE;
2647 	if (destroy || (flags & DEVI_BRANCH_DESTROY)) {
2648 		nflags |= NDI_DEVI_REMOVE;
2649 		destroy = 1;
2650 	} else {
2651 		nflags |= NDI_UNCONFIG;		/* uninit but don't remove */
2652 	}
2653 
2654 	if (flags & DEVI_BRANCH_EVENT)
2655 		nflags |= NDI_POST_EVENT;
2656 
2657 	if (i_ddi_devi_attached(pdip) &&
2658 	    (i_ddi_node_state(rdip) >= DS_INITIALIZED)) {
2659 		rv = ndi_devi_unconfig_one(pdip, devnm+1, dipp, nflags);
2660 	} else {
2661 		rv = e_ddi_devi_unconfig(rdip, dipp, nflags);
2662 		if (rv == NDI_SUCCESS) {
2663 			ASSERT(!destroy || ddi_get_child(rdip) == NULL);
2664 			rv = ndi_devi_offline(rdip, nflags);
2665 		}
2666 	}
2667 
2668 	if (!destroy || rv != NDI_SUCCESS) {
2669 		/* The dip still exists, so do a hold */
2670 		e_ddi_branch_hold(rdip);
2671 	}
2672 out:
2673 	kmem_free(devnm, MAXNAMELEN + 1);
2674 	ndi_devi_exit(pdip, circ);
2675 	return (ndi2errno(rv));
2676 }
2677 
2678 int
2679 e_ddi_branch_destroy(dev_info_t *rdip, dev_info_t **dipp, uint_t flag)
2680 {
2681 	return (e_ddi_branch_unconfigure(rdip, dipp,
2682 	    flag|DEVI_BRANCH_DESTROY));
2683 }
2684 
2685 /*
2686  * Number of chains for hash table
2687  */
2688 #define	NUMCHAINS	17
2689 
2690 /*
2691  * Devinfo busy arg
2692  */
2693 struct devi_busy {
2694 	int dv_total;
2695 	int s_total;
2696 	mod_hash_t *dv_hash;
2697 	mod_hash_t *s_hash;
2698 	int (*callback)(dev_info_t *, void *, uint_t);
2699 	void *arg;
2700 };
2701 
2702 static int
2703 visit_dip(dev_info_t *dip, void *arg)
2704 {
2705 	uintptr_t sbusy, dvbusy, ref;
2706 	struct devi_busy *bsp = arg;
2707 
2708 	ASSERT(bsp->callback);
2709 
2710 	/*
2711 	 * A dip cannot be busy if its reference count is 0
2712 	 */
2713 	if ((ref = e_ddi_devi_holdcnt(dip)) == 0) {
2714 		return (bsp->callback(dip, bsp->arg, 0));
2715 	}
2716 
2717 	if (mod_hash_find(bsp->dv_hash, dip, (mod_hash_val_t *)&dvbusy))
2718 		dvbusy = 0;
2719 
2720 	/*
2721 	 * To catch device opens currently maintained on specfs common snodes.
2722 	 */
2723 	if (mod_hash_find(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
2724 		sbusy = 0;
2725 
2726 #ifdef	DEBUG
2727 	if (ref < sbusy || ref < dvbusy) {
2728 		cmn_err(CE_WARN, "dip(%p): sopen = %lu, dvopen = %lu "
2729 		    "dip ref = %lu\n", (void *)dip, sbusy, dvbusy, ref);
2730 	}
2731 #endif
2732 
2733 	dvbusy = (sbusy > dvbusy) ? sbusy : dvbusy;
2734 
2735 	return (bsp->callback(dip, bsp->arg, dvbusy));
2736 }
2737 
/*
 * specfs snode-table walk callback: accumulate this snode's device open
 * count into the per-dip total in bsp->s_hash and into the bsp->s_total
 * grand total. Always continues the walk.
 */
static int
visit_snode(struct snode *sp, void *arg)
{
	uintptr_t sbusy;
	dev_info_t *dip;
	int count;
	struct devi_busy *bsp = arg;

	ASSERT(sp);

	/*
	 * The stable lock is held. This prevents
	 * the snode and its associated dip from
	 * going away.
	 */
	dip = NULL;
	count = spec_devi_open_count(sp, &dip);

	if (count <= 0)
		return (DDI_WALK_CONTINUE);

	ASSERT(dip);

	/*
	 * mod_hash has no update operation: remove any existing total for
	 * this dip, fold in this snode's count, and re-insert.
	 */
	if (mod_hash_remove(bsp->s_hash, dip, (mod_hash_val_t *)&sbusy))
		sbusy = count;
	else
		sbusy += count;

	if (mod_hash_insert(bsp->s_hash, dip, (mod_hash_val_t)sbusy)) {
		cmn_err(CE_WARN, "%s: s_hash insert failed: dip=0x%p, "
		    "sbusy = %lu", "e_ddi_branch_referenced",
		    (void *)dip, sbusy);
	}

	bsp->s_total += count;

	return (DDI_WALK_CONTINUE);
}
2776 
2777 static void
2778 visit_dvnode(struct dv_node *dv, void *arg)
2779 {
2780 	uintptr_t dvbusy;
2781 	uint_t count;
2782 	struct vnode *vp;
2783 	struct devi_busy *bsp = arg;
2784 
2785 	ASSERT(dv && dv->dv_devi);
2786 
2787 	vp = DVTOV(dv);
2788 
2789 	mutex_enter(&vp->v_lock);
2790 	count = vp->v_count;
2791 	mutex_exit(&vp->v_lock);
2792 
2793 	if (!count)
2794 		return;
2795 
2796 	if (mod_hash_remove(bsp->dv_hash, dv->dv_devi,
2797 	    (mod_hash_val_t *)&dvbusy))
2798 		dvbusy = count;
2799 	else
2800 		dvbusy += count;
2801 
2802 	if (mod_hash_insert(bsp->dv_hash, dv->dv_devi,
2803 	    (mod_hash_val_t)dvbusy)) {
2804 		cmn_err(CE_WARN, "%s: dv_hash insert failed: dip=0x%p, "
2805 		    "dvbusy=%lu", "e_ddi_branch_referenced",
2806 		    (void *)dv->dv_devi, dvbusy);
2807 	}
2808 
2809 	bsp->dv_total += count;
2810 }
2811 
2812 /*
2813  * Returns reference count on success or -1 on failure.
2814  */
2815 int
2816 e_ddi_branch_referenced(
2817 	dev_info_t *rdip,
2818 	int (*callback)(dev_info_t *dip, void *arg, uint_t ref),
2819 	void *arg)
2820 {
2821 	int circ;
2822 	char *path;
2823 	dev_info_t *pdip;
2824 	struct devi_busy bsa = {0};
2825 
2826 	ASSERT(rdip);
2827 
2828 	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2829 
2830 	ndi_hold_devi(rdip);
2831 
2832 	pdip = ddi_get_parent(rdip);
2833 
2834 	ASSERT(pdip);
2835 
2836 	/*
2837 	 * Check if caller holds pdip busy - can cause deadlocks during
2838 	 * devfs_walk()
2839 	 */
2840 	if (!e_ddi_branch_held(rdip) || DEVI_BUSY_OWNED(pdip)) {
2841 		cmn_err(CE_WARN, "e_ddi_branch_referenced: failed: "
2842 		    "devinfo branch(%p) not held or parent busy held",
2843 		    (void *)rdip);
2844 		ndi_rele_devi(rdip);
2845 		kmem_free(path, MAXPATHLEN);
2846 		return (-1);
2847 	}
2848 
2849 	ndi_devi_enter(pdip, &circ);
2850 	(void) ddi_pathname(rdip, path);
2851 	ndi_devi_exit(pdip, circ);
2852 
2853 	bsa.dv_hash = mod_hash_create_ptrhash("dv_node busy hash", NUMCHAINS,
2854 	    mod_hash_null_valdtor, sizeof (struct dev_info));
2855 
2856 	bsa.s_hash = mod_hash_create_ptrhash("snode busy hash", NUMCHAINS,
2857 	    mod_hash_null_valdtor, sizeof (struct snode));
2858 
2859 	if (devfs_walk(path, visit_dvnode, &bsa)) {
2860 		cmn_err(CE_WARN, "e_ddi_branch_referenced: "
2861 		    "devfs walk failed for: %s", path);
2862 		kmem_free(path, MAXPATHLEN);
2863 		bsa.s_total = bsa.dv_total = -1;
2864 		goto out;
2865 	}
2866 
2867 	kmem_free(path, MAXPATHLEN);
2868 
2869 	/*
2870 	 * Walk the snode table to detect device opens, which are currently
2871 	 * maintained on specfs common snodes.
2872 	 */
2873 	spec_snode_walk(visit_snode, &bsa);
2874 
2875 	if (callback == NULL)
2876 		goto out;
2877 
2878 	bsa.callback = callback;
2879 	bsa.arg = arg;
2880 
2881 	if (visit_dip(rdip, &bsa) == DDI_WALK_CONTINUE) {
2882 		ndi_devi_enter(rdip, &circ);
2883 		ddi_walk_devs(ddi_get_child(rdip), visit_dip, &bsa);
2884 		ndi_devi_exit(rdip, circ);
2885 	}
2886 
2887 out:
2888 	ndi_rele_devi(rdip);
2889 	mod_hash_destroy_ptrhash(bsa.s_hash);
2890 	mod_hash_destroy_ptrhash(bsa.dv_hash);
2891 	return (bsa.s_total > bsa.dv_total ? bsa.s_total : bsa.dv_total);
2892 }
2893