xref: /titanic_52/usr/src/uts/sun4u/io/opl_cfg.c (revision bea83d026ee1bd1b2a2419e1d0232f107a5d7d9b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/conf.h>
29 #include <sys/kmem.h>
30 #include <sys/debug.h>
31 #include <sys/modctl.h>
32 #include <sys/autoconf.h>
33 #include <sys/hwconf.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/sunndi.h>
38 #include <sys/ndi_impldefs.h>
39 #include <sys/machsystm.h>
40 #include <sys/fcode.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/opl_cfg.h>
44 #include <sys/scfd/scfostoescf.h>
45 
46 static unsigned int		opl_cfg_inited;
47 static opl_board_cfg_t		opl_boards[HWD_SBS_PER_DOMAIN];
48 
49 /*
50  * Module control operations
51  */
52 
53 extern struct mod_ops mod_miscops;
54 
55 static struct modlmisc modlmisc = {
56 	&mod_miscops,				/* Type of module */
57 	"OPL opl_cfg %I%"
58 };
59 
60 static struct modlinkage modlinkage = {
61 	MODREV_1, (void *)&modlmisc, NULL
62 };
63 
64 static int	opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
65 static int	opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
66 static int	opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
67 static int	opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
68 
69 static int	opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
70 static int	opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
71 static int	opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);
72 
73 static int	opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
74 
75 static int	opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
76 static int	opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
77 
78 static int	opl_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
79 				ddi_device_acc_attr_t *, ddi_acc_handle_t *);
80 static void	opl_unmap_phys(ddi_acc_handle_t *);
81 static int	opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);
82 static int	opl_master_interrupt(dev_info_t *, fco_handle_t, fc_ci_t *);
83 
84 extern int	prom_get_fcode_size(char *);
85 extern int	prom_get_fcode(char *, char *);
86 
87 static int	master_interrupt_init(uint32_t, uint32_t);
88 
89 #define	PROBE_STR_SIZE	64
90 #define	UNIT_ADDR_SIZE	64
91 
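/*
 * Dispatch table for the OPL-specific Fcode client services. opl_fc_do_op()
 * first offers each request to the generic fc_ops(); only requests the
 * generic layer does not service are looked up here by service name.
 */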
92 opl_fc_ops_t	opl_fc_ops[] = {
93 
94 	{	FC_MAP_IN,		opl_map_in},
95 	{	FC_MAP_OUT,		opl_map_out},
96 	{	"rx@",			opl_register_fetch},
97 	{	FC_RL_FETCH,		opl_register_fetch},
98 	{	FC_RW_FETCH,		opl_register_fetch},
99 	{	FC_RB_FETCH,		opl_register_fetch},
100 	{	"rx!",			opl_register_store},
101 	{	FC_RL_STORE,		opl_register_store},
102 	{	FC_RW_STORE,		opl_register_store},
103 	{	FC_RB_STORE,		opl_register_store},
104 	{	"claim-memory",		opl_claim_memory},
105 	{	"release-memory",	opl_release_memory},
106 	{	"vtop",			opl_vtop},
107 	{	FC_CONFIG_CHILD,	opl_config_child},
108 	{	FC_GET_FCODE_SIZE,	opl_get_fcode_size},
109 	{	FC_GET_FCODE,		opl_get_fcode},
110 	{	"get-hwd-va",		opl_get_hwd_va},
111 	{	"master-interrupt",	opl_master_interrupt},
112 	{	NULL,			NULL}
113 
114 };
115 
116 extern caddr_t	efcode_vaddr;
117 extern int	efcode_size;
118 
119 #ifdef DEBUG
120 #define	HWDDUMP_OFFSETS		1
121 #define	HWDDUMP_ALL_STATUS	2
122 #define	HWDDUMP_CHUNKS		3
123 #define	HWDDUMP_SBP		4
124 
125 int		hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
126 #endif
127 
128 static int	master_interrupt_inited = 0;
129 
130 int
131 _init()
132 {
133 	int	err = 0;
134 
135 	/*
136 	 * Create a resource map for the contiguous memory allocated
137 	 * at start-of-day in startup.c
138 	 */
139 	err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
140 	if (err == NDI_FAILURE) {
141 		cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
142 		return (1);
143 	}
144 
145 	/*
146 	 * Put the allocated memory into the pool.
147 	 */
148 	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
149 	    (uint64_t)efcode_size, "opl-fcodemem", 0);
150 
151 	if ((err = mod_install(&modlinkage)) != 0) {
152 		cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
153 		(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
154 	}
155 
156 	return (err);
157 }
158 
159 int
160 _fini(void)
161 {
162 	int ret;
163 
164 	ret = (mod_remove(&modlinkage));
165 	if (ret != 0)
166 		return (ret);
167 
168 	(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
169 
170 	return (ret);
171 }
172 
173 int
174 _info(struct modinfo *modinfop)
176 {
177 	return (mod_info(&modlinkage, modinfop));
178 }
179 
180 #ifdef DEBUG
181 static void
182 opl_dump_hwd(opl_probe_t *probe)
183 {
184 	hwd_header_t		*hdrp;
185 	hwd_sb_status_t		*statp;
186 	hwd_domain_info_t	*dinfop;
187 	hwd_sb_t		*sbp;
188 	hwd_cpu_chip_t		*chips;
189 	hwd_pci_ch_t		*channels;
190 	int			board, i, status;
191 
192 	board = probe->pr_board;
193 
194 	hdrp = probe->pr_hdr;
195 	statp = probe->pr_sb_status;
196 	dinfop = probe->pr_dinfo;
197 	sbp = probe->pr_sb;
198 
199 	printf("HWD: board %d\n", board);
200 	printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
201 	printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
202 	    hdrp->hdr_version.minor);
203 
204 	if (hwddump_flags & HWDDUMP_OFFSETS) {
205 		printf("HWD:status offset = 0x%x\n",
206 		    hdrp->hdr_sb_status_offset);
207 		printf("HWD:domain offset = 0x%x\n",
208 		    hdrp->hdr_domain_info_offset);
209 		printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
210 	}
211 
212 	if (hwddump_flags & HWDDUMP_SBP)
213 		printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);
214 
215 	if (hwddump_flags & HWDDUMP_ALL_STATUS) {
216 		int bd;
217 		printf("HWD:board status =");
218 		for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
219 			printf("%x ", statp->sb_status[bd]);
220 		printf("\n");
221 	} else {
222 		printf("HWD:board status = %d\n", statp->sb_status[board]);
223 	}
224 
225 	printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
226 	printf("HWD:platform = %s\n", dinfop->dinf_platform_token);
227 
228 	printf("HWD:chip status:\n");
229 	chips = &sbp->sb_cmu.cmu_cpu_chips[0];
230 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
231 
232 		status = chips[i].chip_status;
233 		printf("chip[%d] = ", i);
234 		if (HWD_STATUS_NONE(status))
235 			printf("none");
236 		else if (HWD_STATUS_FAILED(status))
237 			printf("fail");
238 		else if (HWD_STATUS_OK(status))
239 			printf("ok");
240 		printf("\n");
241 	}
242 
243 	if (hwddump_flags & HWDDUMP_CHUNKS) {
244 		int chunk;
245 		hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
246 		printf("HWD:chunks:\n");
247 		for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
248 			printf("\t%d 0x%lx 0x%lx\n", chunk,
249 			    mem->mem_chunks[chunk].chnk_start_address,
250 			    mem->mem_chunks[chunk].chnk_size);
251 	}
252 
253 	printf("HWD:channel status:\n");
254 	channels = &sbp->sb_pci_ch[0];
255 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
256 
257 		status = channels[i].pci_status;
258 		printf("channels[%d] = ", i);
259 		if (HWD_STATUS_NONE(status))
260 			printf("none");
261 		else if (HWD_STATUS_FAILED(status))
262 			printf("fail");
263 		else if (HWD_STATUS_OK(status))
264 			printf("ok");
265 		printf("\n");
266 	}
267 	printf("channels[%d] = ", i);
268 	status = sbp->sb_cmu.cmu_ch.chan_status;
269 	if (HWD_STATUS_NONE(status))
270 		printf("none");
271 	else if (HWD_STATUS_FAILED(status))
272 		printf("fail");
273 	else if (HWD_STATUS_OK(status))
274 		printf("ok");
275 	printf("\n");
276 }
277 #endif /* DEBUG */
278 
279 #ifdef UCTEST
280 	/*
281 	 * For SesamI debugging, just map the SRAM directly to a kernel
282 	 * VA and read it out from there
283 	 */
284 
285 #include <sys/vmem.h>
286 #include <vm/seg_kmem.h>
287 
288 /*
289  * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
290  * at page boundaries. So, we use a base address of 0x4081F1322000LL.
291  * Note that this has to match the HWD base pa set in .sesami-common-defs.
292  *
293  * The size specified for the HWD in the SCF spec is 36K. But since
294  * we adjusted the base address by 4K, we need to use 40K for the
295  * mapping size to cover the HWD. And 40K is also a multiple of the
296  * base page size.
297  */
298 #define	OPL_HWD_BASE(lsb)       \
299 (0x4081F1322000LL | (((uint64_t)(lsb)) << 40))
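/*
 * For illustration: with the 40-bit LSB shift above, OPL_HWD_BASE(0) is
 * 0x4081F1322000 and OPL_HWD_BASE(2) is 0x4281F1322000.
 */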
300 
301 	void    *opl_hwd_vaddr;
302 #endif /* UCTEST */
303 
304 /*
305  * Get the hardware descriptor from SCF.
306  */
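/*
 * The descriptor for a board is fetched at most once: the first call looks
 * up scf_service_getinfo() with modgetsymvalue(), reads the raw HWD image
 * into a KM_SLEEP allocation and caches it in opl_boards[board].cfg_hwd.
 * Later calls (and the UCTEST direct-SRAM path) reuse the cached copy, and
 * the individual section pointers are derived from the header offsets.
 */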
307 
308 /*ARGSUSED*/
309 int
310 opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
311 	hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
312 {
313 	static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
314 	    void *) = NULL;
315 	void *hwdp;
316 
317 	uint32_t key = KEY_ESCF;	/* required value */
318 	uint8_t  type = 0x40;		/* SUB_OS_RECEIVE_HWD */
319 	uint32_t transid = board;
320 	uint32_t datasize = HWD_DATA_SIZE;
321 
322 	hwd_header_t		*hd;
323 	hwd_sb_status_t		*st;
324 	hwd_domain_info_t	*di;
325 	hwd_sb_t		*sb;
326 
327 	int	ret;
328 
329 	if (opl_boards[board].cfg_hwd == NULL) {
330 #ifdef UCTEST
331 		/*
332 		 * Just map the HWD in SRAM to a kernel VA
333 		 */
334 
335 		size_t			size;
336 		pfn_t			pfn;
337 
338 		size = 0xA000;
339 
340 		opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
341 		if (opl_hwd_vaddr == NULL) {
342 			cmn_err(CE_NOTE, "No space for HWD");
343 			return (-1);
344 		}
345 
346 		pfn = btop(OPL_HWD_BASE(board));
347 		hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
348 		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
349 
350 		hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
351 		opl_boards[board].cfg_hwd = hwdp;
352 		ret = 0;
353 #else
354 
355 		/* find the scf_service_getinfo() function */
356 		if (getinfop == NULL)
357 			getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
358 			    uint32_t *,
359 			    void *))modgetsymvalue("scf_service_getinfo", 0);
360 
361 		if (getinfop == NULL)
362 			return (-1);
363 
364 		/* allocate memory to receive the data */
365 		hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);
366 
367 		/* get the HWD */
368 		ret = (*getinfop)(key, type, transid, &datasize, hwdp);
369 		if (ret == 0)
370 			opl_boards[board].cfg_hwd = hwdp;
371 		else
372 			kmem_free(hwdp, HWD_DATA_SIZE);
373 #endif
374 	} else {
375 		hwdp = opl_boards[board].cfg_hwd;
376 		ret = 0;
377 	}
378 
379 	/* copy the data to the destination */
380 	if (ret == 0) {
381 		hd = (hwd_header_t *)hwdp;
382 		st = (hwd_sb_status_t *)
383 		    ((char *)hwdp + hd->hdr_sb_status_offset);
384 		di = (hwd_domain_info_t *)
385 		    ((char *)hwdp + hd->hdr_domain_info_offset);
386 		sb = (hwd_sb_t *)
387 		    ((char *)hwdp + hd->hdr_sb_info_offset);
388 		if (hdrp != NULL)
389 			*hdrp = hd;
390 		if (statp != NULL)
391 			*statp = st;
392 		if (dinfop != NULL)
393 			*dinfop = di;
394 		if (sbp != NULL)
395 			*sbp = sb;
396 	}
397 
398 	return (ret);
399 }
400 
401 /*
402  * The opl_probe_t probe structure is used to pass all sorts of parameters
403  * to callback functions during probing. It also contains a snapshot of
404  * the hardware descriptor that is taken at the beginning of a probe.
405  */
406 static int
407 opl_probe_init(opl_probe_t *probe)
408 {
409 	hwd_header_t		**hdrp;
410 	hwd_sb_status_t		**statp;
411 	hwd_domain_info_t	**dinfop;
412 	hwd_sb_t		**sbp;
413 	int			board, ret;
414 
415 	board = probe->pr_board;
416 
417 	hdrp = &probe->pr_hdr;
418 	statp = &probe->pr_sb_status;
419 	dinfop = &probe->pr_dinfo;
420 	sbp = &probe->pr_sb;
421 
422 	/*
423 	 * Read the hardware descriptor.
424 	 */
425 	ret = opl_read_hwd(board, hdrp, statp, dinfop, sbp);
426 	if (ret != 0) {
427 
428 		cmn_err(CE_WARN, "IKP: failed to read HWD header");
429 		return (-1);
430 	}
431 
432 #ifdef DEBUG
433 	opl_dump_hwd(probe);
434 #endif
435 	return (0);
436 }
437 
438 /*
439  * This function is used to obtain pointers to relevant device nodes
440  * which are created by Solaris at boot time.
441  *
442  * This function walks the child nodes of a given node, extracts
443  * the "name" property, if it exists, and passes the node to a
444  * callback init function. The callback determines if this node is
445  * interesting or not. If it is, then a pointer to the node is
446  * stored away by the callback for use during unprobe.
447  *
448  * The DDI get property function allocates storage for the name
449  * property. That needs to be freed within this function.
450  */
451 static int
452 opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
453 {
454 	dev_info_t	*node;
455 	char		*name;
456 	int 		circ, ret;
457 	int		len;
458 
459 	ASSERT(parent != NULL);
460 
461 	/*
462 	 * Hold parent node busy to walk its child list
463 	 */
464 	ndi_devi_enter(parent, &circ);
465 	node = ddi_get_child(parent);
466 
467 	while (node != NULL) {
468 
469 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
470 		if (ret != DDI_PROP_SUCCESS) {
471 			/*
472 			 * The property does not exist for this node.
473 			 */
474 			node = ddi_get_next_sibling(node);
475 			continue;
476 		}
477 
478 		ret = init(node, name, len);
479 		kmem_free(name, len);
480 		if (ret != 0) {
481 
482 			ndi_devi_exit(parent, circ);
483 			return (-1);
484 		}
485 
486 		node = ddi_get_next_sibling(node);
487 	}
488 
489 	ndi_devi_exit(parent, circ);
490 
491 	return (0);
492 }
493 
494 /*
495  * This init function finds all the interesting nodes under the
496  * root node and stores pointers to them. The following nodes
497  * are considered interesting by this implementation:
498  *
499  *	"cmp"
500  *		These are nodes that represent processor chips.
501  *
502  *	"pci"
503  *		These are nodes that represent PCI leaves.
504  *
505  *	"pseudo-mc"
506  *		These are nodes that contain memory information.
507  */
508 static int
509 opl_init_root_nodes(dev_info_t *node, char *name, int len)
510 {
511 	int		portid, board, chip, channel, leaf;
512 	int		ret;
513 
514 	if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {
515 
516 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
517 		if (ret != DDI_PROP_SUCCESS)
518 			return (-1);
519 
520 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
521 		if (ret != DDI_PROP_SUCCESS)
522 			return (-1);
523 
524 		chip = OPL_CPU_CHIP(portid);
525 		opl_boards[board].cfg_cpu_chips[chip] = node;
526 
527 	} else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
528 
529 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
530 		if (ret != DDI_PROP_SUCCESS)
531 			return (-1);
532 
533 		board = OPL_IO_PORTID_TO_LSB(portid);
534 		channel = OPL_PORTID_TO_CHANNEL(portid);
535 
536 		if (channel == OPL_CMU_CHANNEL) {
537 
538 			opl_boards[board].cfg_cmuch_leaf = node;
539 
540 		} else {
541 
542 			leaf = OPL_PORTID_TO_LEAF(portid);
543 			opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
544 		}
545 	} else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {
546 
547 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
548 		if (ret != DDI_PROP_SUCCESS)
549 			return (-1);
550 
551 		ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));
552 
553 		opl_boards[board].cfg_pseudo_mc = node;
554 	}
555 
556 	return (0);
557 }
558 
559 /*
560  * This function initializes the OPL IKP feature. Currently, all it does
561  * is find the interesting nodes that Solaris has created at boot time
562  * for boards present at boot time and store pointers to them. This
563  * is useful if those boards are unprobed by DR.
564  */
565 int
566 opl_init_cfg()
567 {
568 	dev_info_t	*root;
569 
570 	if (opl_cfg_inited == 0) {
571 
572 		root = ddi_root_node();
573 		if ((opl_init_nodes(root, opl_init_root_nodes) != 0)) {
574 			cmn_err(CE_WARN, "IKP: init failed");
575 			return (1);
576 		}
577 
578 		opl_cfg_inited = 1;
579 	}
580 
581 	return (0);
582 }
583 
584 /*
585  * When DR is initialized, we walk the device tree and acquire a hold on
586  * all the nodes that are interesting to IKP. This is so that the corresponding
587  * branches cannot be deleted.
588  *
589  * The following function informs the walk about which nodes are interesting
590  * so that it can hold the corresponding branches.
591  */
592 static int
593 opl_hold_node(char *name)
594 {
595 	/*
596 	 * We only need to hold/release the following nodes which
597 	 * represent separate branches that must be managed.
598 	 */
599 	return ((strcmp(name, OPL_CPU_CHIP_NODE) == 0) ||
600 	    (strcmp(name, OPL_PSEUDO_MC_NODE) == 0) ||
601 	    (strcmp(name, OPL_PCI_LEAF_NODE) == 0));
602 }
603 
604 static int
605 opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
606 {
607 
608 	int	*holdp = (int *)arg;
609 	char	*name = ddi_node_name(rdip);
610 
611 	/*
612 	 * We only need to hold/release the following nodes which
613 	 * represent separate branches that must be managed.
614 	 */
615 	if (opl_hold_node(name) == 0) {
616 		/* Not of interest to us */
617 		return (DDI_WALK_PRUNECHILD);
618 	}
619 	if (*holdp) {
620 		ASSERT(!e_ddi_branch_held(rdip));
621 		e_ddi_branch_hold(rdip);
622 	} else {
623 		ASSERT(e_ddi_branch_held(rdip));
624 		e_ddi_branch_rele(rdip);
625 	}
626 
627 	return (DDI_WALK_PRUNECHILD);
628 }
629 
630 void
631 opl_hold_devtree()
632 {
633 	dev_info_t *dip;
634 	int circ;
635 	int hold = 1;
636 
637 	dip = ddi_root_node();
638 	ndi_devi_enter(dip, &circ);
639 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
640 	ndi_devi_exit(dip, circ);
641 }
642 
643 void
644 opl_release_devtree()
645 {
646 	dev_info_t *dip;
647 	int circ;
648 	int hold = 0;
649 
650 	dip = ddi_root_node();
651 	ndi_devi_enter(dip, &circ);
652 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
653 	ndi_devi_exit(dip, circ);
654 }
655 
656 /*
657  * This is a helper function that allows opl_create_node() to return a
658  * pointer to a newly created node to its caller.
659  */
660 /*ARGSUSED*/
661 static void
662 opl_set_node(dev_info_t *node, void *arg, uint_t flags)
663 {
664 	opl_probe_t	*probe;
665 
666 	probe = arg;
667 	probe->pr_node = node;
668 }
669 
670 /*
671  * Function to create a node in the device tree under a specified parent.
672  *
673  * e_ddi_branch_create() allows the creation of a whole branch with a
674  * single call of the function. However, we only use it to create one node
675  * at a time in the case of non-I/O device nodes. In other words, we
676  * create branches by repeatedly using this function. This makes the
677  * code more readable.
678  *
679  * The branch descriptor passed to e_ddi_branch_create() takes two
680  * callbacks. The create() callback is used to set the properties of a
681  * newly created node. The other callback is used to return a pointer
682  * to the newly created node. The create() callback is passed by the
683  * caller of this function based on the kind of node he wishes to
684  * create.
685  *
686  * e_ddi_branch_create() returns with the newly created node held. We
687  * only need to hold the top nodes of the branches we create. We release
688  * the hold for the others. E.g., the "cmp" node needs to be held. Since
689  * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
690  * nodes below it.
691  */
692 static dev_info_t *
693 opl_create_node(opl_probe_t *probe)
694 {
695 	devi_branch_t	branch;
696 
697 	probe->pr_node = NULL;
698 
699 	branch.arg = probe;
700 	branch.type = DEVI_BRANCH_SID;
701 	branch.create.sid_branch_create = probe->pr_create;
702 	branch.devi_branch_callback = opl_set_node;
703 
704 	if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
705 		return (NULL);
706 
707 	ASSERT(probe->pr_node != NULL);
708 
709 	if (probe->pr_hold == 0)
710 		e_ddi_branch_rele(probe->pr_node);
711 
712 	return (probe->pr_node);
713 }
714 
715 /*
716  * Function to tear down a whole branch rooted at the specified node.
717  *
718  * Although we create each node of a branch individually, we destroy
719  * a whole branch in one call. This is more efficient.
720  */
721 static int
722 opl_destroy_node(dev_info_t *node)
723 {
724 	if (e_ddi_branch_destroy(node, NULL, 0) != 0) {
725 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
726 		(void) ddi_pathname(node, path);
727 		cmn_err(CE_WARN, "OPL node removal failed: %s (%p)", path,
728 		    (void *)node);
729 		kmem_free(path, MAXPATHLEN);
730 		return (-1);
731 	}
732 
733 	return (0);
734 }
735 
736 /*
737  * Set the properties for a "cpu" node.
738  */
739 /*ARGSUSED*/
740 static int
741 opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
742 {
743 	opl_probe_t	*probe;
744 	hwd_cpu_chip_t	*chip;
745 	hwd_core_t	*core;
746 	hwd_cpu_t	*cpu;
747 	int		ret;
748 
749 	probe = arg;
750 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
751 	core = &chip->chip_cores[probe->pr_core];
752 	cpu = &core->core_cpus[probe->pr_cpu];
753 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
754 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);
755 
756 	OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
757 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);
758 
759 	OPL_UPDATE_PROP(string, node, "status", "okay");
760 
761 	return (DDI_WALK_TERMINATE);
762 }
763 
764 /*
765  * Create "cpu" nodes as child nodes of a given "core" node.
766  */
767 static int
768 opl_probe_cpus(opl_probe_t *probe)
769 {
770 	int		i;
771 	hwd_cpu_chip_t	*chip;
772 	hwd_core_t	*core;
773 	hwd_cpu_t	*cpus;
774 
775 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
776 	core = &chip->chip_cores[probe->pr_core];
777 	cpus = &core->core_cpus[0];
778 
779 	for (i = 0; i < HWD_CPUS_PER_CORE; i++) {
780 
781 		/*
782 		 * Olympus-C has 2 cpus per core.
783 		 * Jupiter has 4 cpus per core.
784 		 * For the Olympus-C based platform, we expect the cpu_status
785 		 * of the non-existent cpus to be set to missing.
786 		 */
787 		if (!HWD_STATUS_OK(cpus[i].cpu_status))
788 			continue;
789 
790 		probe->pr_create = opl_create_cpu;
791 		probe->pr_cpu = i;
792 		if (opl_create_node(probe) == NULL) {
793 
794 			cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
795 			    probe->pr_board, probe->pr_cpu_chip, probe->pr_core,
796 			    probe->pr_cpu);
797 			return (-1);
798 		}
799 	}
800 
801 	return (0);
802 }
803 
804 /*
805  * Set the properties for a "core" node.
806  */
807 /*ARGSUSED*/
808 static int
809 opl_create_core(dev_info_t *node, void *arg, uint_t flags)
810 {
811 	opl_probe_t	*probe;
812 	hwd_cpu_chip_t	*chip;
813 	hwd_core_t	*core;
814 	int		sharing[2];
815 	int		ret;
816 
817 	probe = arg;
818 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
819 	core = &chip->chip_cores[probe->pr_core];
820 
821 	OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
822 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
823 	OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);
824 
825 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
826 	OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
827 	OPL_UPDATE_PROP(int, node, "implementation#",
828 	    core->core_implementation);
829 	OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);
830 
831 	OPL_UPDATE_PROP(int, node, "sparc-version", 9);
832 	OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);
833 
834 	OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
835 	OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
836 	    core->core_l1_icache_line_size);
837 	OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
838 	    core->core_l1_icache_associativity);
839 	OPL_UPDATE_PROP(int, node, "#itlb-entries",
840 	    core->core_num_itlb_entries);
841 
842 	OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
843 	OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
844 	    core->core_l1_dcache_line_size);
845 	OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
846 	    core->core_l1_dcache_associativity);
847 	OPL_UPDATE_PROP(int, node, "#dtlb-entries",
848 	    core->core_num_dtlb_entries);
849 
850 	OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
851 	OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
852 	    core->core_l2_cache_line_size);
853 	OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
854 	    core->core_l2_cache_associativity);
855 	sharing[0] = 0;
856 	sharing[1] = core->core_l2_cache_sharing;
857 	OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);
858 
859 	OPL_UPDATE_PROP(string, node, "status", "okay");
860 
861 	return (DDI_WALK_TERMINATE);
862 }
863 
864 /*
865  * Create "core" nodes as child nodes of a given "cmp" node.
866  *
867  * Create the branch below each "core" node.
868  */
869 static int
870 opl_probe_cores(opl_probe_t *probe)
871 {
872 	int		i;
873 	hwd_cpu_chip_t	*chip;
874 	hwd_core_t	*cores;
875 	dev_info_t	*parent, *node;
876 
877 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
878 	cores = &chip->chip_cores[0];
879 	parent = probe->pr_parent;
880 
881 	for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {
882 
883 		if (!HWD_STATUS_OK(cores[i].core_status))
884 			continue;
885 
886 		probe->pr_parent = parent;
887 		probe->pr_create = opl_create_core;
888 		probe->pr_core = i;
889 		node = opl_create_node(probe);
890 		if (node == NULL) {
891 
892 			cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
893 			    probe->pr_board, probe->pr_cpu_chip,
894 			    probe->pr_core);
895 			return (-1);
896 		}
897 
898 		/*
899 		 * Create "cpu" nodes below "core".
900 		 */
901 		probe->pr_parent = node;
902 		if (opl_probe_cpus(probe) != 0)
903 			return (-1);
904 		probe->pr_cpu_impl |= (1 << cores[i].core_implementation);
905 	}
906 
907 	return (0);
908 }
909 
910 /*
911  * Set the properties for a "cmp" node.
912  */
913 /*ARGSUSED*/
914 static int
915 opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
916 {
917 	opl_probe_t	*probe;
918 	hwd_cpu_chip_t	*chip;
919 	opl_range_t	range;
920 	uint64_t	dummy_addr;
921 	int		ret;
922 
923 	probe = arg;
924 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
925 
926 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);
927 
928 	OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
929 	OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);
930 
931 	dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
932 	range.rg_addr_hi = OPL_HI(dummy_addr);
933 	range.rg_addr_lo = OPL_LO(dummy_addr);
934 	range.rg_size_hi = 0;
935 	range.rg_size_lo = 0;
936 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
937 
938 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
939 	OPL_UPDATE_PROP(int, node, "#size-cells", 0);
940 
941 	OPL_UPDATE_PROP(string, node, "status", "okay");
942 
943 	return (DDI_WALK_TERMINATE);
944 }
945 
946 /*
947  * Create "cmp" nodes as child nodes of the root node.
948  *
949  * Create the branch below each "cmp" node.
950  */
951 static int
952 opl_probe_cpu_chips(opl_probe_t *probe)
953 {
954 	int		i;
955 	dev_info_t	**cfg_cpu_chips;
956 	hwd_cpu_chip_t	*chips;
957 	dev_info_t	*node;
958 
959 	cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
960 	chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];
961 
962 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
963 
964 		ASSERT(cfg_cpu_chips[i] == NULL);
965 
966 		if (!HWD_STATUS_OK(chips[i].chip_status))
967 			continue;
968 
969 		probe->pr_parent = ddi_root_node();
970 		probe->pr_create = opl_create_cpu_chip;
971 		probe->pr_cpu_chip = i;
972 		probe->pr_hold = 1;
973 		node = opl_create_node(probe);
974 		if (node == NULL) {
975 
976 			cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
977 			    probe->pr_board, probe->pr_cpu_chip);
978 			return (-1);
979 		}
980 
981 		cfg_cpu_chips[i] = node;
982 
983 		/*
984 		 * Create "core" nodes below "cmp".
985 		 * We hold the "cmp" node. So, there is no need to hold
986 		 * the "core" and "cpu" nodes below it.
987 		 */
988 		probe->pr_parent = node;
989 		probe->pr_hold = 0;
990 		if (opl_probe_cores(probe) != 0)
991 			return (-1);
992 	}
993 
994 	return (0);
995 }
996 
997 /*
998  * Set the properties for a "pseudo-mc" node.
999  */
1000 /*ARGSUSED*/
1001 static int
1002 opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
1003 {
1004 	opl_probe_t	*probe;
1005 	int		board, portid;
1006 	hwd_bank_t	*bank;
1007 	hwd_memory_t	*mem;
1008 	opl_range_t	range;
1009 	opl_mc_addr_t	mc[HWD_BANKS_PER_CMU];
1010 	int		status[2][7];
1011 	int		i, j;
1012 	int		ret;
1013 
1014 	probe = arg;
1015 	board = probe->pr_board;
1016 
1017 	OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
1018 	OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
1019 	OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");
1020 
1021 	portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
1022 	OPL_UPDATE_PROP(int, node, "portid", portid);
1023 
1024 	range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
1025 	range.rg_addr_lo = 0x200;
1026 	range.rg_size_hi = 0;
1027 	range.rg_size_lo = 0;
1028 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
1029 
1030 	OPL_UPDATE_PROP(int, node, "board#", board);
1031 	OPL_UPDATE_PROP(int, node, "physical-board#",
1032 	    probe->pr_sb->sb_psb_number);
1033 
1034 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
1035 	OPL_UPDATE_PROP(int, node, "#size-cells", 2);
1036 
1037 	mem = &probe->pr_sb->sb_cmu.cmu_memory;
1038 
1039 	range.rg_addr_hi = OPL_HI(mem->mem_start_address);
1040 	range.rg_addr_lo = OPL_LO(mem->mem_start_address);
1041 	range.rg_size_hi = OPL_HI(mem->mem_size);
1042 	range.rg_size_lo = OPL_LO(mem->mem_size);
1043 	OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);
1044 
1045 	bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
1046 	for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {
1047 
1048 		if (!HWD_STATUS_OK(bank[i].bank_status))
1049 			continue;
1050 
1051 		mc[j].mc_bank = i;
1052 		mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
1053 		mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
1054 		j++;
1055 	}
1056 
1057 	if (j > 0) {
1058 		OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);
1059 	} else {
1060 		/*
1061 		 * If there is no memory, we need the mc-addr property, but
1062 		 * it is length 0.  The only way to do this using ndi seems
1063 		 * to be by creating a boolean property.
1064 		 */
1065 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node, "mc-addr");
1066 		OPL_UPDATE_PROP_ERR(ret, "mc-addr");
1067 	}
1068 
1069 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
1070 	    mem->mem_cs[0].cs_pa_mac_table, 64);
1071 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
1072 	    mem->mem_cs[1].cs_pa_mac_table, 64);
1073 
1074 #define	CS_PER_MEM 2
1075 
1076 	for (i = 0, j = 0; i < CS_PER_MEM; i++) {
1077 		if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
1078 		    HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
1079 			status[j][0] = i;
1080 			if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
1081 				status[j][1] = 0;
1082 			else
1083 				status[j][1] = 1;
1084 			status[j][2] =
1085 			    OPL_HI(mem->mem_cs[i].cs_available_capacity);
1086 			status[j][3] =
1087 			    OPL_LO(mem->mem_cs[i].cs_available_capacity);
1088 			status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
1089 			status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
1090 			status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
1091 			j++;
1092 		}
1093 	}
1094 
1095 	if (j > 0) {
1096 		OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
1097 		    j*7);
1098 	} else {
1099 		/*
1100 		 * If there is no memory, we need the cs-status property, but
1101 		 * it is length 0.  The only way to do this using ndi seems
1102 		 * to be by creating a boolean property.
1103 		 */
1104 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node,
1105 		    "cs-status");
1106 		OPL_UPDATE_PROP_ERR(ret, "cs-status");
1107 	}
1108 
1109 	return (DDI_WALK_TERMINATE);
1110 }
1111 
1112 /*
1113  * Create "pseudo-mc" nodes
1114  */
1115 static int
1116 opl_probe_memory(opl_probe_t *probe)
1117 {
1118 	int		board;
1119 	opl_board_cfg_t	*board_cfg;
1120 	dev_info_t	*node;
1121 
1122 	board = probe->pr_board;
1123 	board_cfg = &opl_boards[board];
1124 
1125 	ASSERT(board_cfg->cfg_pseudo_mc == NULL);
1126 
1127 	probe->pr_parent = ddi_root_node();
1128 	probe->pr_create = opl_create_pseudo_mc;
1129 	probe->pr_hold = 1;
1130 	node = opl_create_node(probe);
1131 	if (node == NULL) {
1132 
1133 		cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed", board);
1134 		return (-1);
1135 	}
1136 
1137 	board_cfg->cfg_pseudo_mc = node;
1138 
1139 	return (0);
1140 }
1141 
1142 /*
1143  * Allocate the fcode ops handle.
1144  */
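/*
 * The handle chains to the generic fc_ops handle, records the parent and
 * child dips, the Fcode image and its size, the unit address and argument
 * string, and carries the resource list that tracks mappings and claimed
 * memory so opl_fc_ops_free_handle() can release them.
 */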
1145 /*ARGSUSED*/
1146 static
1147 fco_handle_t
1148 opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
1149 			void *fcode, size_t fcode_size, char *unit_address,
1150 			char *my_args)
1151 {
1152 	fco_handle_t	rp;
1153 	phandle_t	h;
1154 	char		*buf;
1155 
1156 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
1157 	rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
1158 	    unit_address, NULL);
1159 	rp->ap = parent;
1160 	rp->child = child;
1161 	rp->fcode = fcode;
1162 	rp->fcode_size = fcode_size;
1163 	rp->my_args = my_args;
1164 
1165 	if (unit_address) {
1166 		buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
1167 		(void) strcpy(buf, unit_address);
1168 		rp->unit_address = buf;
1169 	}
1170 
1171 	/*
1172 	 * Add the child's nodeid to our table...
1173 	 */
1174 	h = ddi_get_nodeid(rp->child);
1175 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
1176 
1177 	return (rp);
1178 }
1179 
1180 
1181 static void
1182 opl_fc_ops_free_handle(fco_handle_t rp)
1183 {
1184 	struct fc_resource	*resp, *nresp;
1185 
1186 	ASSERT(rp);
1187 
1188 	if (rp->next_handle)
1189 		fc_ops_free_handle(rp->next_handle);
1190 	if (rp->unit_address)
1191 		kmem_free(rp->unit_address, UNIT_ADDR_SIZE);
1192 
1193 	/*
1194 	 * Release all the resources from the resource list
1195 	 */
1196 	for (resp = rp->head; resp != NULL; resp = nresp) {
1197 		nresp = resp->next;
1198 		switch (resp->type) {
1199 
1200 		case RT_MAP:
1201 			/*
1202 			 * If this is still mapped, we'd better unmap it now,
1203 			 * or all our structures that are tracking it will
1204 			 * be leaked.
1205 			 */
1206 			if (resp->fc_map_handle != NULL)
1207 				opl_unmap_phys(&resp->fc_map_handle);
1208 			break;
1209 
1210 		case RT_DMA:
1211 			/*
1212 			 * DMA has to be freed up at exit time.
1213 			 */
1214 			cmn_err(CE_CONT,
1215 			    "opl_fc_ops_free_handle: Unexpected DMA seen!");
1216 			break;
1217 
1218 		case RT_CONTIGIOUS:
1219 			FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
1220 			    "Free claim-memory resource 0x%lx size 0x%x\n",
1221 			    resp->fc_contig_virt, resp->fc_contig_len);
1222 
1223 			(void) ndi_ra_free(ddi_root_node(),
1224 			    (uint64_t)resp->fc_contig_virt,
1225 			    resp->fc_contig_len, "opl-fcodemem",
1226 			    NDI_RA_PASS);
1227 
1228 			break;
1229 
1230 		default:
1231 			cmn_err(CE_CONT, "opl_fc_ops_free: "
1232 			    "unknown resource type %d", resp->type);
1233 			break;
1234 		}
1235 		fc_rem_resource(rp, resp);
1236 		kmem_free(resp, sizeof (struct fc_resource));
1237 	}
1238 
1239 	kmem_free(rp, sizeof (struct fc_resource_list));
1240 }
1241 
1242 int
1243 opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1244 {
1245 	opl_fc_ops_t	*op;
1246 	char		*service = fc_cell2ptr(cp->svc_name);
1247 
1248 	ASSERT(rp);
1249 
1250 	FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);
1251 
1252 	/*
1253 	 * First try the generic fc_ops.
1254 	 */
1255 	if (fc_ops(ap, rp->next_handle, cp) == 0)
1256 		return (0);
1257 
1258 	/*
1259 	 * Now try the Jupiter-specific ops.
1260 	 */
1261 	for (op = opl_fc_ops; op->fc_service != NULL; ++op)
1262 		if (strcmp(op->fc_service, service) == 0)
1263 			return (op->fc_op(ap, rp, cp));
1264 
1265 	FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);
1266 
1267 	return (-1);
1268 }
1269 
1270 /*
1271  * map-in  (phys.lo phys.hi size -- virt)
1272  */
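/*
 * In this handler fc_arg(cp, 0) carries the size, fc_arg(cp, 1) phys.hi
 * (used as the bustype) and fc_arg(cp, 2) phys.lo (the address).
 */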
1273 static int
1274 opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1275 {
1276 	size_t			len;
1277 	int			error;
1278 	caddr_t			virt;
1279 	struct fc_resource	*resp;
1280 	struct regspec		rspec;
1281 	ddi_device_acc_attr_t	acc;
1282 	ddi_acc_handle_t	h;
1283 
1284 	if (fc_cell2int(cp->nargs) != 3)
1285 		return (fc_syntax_error(cp, "nargs must be 3"));
1286 
1287 	if (fc_cell2int(cp->nresults) < 1)
1288 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1289 
1290 	rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
1291 	rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
1292 	rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));
1293 
1294 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1295 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1296 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1297 
1298 	FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
1299 	    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1300 	    rspec.regspec_addr, rspec.regspec_size);
1301 
1302 	error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);
1303 
1304 	if (error)  {
1305 		FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
1306 		    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1307 		    rspec.regspec_addr, rspec.regspec_size);
1308 
1309 		return (fc_priv_error(cp, "opl map-in failed"));
1310 	}
1311 
1312 	FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);
1313 
1314 	cp->nresults = fc_int2cell(1);
1315 	fc_result(cp, 0) = fc_ptr2cell(virt);
1316 
1317 	/*
1318 	 * Log this resource ...
1319 	 */
1320 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1321 	resp->type = RT_MAP;
1322 	resp->fc_map_virt = virt;
1323 	resp->fc_map_len = len;
1324 	resp->fc_map_handle = h;
1325 	fc_add_resource(rp, resp);
1326 
1327 	return (fc_success_op(ap, rp, cp));
1328 }
1329 
1330 /*
1331  * map-out (virt size -- )
1332  */
1333 static int
1334 opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1335 {
1336 	caddr_t			virt;
1337 	size_t			len;
1338 	struct fc_resource	*resp;
1339 
1340 	if (fc_cell2int(cp->nargs) != 2)
1341 		return (fc_syntax_error(cp, "nargs must be 2"));
1342 
1343 	virt = fc_cell2ptr(fc_arg(cp, 1));
1344 
1345 	len = fc_cell2size(fc_arg(cp, 0));
1346 
1347 	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
1348 	    virt, len);
1349 
1350 	/*
1351 	 * Find if this request matches a mapping resource we set up.
1352 	 */
1353 	fc_lock_resource_list(rp);
1354 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1355 		if (resp->type != RT_MAP)
1356 			continue;
1357 		if (resp->fc_map_virt != virt)
1358 			continue;
1359 		if (resp->fc_map_len == len)
1360 			break;
1361 	}
1362 	fc_unlock_resource_list(rp);
1363 
1364 	if (resp == NULL)
1365 		return (fc_priv_error(cp, "request doesn't match a "
1366 		    "known mapping"));
1367 
1368 	opl_unmap_phys(&resp->fc_map_handle);
1369 
1370 	/*
1371 	 * remove the resource from the list and release it.
1372 	 */
1373 	fc_rem_resource(rp, resp);
1374 	kmem_free(resp, sizeof (struct fc_resource));
1375 
1376 	cp->nresults = fc_int2cell(0);
1377 	return (fc_success_op(ap, rp, cp));
1378 }
1379 
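/*
 * rx@, rl@, rw@, rb@  (virt -- value)
 *
 * Fetch a register of the width encoded in the service name. The virtual
 * address must be properly aligned and must fall inside a mapping or
 * claimed-memory resource previously logged on this handle; mapped
 * registers are read with ddi_peek*(), claimed memory is read directly.
 */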
1380 static int
1381 opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1382 {
1383 	size_t			len;
1384 	caddr_t			virt;
1385 	int			error = 0;
1386 	uint64_t		v;
1387 	uint64_t		x;
1388 	uint32_t		l;
1389 	uint16_t		w;
1390 	uint8_t			b;
1391 	char			*service = fc_cell2ptr(cp->svc_name);
1392 	struct fc_resource	*resp;
1393 
1394 	if (fc_cell2int(cp->nargs) != 1)
1395 		return (fc_syntax_error(cp, "nargs must be 1"));
1396 
1397 	if (fc_cell2int(cp->nresults) < 1)
1398 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1399 
1400 	virt = fc_cell2ptr(fc_arg(cp, 0));
1401 
1402 	/*
1403 	 * Determine the access width .. we can switch on the 2nd
1404 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
1405 	 */
1406 	switch (*(service + 1)) {
1407 	case 'x':	len = sizeof (x); break;
1408 	case 'l':	len = sizeof (l); break;
1409 	case 'w':	len = sizeof (w); break;
1410 	case 'b':	len = sizeof (b); break;
1411 	}
1412 
1413 	/*
1414 	 * Check the alignment ...
1415 	 */
1416 	if (((intptr_t)virt & (len - 1)) != 0)
1417 		return (fc_priv_error(cp, "unaligned access"));
1418 
1419 	/*
1420 	 * Find if this virt is 'within' a request we know about
1421 	 */
1422 	fc_lock_resource_list(rp);
1423 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1424 		if (resp->type == RT_MAP) {
1425 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1426 			    ((virt + len) <=
1427 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1428 				break;
1429 		} else if (resp->type == RT_CONTIGIOUS) {
1430 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1431 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1432 			    resp->fc_contig_len)))
1433 				break;
1434 		}
1435 	}
1436 	fc_unlock_resource_list(rp);
1437 
1438 	if (resp == NULL) {
1439 		return (fc_priv_error(cp, "request not within "
1440 		    "known mappings"));
1441 	}
1442 
1443 	switch (len) {
1444 	case sizeof (x):
1445 		if (resp->type == RT_MAP)
1446 			error = ddi_peek64(rp->child, (int64_t *)virt,
1447 			    (int64_t *)&x);
1448 		else /* RT_CONTIGIOUS */
1449 			x = *(int64_t *)virt;
1450 		v = x;
1451 		break;
1452 	case sizeof (l):
1453 		if (resp->type == RT_MAP)
1454 			error = ddi_peek32(rp->child, (int32_t *)virt,
1455 			    (int32_t *)&l);
1456 		else /* RT_CONTIGIOUS */
1457 			l = *(int32_t *)virt;
1458 		v = l;
1459 		break;
1460 	case sizeof (w):
1461 		if (resp->type == RT_MAP)
1462 			error = ddi_peek16(rp->child, (int16_t *)virt,
1463 			    (int16_t *)&w);
1464 		else /* RT_CONTIGIOUS */
1465 			w = *(int16_t *)virt;
1466 		v = w;
1467 		break;
1468 	case sizeof (b):
1469 		if (resp->type == RT_MAP)
1470 			error = ddi_peek8(rp->child, (int8_t *)virt,
1471 			    (int8_t *)&b);
1472 		else /* RT_CONTIGIOUS */
1473 			b = *(int8_t *)virt;
1474 		v = b;
1475 		break;
1476 	}
1477 
1478 	if (error == DDI_FAILURE) {
1479 		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
1480 		    "accessing virt %p len %d\n", virt, len);
1481 		return (fc_priv_error(cp, "access error"));
1482 	}
1483 
1484 	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
1485 	    service, virt, v);
1486 
1487 	cp->nresults = fc_int2cell(1);
1488 	switch (len) {
1489 	case sizeof (x): fc_result(cp, 0) = x; break;
1490 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
1491 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
1492 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
1493 	}
1494 	return (fc_success_op(ap, rp, cp));
1495 }
1496 
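/*
 * rx!, rl!, rw!, rb!
 *
 * Store counterpart of the fetch services above: fc_arg(cp, 0) is the
 * target virtual address and fc_arg(cp, 1) the value. The same alignment
 * and resource checks apply; ddi_poke*() is used for mapped registers and
 * direct stores for claimed memory.
 */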
1497 static int
1498 opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1499 {
1500 	size_t			len;
1501 	caddr_t			virt;
1502 	uint64_t		v;
1503 	uint64_t		x;
1504 	uint32_t		l;
1505 	uint16_t		w;
1506 	uint8_t			b;
1507 	char			*service = fc_cell2ptr(cp->svc_name);
1508 	struct fc_resource	*resp;
1509 	int			error = 0;
1510 
1511 	if (fc_cell2int(cp->nargs) != 2)
1512 		return (fc_syntax_error(cp, "nargs must be 2"));
1513 
1514 	virt = fc_cell2ptr(fc_arg(cp, 0));
1515 
1516 	/*
1517 	 * Determine the access width .. we can switch on the 2nd
1518 	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
1519 	 */
1520 	switch (*(service + 1)) {
1521 	case 'x':
1522 		len = sizeof (x);
1523 		x = fc_arg(cp, 1);
1524 		v = x;
1525 		break;
1526 	case 'l':
1527 		len = sizeof (l);
1528 		l = fc_cell2uint32_t(fc_arg(cp, 1));
1529 		v = l;
1530 		break;
1531 	case 'w':
1532 		len = sizeof (w);
1533 		w = fc_cell2uint16_t(fc_arg(cp, 1));
1534 		v = w;
1535 		break;
1536 	case 'b':
1537 		len = sizeof (b);
1538 		b = fc_cell2uint8_t(fc_arg(cp, 1));
1539 		v = b;
1540 		break;
1541 	}
1542 
1543 	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
1544 	    service, virt, v);
1545 
1546 	/*
1547 	 * Check the alignment ...
1548 	 */
1549 	if (((intptr_t)virt & (len - 1)) != 0)
1550 		return (fc_priv_error(cp, "unaligned access"));
1551 
1552 	/*
1553 	 * Find if this virt is 'within' a request we know about
1554 	 */
1555 	fc_lock_resource_list(rp);
1556 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1557 		if (resp->type == RT_MAP) {
1558 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1559 			    ((virt + len) <=
1560 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1561 				break;
1562 		} else if (resp->type == RT_CONTIGIOUS) {
1563 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1564 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1565 			    resp->fc_contig_len)))
1566 				break;
1567 		}
1568 	}
1569 	fc_unlock_resource_list(rp);
1570 
1571 	if (resp == NULL)
1572 		return (fc_priv_error(cp, "request not within "
1573 		    "known mappings"));
1574 
1575 	switch (len) {
1576 	case sizeof (x):
1577 		if (resp->type == RT_MAP)
1578 			error = ddi_poke64(rp->child, (int64_t *)virt, x);
1579 		else if (resp->type == RT_CONTIGIOUS)
1580 			*(uint64_t *)virt = x;
1581 		break;
1582 	case sizeof (l):
1583 		if (resp->type == RT_MAP)
1584 			error = ddi_poke32(rp->child, (int32_t *)virt, l);
1585 		else if (resp->type == RT_CONTIGIOUS)
1586 			*(uint32_t *)virt = l;
1587 		break;
1588 	case sizeof (w):
1589 		if (resp->type == RT_MAP)
1590 			error = ddi_poke16(rp->child, (int16_t *)virt, w);
1591 		else if (resp->type == RT_CONTIGIOUS)
1592 			*(uint16_t *)virt = w;
1593 		break;
1594 	case sizeof (b):
1595 		if (resp->type == RT_MAP)
1596 			error = ddi_poke8(rp->child, (int8_t *)virt, b);
1597 		else if (resp->type == RT_CONTIGIOUS)
1598 			*(uint8_t *)virt = b;
1599 		break;
1600 	}
1601 
1602 	if (error == DDI_FAILURE) {
1603 		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
1604 		    "accessing virt %p len %d\n", virt, len);
1605 		return (fc_priv_error(cp, "access error"));
1606 	}
1607 
1608 	cp->nresults = fc_int2cell(0);
1609 	return (fc_success_op(ap, rp, cp));
1610 }
1611 
1612 /*
1613  * opl_claim_memory
1614  *
1615  * claim-memory (align size vhint -- vaddr)
1616  */
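/*
 * Allocations are carved out of the "opl-fcodemem" NDI resource map that
 * _init() seeded with the efcode area set aside at start-of-day, so the
 * value returned to the interpreter is a kernel virtual address inside
 * that region. A nonzero vhint is accepted but ignored.
 */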
1617 static int
1618 opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1619 {
1620 	int			align, size, vhint;
1621 	uint64_t		answer, alen;
1622 	ndi_ra_request_t	request;
1623 	struct fc_resource	*resp;
1624 
1625 	if (fc_cell2int(cp->nargs) != 3)
1626 		return (fc_syntax_error(cp, "nargs must be 3"));
1627 
1628 	if (fc_cell2int(cp->nresults) < 1)
1629 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1630 
1631 	vhint = fc_cell2int(fc_arg(cp, 2));
1632 	size  = fc_cell2int(fc_arg(cp, 1));
1633 	align = fc_cell2int(fc_arg(cp, 0));
1634 
1635 	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
1636 	    "vhint=0x%x\n", align, size, vhint);
1637 
1638 	if (size == 0) {
1639 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1640 		    "contiguous memory of size zero\n");
1641 		return (fc_priv_error(cp, "allocation error"));
1642 	}
1643 
1644 	if (vhint) {
1645 		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
1646 		    "vhint=0x%x - Ignoring Argument\n", vhint);
1647 	}
1648 
1649 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1650 	request.ra_flags	= NDI_RA_ALLOC_BOUNDED;
1651 	request.ra_boundbase	= 0;
1652 	request.ra_boundlen	= 0xffffffff;
1653 	request.ra_len		= size;
1654 	request.ra_align_mask	= align - 1;
1655 
1656 	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
1657 	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
1658 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1659 		    "contiguous memory\n");
1660 		return (fc_priv_error(cp, "allocation error"));
1661 	}
1662 
1663 	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
1664 	    "size=0x%x\n", answer, alen);
1665 
1666 	cp->nresults = fc_int2cell(1);
1667 	fc_result(cp, 0) = answer;
1668 
1669 	/*
1670 	 * Log this resource ...
1671 	 */
1672 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1673 	resp->type = RT_CONTIGIOUS;
1674 	resp->fc_contig_virt = (void *)answer;
1675 	resp->fc_contig_len = size;
1676 	fc_add_resource(rp, resp);
1677 
1678 	return (fc_success_op(ap, rp, cp));
1679 }
1680 
1681 /*
1682  * opl_release_memory
1683  *
1684  * release-memory (size vaddr -- )
1685  */
1686 static int
1687 opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1688 {
1689 	int32_t			vaddr, size;
1690 	struct fc_resource	*resp;
1691 
1692 	if (fc_cell2int(cp->nargs) != 2)
1693 		return (fc_syntax_error(cp, "nargs must be 2"));
1694 
1695 	if (fc_cell2int(cp->nresults) != 0)
1696 		return (fc_syntax_error(cp, "nresults must be 0"));
1697 
1698 	vaddr = fc_cell2int(fc_arg(cp, 1));
1699 	size  = fc_cell2int(fc_arg(cp, 0));
1700 
1701 	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
1702 	    vaddr, size);
1703 
1704 	/*
1705 	 * Find if this request matches a mapping resource we set up.
1706 	 */
1707 	fc_lock_resource_list(rp);
1708 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1709 		if (resp->type != RT_CONTIGIOUS)
1710 			continue;
1711 		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
1712 			continue;
1713 		if (resp->fc_contig_len == size)
1714 			break;
1715 	}
1716 	fc_unlock_resource_list(rp);
1717 
1718 	if (resp == NULL)
1719 		return (fc_priv_error(cp, "request doesn't match a "
1720 		    "known mapping"));
1721 
1722 	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
1723 	    "opl-fcodemem", NDI_RA_PASS);
1724 
1725 	/*
1726 	 * remove the resource from the list and release it.
1727 	 */
1728 	fc_rem_resource(rp, resp);
1729 	kmem_free(resp, sizeof (struct fc_resource));
1730 
1731 	cp->nresults = fc_int2cell(0);
1732 
1733 	return (fc_success_op(ap, rp, cp));
1734 }
1735 
1736 /*
1737  * opl_vtop
1738  *
1739  * vtop (vaddr -- paddr.lo paddr.hi)
1740  */
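/*
 * Only addresses inside a claimed-memory (RT_CONTIGIOUS) resource are
 * translated; the physical address comes from va_to_pa() and is returned
 * in the first result cell, with 0 in the second.
 */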
1741 static int
1742 opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1743 {
1744 	int			vaddr;
1745 	uint64_t		paddr;
1746 	struct fc_resource	*resp;
1747 
1748 	if (fc_cell2int(cp->nargs) != 1)
1749 		return (fc_syntax_error(cp, "nargs must be 1"));
1750 
1751 	if (fc_cell2int(cp->nresults) >= 3)
1752 		return (fc_syntax_error(cp, "nresults must be less than 3"));
1753 
1754 	vaddr = fc_cell2int(fc_arg(cp, 0));
1755 
1756 	/*
1757 	 * Find if this request matches a mapping resource we set up.
1758 	 */
1759 	fc_lock_resource_list(rp);
1760 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1761 		if (resp->type != RT_CONTIGIOUS)
1762 			continue;
1763 		if (((uint64_t)resp->fc_contig_virt <= vaddr) &&
1764 		    (vaddr < (uint64_t)resp->fc_contig_virt +
1765 		    resp->fc_contig_len))
1766 			break;
1767 	}
1768 	fc_unlock_resource_list(rp);
1769 
1770 	if (resp == NULL)
1771 		return (fc_priv_error(cp, "request doesn't match a "
1772 		    "known mapping"));
1773 
1774 	paddr = va_to_pa((void *)(uintptr_t)vaddr);
1775 
1776 	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
1777 	    vaddr, paddr);
1778 
1779 	cp->nresults = fc_int2cell(2);
1780 
1781 	fc_result(cp, 0) = paddr;
1782 	fc_result(cp, 1) = 0;
1783 
1784 	return (fc_success_op(ap, rp, cp));
1785 }
1786 
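/*
 * config-child  ( -- phandle)
 *
 * Return the phandle that was assigned to the child node when the handle
 * was allocated, so the interpreter can refer back to the device node
 * being configured.
 */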
1787 static int
1788 opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1789 {
1790 	fc_phandle_t h;
1791 
1792 	if (fc_cell2int(cp->nargs) != 0)
1793 		return (fc_syntax_error(cp, "nargs must be 0"));
1794 
1795 	if (fc_cell2int(cp->nresults) < 1)
1796 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1797 
1798 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1799 
1800 	cp->nresults = fc_int2cell(1);
1801 	fc_result(cp, 0) = fc_phandle2cell(h);
1802 
1803 	return (fc_success_op(ap, rp, cp));
1804 }
1805 
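/*
 * Copy the requested drop-in name in from the interpreter, fetch the
 * corresponding Fcode image from the PROM with prom_get_fcode() and copy
 * the image back out to the supplied buffer. The PROM routine's return
 * value is handed back as the status result, forced to 0 if either copy
 * fails.
 */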
1806 static int
1807 opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1808 {
1809 	caddr_t		dropin_name_virt, fcode_virt;
1810 	char		*dropin_name, *fcode;
1811 	int		fcode_len, status;
1812 
1813 	if (fc_cell2int(cp->nargs) != 3)
1814 		return (fc_syntax_error(cp, "nargs must be 3"));
1815 
1816 	if (fc_cell2int(cp->nresults) < 1)
1817 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1818 
1819 	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));
1820 
1821 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1822 
1823 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1824 
1825 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1826 
1827 	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);
1828 
1829 	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
1830 	    FC_SVC_NAME_LEN - 1, NULL))  {
1831 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
1832 		    "fault copying in drop in name %p\n", dropin_name_virt);
1833 		status = 0;
1834 	} else {
1835 		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);
1836 
1837 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1838 
1839 		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {
1840 
1841 			if (copyout((void *)fcode, (void *)fcode_virt,
1842 			    fcode_len)) {
1843 				cmn_err(CE_WARN, " opl_get_fcode: Unable "
1844 				    "to copy out fcode image");
1845 				status = 0;
1846 			}
1847 		}
1848 
1849 		kmem_free(fcode, fcode_len);
1850 	}
1851 
1852 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1853 
1854 	cp->nresults = fc_int2cell(1);
1855 	fc_result(cp, 0) = status;
1856 
1857 	return (fc_success_op(ap, rp, cp));
1858 }
1859 
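/*
 * Return the size of the named Fcode drop-in as reported by
 * prom_get_fcode_size(), or 0 if the name cannot be copied in.
 */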
1860 static int
1861 opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1862 {
1863 	caddr_t		virt;
1864 	char		*dropin_name;
1865 	int		len;
1866 
1867 	if (fc_cell2int(cp->nargs) != 1)
1868 		return (fc_syntax_error(cp, "nargs must be 1"));
1869 
1870 	if (fc_cell2int(cp->nresults) < 1)
1871 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1872 
1873 	virt = fc_cell2ptr(fc_arg(cp, 0));
1874 
1875 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1876 
1877 	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");
1878 
1879 	if (copyinstr(fc_cell2ptr(virt), dropin_name,
1880 	    FC_SVC_NAME_LEN - 1, NULL))  {
1881 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
1882 		    "fault copying in drop in name %p\n", virt);
1883 		len = 0;
1884 	} else {
1885 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);
1886 
1887 		len = prom_get_fcode_size(dropin_name);
1888 	}
1889 
1890 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1891 
1892 	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);
1893 
1894 	cp->nresults = fc_int2cell(1);
1895 	fc_result(cp, 0) = len;
1896 
1897 	return (fc_success_op(ap, rp, cp));
1898 }
1899 
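/*
 * Map a physical register range on behalf of the Fcode interpreter: set up
 * a DDI access handle, cache a private copy of the regspec for the later
 * unmap, and perform a DDI_MO_MAP_LOCKED mapping through ddi_map(). On
 * failure the handle and the cached regspec are freed again.
 */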
1900 static int
1901 opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
1902     caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1903     ddi_acc_handle_t *handlep)
1904 {
1905 	ddi_map_req_t 	mapreq;
1906 	ddi_acc_hdl_t	*acc_handlep;
1907 	int		result;
1908 	struct regspec	*rspecp;
1909 
1910 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1911 	acc_handlep = impl_acc_hdl_get(*handlep);
1912 	acc_handlep->ah_vers = VERS_ACCHDL;
1913 	acc_handlep->ah_dip = dip;
1914 	acc_handlep->ah_rnumber = 0;
1915 	acc_handlep->ah_offset = 0;
1916 	acc_handlep->ah_len = 0;
1917 	acc_handlep->ah_acc = *accattrp;
1918 	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
1919 	*rspecp = *phys_spec;
1920 	/*
1921 	 * cache a copy of the reg spec
1922 	 */
1923 	acc_handlep->ah_bus_private = rspecp;
1924 
1925 	mapreq.map_op = DDI_MO_MAP_LOCKED;
1926 	mapreq.map_type = DDI_MT_REGSPEC;
1927 	mapreq.map_obj.rp = (struct regspec *)phys_spec;
1928 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1929 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1930 	mapreq.map_handlep = acc_handlep;
1931 	mapreq.map_vers = DDI_MAP_VERSION;
1932 
1933 	result = ddi_map(dip, &mapreq, 0, 0, addrp);
1934 
1935 	if (result != DDI_SUCCESS) {
1936 		impl_acc_hdl_free(*handlep);
1937 		kmem_free(rspecp, sizeof (struct regspec));
1938 		*handlep = (ddi_acc_handle_t)NULL;
1939 	} else {
1940 		acc_handlep->ah_addr = *addrp;
1941 	}
1942 
1943 	return (result);
1944 }
1945 
1946 static void
1947 opl_unmap_phys(ddi_acc_handle_t *handlep)
1948 {
1949 	ddi_map_req_t	mapreq;
1950 	ddi_acc_hdl_t	*acc_handlep;
1951 	struct regspec	*rspecp;
1952 
1953 	acc_handlep = impl_acc_hdl_get(*handlep);
1954 	ASSERT(acc_handlep);
1955 	rspecp = acc_handlep->ah_bus_private;
1956 
1957 	mapreq.map_op = DDI_MO_UNMAP;
1958 	mapreq.map_type = DDI_MT_REGSPEC;
1959 	mapreq.map_obj.rp = (struct regspec *)rspecp;
1960 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1961 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1962 	mapreq.map_handlep = acc_handlep;
1963 	mapreq.map_vers = DDI_MAP_VERSION;
1964 
1965 	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
1966 	    acc_handlep->ah_len, &acc_handlep->ah_addr);
1967 
1968 	impl_acc_hdl_free(*handlep);
1969 	/*
1970 	 * Free the cached copy
1971 	 */
1972 	kmem_free(rspecp, sizeof (struct regspec));
1973 	*handlep = (ddi_acc_handle_t)NULL;
1974 }
1975 
1976 static int
1977 opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1978 {
1979 	uint32_t	portid;
1980 	void		*hwd_virt;
1981 	hwd_header_t	*hwd_h = NULL;
1982 	hwd_sb_t	*hwd_sb = NULL;
1983 	int		lsb, ch, leaf;
1984 	int		status = 1;
1985 
1986 	/* Check the argument */
1987 	if (fc_cell2int(cp->nargs) != 2)
1988 		return (fc_syntax_error(cp, "nargs must be 2"));
1989 
1990 	if (fc_cell2int(cp->nresults) < 1)
1991 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1992 
1993 	/* Get the parameters */
1994 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
1995 	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));
1996 
1997 	/* Get the ID numbers */
1998 	lsb  = OPL_IO_PORTID_TO_LSB(portid);
1999 	ch   = OPL_PORTID_TO_CHANNEL(portid);
2000 	leaf = OPL_PORTID_TO_LEAF(portid);
2001 	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);
2002 
2003 	/* Set the pointer of hwd. */
2004 	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
2005 		return (fc_priv_error(cp, "null hwd header"));
2006 	}
2007 	/* Set the pointer of hwd sb. */
2008 	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
2009 	    == NULL) {
2010 		return (fc_priv_error(cp, "null hwd sb"));
2011 	}
2012 
2013 	if (ch == OPL_CMU_CHANNEL) {
2014 		/* Copyout CMU-CH HW Descriptor */
2015 		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
2016 		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
2017 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2018 			    "Unable to copy out cmuch descriptor for %x",
2019 			    portid);
2020 			status = 0;
2021 		}
2022 	} else {
2023 		/* Copyout PCI-CH HW Descriptor */
2024 		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
2025 		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
2026 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2027 			    "Unable to copy out pcich descriptor for %x",
2028 			    portid);
2029 			status = 0;
2030 		}
2031 	}
2032 
2033 	cp->nresults = fc_int2cell(1);
2034 	fc_result(cp, 0) = status;
2035 
2036 	return (fc_success_op(ap, rp, cp));
2037 }
2038 
2039 /*
2040  * After Solaris boots, a user can enter OBP using L1A, etc. While in OBP,
2041  * interrupts may be received from PCI devices. These interrupts
2042  * cannot be handled meaningfully since the system is in OBP. These
2043  * interrupts need to be cleared on the CPU side so that the CPU may
2044  * continue with whatever it is doing. Devices that have raised the
2045  * interrupts are expected to reraise the interrupts after some time,
2046  * since they have not been handled. At that time, Solaris will have a
2047  * chance to properly service the interrupts.
2048  *
2049  * The location of the interrupt registers depends on what is present
2050  * at a port. OPL currently supports the Oberon and the CMU channel.
2051  * The following handler handles both kinds of ports and computes
2052  * interrupt register addresses from the specifications and Jupiter Bus
2053  * device bindings.
2054  *
2055  * Fcode drivers install their interrupt handler via a "master-interrupt"
2056  * service. For boot time devices, this takes place within OBP. In the case
2057  * of DR, OPL uses IKP. The Fcode drivers that run within the efcode framework
2058  * attempt to install their handler via the "master-interrupt" service.
2059  * However, we cannot meaningfully install the Fcode driver's handler.
2060  * Instead, we install our own handler in OBP which does the same thing.
2061  *
2062  * Note that the only handling done for interrupts here is to clear them
2063  * on the CPU side. If any device in the future requires more special
2064  * handling, we would have to put in some kind of framework for adding
2065  * device-specific handlers. This is *highly* unlikely, but possible.
2066  *
2067  * Finally, OBP provides a hook called "unix-interrupt-handler" to install
2068  * a Solaris-defined master-interrupt handler for a port. The default
2069  * definition for this method does nothing. Solaris may override this
2070  * with its own definition. This is the way the following handler gets
2071  * control from OBP when interrupts happen at a port after L1A, etc.
2072  */
2073 
2074 static char define_master_interrupt_handler[] =
2075 
2076 /*
2077  * This method translates an Oberon port id to the base (physical) address
2078  * of the interrupt clear registers for that port id.
2079  */
2080 
2081 ": pcich-mid>clear-int-pa   ( mid -- pa ) "
2082 "   dup 1 >> 7 and          ( mid ch# ) "
2083 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2084 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2085 "   swap d# 40 << or        ( mid ch# pa ) "
2086 "   swap d# 37 << or        ( mid pa ) "
2087 "   swap 1 and if h# 70.0000 else h# 60.0000 then "
2088 "   or h# 1400 or           ( pa ) "
2089 "; "
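
/*
 * Roughly equivalent C for the word above (illustrative only; OBP
 * interprets the Forth to do the real work):
 *
 *	ch  = (mid >> 1) & 0x7;
 *	lsb = (mid >> 4) & 0x1f;
 *	pa  = (1ULL << 46) | ((uint64_t)lsb << 40) | ((uint64_t)ch << 37);
 *	pa |= (mid & 1) ? 0x700000 : 0x600000;	(per-leaf register block)
 *	pa |= 0x1400;				(interrupt clear registers)
 */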
2090 
2091 /*
2092  * This method translates a CMU channel port id to the base (physical) address
2093  * of the interrupt clear registers for that port id. There are two classes of
2094  * interrupts that need to be handled for a CMU channel:
2095  *	- obio interrupts
2096  *	- pci interrupts
2097  * So, there are two addresses that need to be computed.
2098  */
2099 
2100 ": cmuch-mid>clear-int-pa   ( mid -- obio-pa pci-pa ) "
2101 "   dup 1 >> 7 and          ( mid ch# ) "
2102 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2103 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2104 "   swap d# 40 << or        ( mid ch# pa ) "
2105 "   swap d# 37 << or        ( mid pa ) "
2106 "   nip dup h# 1800 +       ( pa obio-pa ) "
2107 "   swap h# 1400 +          ( obio-pa pci-pa ) "
2108 "; "
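
/*
 * Roughly equivalent C for the word above (illustrative only):
 *
 *	ch   = (mid >> 1) & 0x7;
 *	lsb  = (mid >> 4) & 0x1f;
 *	base = (1ULL << 46) | ((uint64_t)lsb << 40) | ((uint64_t)ch << 37);
 *	obio_pa = base + 0x1800;
 *	pci_pa  = base + 0x1400;
 */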
2109 
2110 /*
2111  * This method checks if a given I/O port ID is valid or not.
2112  * For a given LSB,
2113  *	Oberon ports range from 0 - 3
2114  *	the CMU ch uses port 4 only
2115  *
2116  * Also, the Oberon supports leaves 0 and 1.
2117  * The CMU ch supports only one leaf, leaf 0.
2118  */
2119 
2120 ": valid-io-mid? ( mid -- flag ) "
2121 "   dup 1 >> 7 and                     ( mid ch# ) "
2122 "   dup 4 > if 2drop false exit then   ( mid ch# ) "
2123 "   4 = swap 1 and 1 = and not "
2124 "; "
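
/*
 * Roughly equivalent C for the word above (illustrative only):
 *
 *	ch = (mid >> 1) & 0x7;
 *	valid = (ch <= 4) && !(ch == 4 && (mid & 1) == 1);
 */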
2125 
2126 /*
2127  * This method checks if a given port id is a CMU ch.
2128  */
2129 
2130 ": cmuch? ( mid -- flag ) 1 >> 7 and 4 = ; "
2131 
2132 /*
2133  * Given the base address of the array of interrupt clear registers for
2134  * a port id, this method iterates over the given interrupt number bitmap
2135  * and resets the interrupt on the CPU side for every interrupt number
2136  * in the bitmap. Note that physical addresses are used to perform the
2137  * writes, not virtual addresses. This allows the handler to work without
2138  * any involvement from Solaris.
2139  */
2140 
2141 ": clear-ints ( pa bitmap count -- ) "
2142 "   0 do                            ( pa bitmap ) "
2143 "      dup 0= if 2drop unloop exit then "
2144 "      tuck                         ( bitmap pa bitmap ) "
2145 "      1 and if                     ( bitmap pa ) "
2146 "	 dup i 8 * + 0 swap         ( bitmap pa 0 pa' ) "
2147 "	 h# 15 spacex!              ( bitmap pa ) "
2148 "      then                         ( bitmap pa ) "
2149 "      swap 1 >>                    ( pa bitmap ) "
2150 "   loop "
2151 "; "
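
/*
 * Roughly equivalent C for the word above (illustrative only; the
 * stores are physical-address stores done by OBP via spacex!):
 *
 *	for (i = 0; i < count && bitmap != 0; i++, bitmap >>= 1) {
 *		if (bitmap & 1)
 *			store 0 to physical address (pa + i * 8);
 *	}
 */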
2152 
2153 /*
2154  * This method replaces the master-interrupt handler in OBP. Once
2155  * this method is plumbed into OBP, OBP transfers control to this
2156  * handler while returning to Solaris from OBP after L1A. This method's
2157  * task is to simply reset received interrupts on the CPU side.
2158  * When the devices reassert the interrupts later, Solaris will
2159  * be able to see them and handle them.
2160  *
2161  * For each port ID that has interrupts, this method is called
2162  * once by OBP. The input arguments are:
2163  *	mid	portid
2164  *	bitmap	bitmap of interrupts that have happened
2165  *
2166  * This method returns true if it is able to handle the interrupts.
2167  * OBP does nothing further.
2168  *
2169  * This method returns false if it encounters a problem. Currently,
2170  * the only problem could be an invalid port id. OBP needs to do
2171  * its own processing in that case. If this method returns false,
2172  * it preserves the mid and bitmap arguments for OBP.
2173  */
2174 
2175 ": unix-resend-mondos ( mid bitmap -- [ mid bitmap false ] | true ) "
2176 
2177 /*
2178  * Uncomment the following line if you want to display the input arguments.
2179  * This is meant for debugging.
2180  * "   .\" Bitmap=\" dup u. .\" MID=\" over u. cr "
2181  */
2182 
2183 /*
2184  * If the port id is not valid (according to the Oberon and CMU ch
2185  * specifications), then return false so that OBP can continue its
2186  * own processing.
2187  */
2188 
2189 "   over valid-io-mid? not if       ( mid bitmap ) "
2190 "      false exit "
2191 "   then "
2192 
2193 /*
2194  * If the port is a CMU ch, then the 64-bit bitmap represents
2195  * 2 32-bit bitmaps:
2196  *	- obio interrupt bitmap (20 bits)
2197  *	- pci interrupt bitmap (32 bits)
2198  *
2199  * - Split the bitmap into two
2200  * - Compute the base addresses of the interrupt clear registers
2201  *   for both pci interrupts and obio interrupts
2202  * - Clear obio interrupts
2203  * - Clear pci interrupts
2204  */
2205 
2206 "   over cmuch? if                  ( mid bitmap ) "
2207 "      xlsplit                      ( mid pci-bit obio-bit ) "
2208 "      rot cmuch-mid>clear-int-pa   ( pci-bit obio-bit obio-pa pci-pa ) "
2209 "      >r                           ( pci-bit obio-bit obio-pa ) ( r: pci-pa ) "
2210 "      swap d# 20 clear-ints        ( pci-bit ) ( r: pci-pa ) "
2211 "      r> swap d# 32 clear-ints     (  ) ( r: ) "
2212 
2213 /*
2214  * If the port is an Oberon, then the 64-bit bitmap is used fully.
2215  *
2216  * - Compute the base address of the interrupt clear registers
2217  * - Clear interrupts
2218  */
2219 
2220 "   else                            ( mid bitmap ) "
2221 "      swap pcich-mid>clear-int-pa  ( bitmap pa ) "
2222 "      swap d# 64 clear-ints        (  ) "
2223 "   then "
2224 
2225 /*
2226  * Always return true from here.
2227  */
2228 
2229 "   true                            ( true ) "
2230 "; "
2231 ;
2232 
2233 static char	install_master_interrupt_handler[] =
2234 	"' unix-resend-mondos to unix-interrupt-handler";
2235 static char	handler[] = "unix-interrupt-handler";
2236 static char	handler_defined[] = "p\" %s\" find nip swap l! ";
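
/*
 * handler_defined is a template for the Forth snippet
 *	p" <name>" find nip swap l!
 * When interpreted, find looks up the packed name and leaves a found
 * flag on top of the stack, nip keeps only that flag, swap brings up
 * the buffer address passed to prom_interpret() as its first argument,
 * and l! stores the 32-bit flag there. master_interrupt_init() below
 * uses this to check that the defer word exists.
 */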
2237 
2238 /*ARGSUSED*/
2239 static int
2240 master_interrupt_init(uint32_t portid, uint32_t xt)
2241 {
2242 	uint_t	defined;
2243 	char	buf[sizeof (handler) + sizeof (handler_defined)];
2244 
2245 	if (master_interrupt_inited)
2246 		return (1);
2247 
2248 	/*
2249 	 * Check if the defer word "unix-interrupt-handler" is defined.
2250 	 * This must be defined for OPL systems. So, this is only a
2251 	 * sanity check.
2252 	 */
2253 	(void) sprintf(buf, handler_defined, handler);
2254 	prom_interpret(buf, (uintptr_t)&defined, 0, 0, 0, 0);
2255 	if (!defined) {
2256 		cmn_err(CE_WARN, "master_interrupt_init: "
2257 		    "%s is not defined\n", handler);
2258 		return (0);
2259 	}
2260 
2261 	/*
2262 	 * Install the generic master-interrupt handler. Note that
2263 	 * this is done only once, on the first DR operation.
2264 	 * This is because, for OPL, a single generic handler
2265 	 * handles all ports (Oberon and CMU channel) and all
2266 	 * interrupt sources within each port.
2267 	 *
2268 	 * The current support is only for the Oberon and CMU-channel.
2269 	 * If any others need to be supported, the handler has to be
2270 	 * modified accordingly.
2271 	 */
2272 
2273 	/*
2274 	 * Define the OPL master interrupt handler
2275 	 */
2276 	prom_interpret(define_master_interrupt_handler, 0, 0, 0, 0, 0);
2277 
2278 	/*
2279 	 * Take over the master interrupt handler from OBP.
2280 	 */
2281 	prom_interpret(install_master_interrupt_handler, 0, 0, 0, 0, 0);
2282 
2283 	master_interrupt_inited = 1;
2284 
2285 	/*
2286 	 * prom_interpret() does not return a status. So, we assume
2287 	 * that the calls succeeded. In reality, the calls may fail
2288 	 * if there is a syntax error, etc., in the strings.
2289 	 */
2290 
2291 	return (1);
2292 }
2293 
2294 /*
2295  * Install the master-interrupt handler for a device.
2296  */
2297 static int
2298 opl_master_interrupt(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
2299 {
2300 	uint32_t	portid, xt;
2301 	int		board, channel, leaf;
2302 	int		status;
2303 
2304 	/* Check the argument */
2305 	if (fc_cell2int(cp->nargs) != 2)
2306 		return (fc_syntax_error(cp, "nargs must be 2"));
2307 
2308 	if (fc_cell2int(cp->nresults) < 1)
2309 		return (fc_syntax_error(cp, "nresults must be >= 1"));
2310 
2311 	/* Get the parameters */
2312 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
2313 	xt = fc_cell2uint32_t(fc_arg(cp, 1));
2314 
2315 	board = OPL_IO_PORTID_TO_LSB(portid);
2316 	channel = OPL_PORTID_TO_CHANNEL(portid);
2317 	leaf = OPL_PORTID_TO_LEAF(portid);
2318 
2319 	if ((board >= HWD_SBS_PER_DOMAIN) || !OPL_VALID_CHANNEL(channel) ||
2320 	    (OPL_OBERON_CHANNEL(channel) && !OPL_VALID_LEAF(leaf)) ||
2321 	    ((channel == OPL_CMU_CHANNEL) && (leaf != 0))) {
2322 		FC_DEBUG1(1, CE_CONT, "opl_master_interrupt: invalid port %x\n",
2323 		    portid);
2324 		status = 0;
2325 	} else {
2326 		status = master_interrupt_init(portid, xt);
2327 	}
2328 
2329 	cp->nresults = fc_int2cell(1);
2330 	fc_result(cp, 0) = status;
2331 
2332 	return (fc_success_op(ap, rp, cp));
2333 }
2334 
2335 /*
2336  * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
2337  */
2338 /*ARGSUSED*/
2339 static int
2340 opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
2341 {
2342 	int ret;
2343 
2344 	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);
2345 
2346 	OPL_UPDATE_PROP(string, node, "status", "okay");
2347 
2348 	return (DDI_WALK_TERMINATE);
2349 }
2350 
2351 static char *
2352 opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
2353 {
2354 	char 		*probe_string;
2355 	int		portid;
2356 
2357 	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);
2358 
2359 	if (channel == OPL_CMU_CHANNEL)
2360 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2361 	else
2362 		portid = probe->
2363 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2364 
2365 	(void) sprintf(probe_string, "%x", portid);
2366 
2367 	return (probe_string);
2368 }
2369 
2370 static int
2371 opl_probe_leaf(opl_probe_t *probe)
2372 {
2373 	int		channel, leaf, portid, error, circ;
2374 	int		board;
2375 	fco_handle_t	fco_handle, *cfg_handle;
2376 	dev_info_t	*parent, *leaf_node;
2377 	char		unit_address[UNIT_ADDR_SIZE];
2378 	char		*probe_string;
2379 	opl_board_cfg_t	*board_cfg;
2380 
2381 	board = probe->pr_board;
2382 	channel = probe->pr_channel;
2383 	leaf = probe->pr_leaf;
2384 	parent = ddi_root_node();
2385 	board_cfg = &opl_boards[board];
2386 
2387 	ASSERT(OPL_VALID_CHANNEL(channel));
2388 	ASSERT(OPL_VALID_LEAF(leaf));
2389 
2390 	if (channel == OPL_CMU_CHANNEL) {
2391 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2392 		cfg_handle = &board_cfg->cfg_cmuch_handle;
2393 	} else {
2394 		portid = probe->
2395 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2396 		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
2397 	}
2398 
2399 	/*
2400 	 * Prevent any changes to leaf_node until we have bound
2401 	 * it to the correct driver.
2402 	 */
2403 	ndi_devi_enter(parent, &circ);
2404 
2405 	/*
2406 	 * Ideally, fcode would be run from the "sid_branch_create"
2407 	 * callback (that is the primary purpose of that callback).
2408 	 * However, the fcode interpreter was written with the
2409 	 * assumption that the "new_child" was linked into the
2410 	 * device tree. The callback is invoked with the devinfo node
2411 	 * in the DS_PROTO state. More investigation is needed before
2412 	 * we can invoke the interpreter from the callback. For now,
2413 	 * we create the "new_child" in the BOUND state, invoke the
2414 	 * fcode interpreter and then rebind the dip to use any
2415 	 * compatible properties created by fcode.
2416 	 */
2417 
2418 	probe->pr_parent = parent;
2419 	probe->pr_create = opl_create_leaf;
2420 	probe->pr_hold = 1;
2421 
2422 	leaf_node = opl_create_node(probe);
2423 	if (leaf_node == NULL) {
2424 
2425 		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
2426 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2427 		ndi_devi_exit(parent, circ);
2428 		return (-1);
2429 	}
2430 
2431 	/*
2432 	 * The platform DR interfaces created the dip in the
2433 	 * bound state. Bring the devinfo node down to the linked
2434 	 * state and hold it there until compatible
2435 	 * properties are created.
2436 	 */
2437 	e_ddi_branch_rele(leaf_node);
2438 	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
2439 	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
2440 	e_ddi_branch_hold(leaf_node);
2441 
2442 	mutex_enter(&DEVI(leaf_node)->devi_lock);
2443 	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
2444 	mutex_exit(&DEVI(leaf_node)->devi_lock);
2445 
2446 	/*
2447 	 * Drop the busy-hold on parent before calling
2448 	 * fcode_interpreter to prevent potential deadlocks
2449 	 */
2450 	ndi_devi_exit(parent, circ);
2451 
2452 	(void) sprintf(unit_address, "%x", portid);
2453 
2454 	/*
2455 	 * Get the probe string
2456 	 */
2457 	probe_string = opl_get_probe_string(probe, channel, leaf);
2458 
2459 	/*
2460 	 * The fcode pointer specified here is NULL and the fcode
2461 	 * size specified here is 0. This causes the user-level
2462 	 * fcode interpreter to issue a request to the fcode
2463 	 * driver to get the Oberon/cmu-ch fcode.
2464 	 */
2465 	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
2466 	    NULL, 0, unit_address, probe_string);
2467 
2468 	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);
2469 
2470 	if (error != 0) {
2471 		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
2472 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2473 
2474 		opl_fc_ops_free_handle(fco_handle);
2475 
2476 		if (probe_string != NULL)
2477 			kmem_free(probe_string, PROBE_STR_SIZE);
2478 
2479 		(void) opl_destroy_node(leaf_node);
2480 	} else {
2481 		*cfg_handle = fco_handle;
2482 
2483 		if (channel == OPL_CMU_CHANNEL)
2484 			board_cfg->cfg_cmuch_probe_str = probe_string;
2485 		else
2486 			board_cfg->cfg_pcich_probe_str[channel][leaf] =
2487 			    probe_string;
2488 
2489 		/*
2490 		 * Compatible properties (if any) have been created,
2491 		 * so bind driver.
2492 		 */
2493 		ndi_devi_enter(parent, &circ);
2494 		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);
2495 
2496 		mutex_enter(&DEVI(leaf_node)->devi_lock);
2497 		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
2498 		mutex_exit(&DEVI(leaf_node)->devi_lock);
2499 
2500 		ndi_devi_exit(parent, circ);
2501 
2502 		if (ndi_devi_bind_driver(leaf_node, 0) != DDI_SUCCESS) {
2503 			cmn_err(CE_WARN, "IKP: Unable to bind PCI leaf "
2504 			    "(%d-%d-%d)", probe->pr_board, probe->pr_channel,
2505 			    probe->pr_leaf);
2506 		}
2507 	}
2508 
2509 	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
2510 		return (-1);
2511 
2512 	return (0);
2513 }
2514 
2515 static void
2516 opl_init_leaves(int myboard)
2517 {
2518 	dev_info_t	*parent, *node;
2519 	char		*name;
2520 	int 		circ, ret;
2521 	int		len, portid, board, channel, leaf;
2522 	opl_board_cfg_t	*cfg;
2523 
2524 	parent = ddi_root_node();
2525 
2526 	/*
2527 	 * Hold parent node busy to walk its child list
2528 	 */
2529 	ndi_devi_enter(parent, &circ);
2530 
2531 	for (node = ddi_get_child(parent); (node != NULL); node =
2532 	    ddi_get_next_sibling(node)) {
2533 
2534 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
2535 		if (ret != DDI_PROP_SUCCESS) {
2536 			/*
2537 			 * The property does not exist for this node.
2538 			 */
2539 			continue;
2540 		}
2541 
2542 		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
2543 
2544 			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
2545 			if (ret == DDI_PROP_SUCCESS) {
2546 
2547 				ret = OPL_GET_PROP(int, node, "board#",
2548 				    &board, -1);
2549 				if ((ret != DDI_PROP_SUCCESS) ||
2550 				    (board != myboard)) {
2551 					kmem_free(name, len);
2552 					continue;
2553 				}
2554 
2555 				cfg = &opl_boards[board];
2556 				channel = OPL_PORTID_TO_CHANNEL(portid);
2557 				if (channel == OPL_CMU_CHANNEL) {
2558 
2559 					if (cfg->cfg_cmuch_handle != NULL)
2560 						cfg->cfg_cmuch_leaf = node;
2561 
2562 				} else {
2563 
2564 					leaf = OPL_PORTID_TO_LEAF(portid);
2565 					if (cfg->cfg_pcich_handle[
2566 					    channel][leaf] != NULL)
2567 						cfg->cfg_pcich_leaf[
2568 						    channel][leaf] = node;
2569 				}
2570 			}
2571 		}
2572 
2573 		kmem_free(name, len);
2574 		if (ret != DDI_PROP_SUCCESS)
2575 			break;
2576 	}
2577 
2578 	ndi_devi_exit(parent, circ);
2579 }
2580 
2581 /*
2582  * Create "pci" node and hierarchy for the Oberon channels and the
2583  * CMU channel.
2584  */
2585 /*ARGSUSED*/
2586 static int
2587 opl_probe_io(opl_probe_t *probe)
2588 {
2589 
2590 	int		i, j;
2591 	hwd_pci_ch_t	*channels;
2592 
2593 	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {
2594 
2595 		probe->pr_channel = HWD_CMU_CHANNEL;
2596 		probe->pr_channel_status =
2597 		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
2598 		probe->pr_leaf = 0;
2599 		probe->pr_leaf_status = probe->pr_channel_status;
2600 
2601 		if (opl_probe_leaf(probe) != 0)
2602 			return (-1);
2603 	}
2604 
2605 	channels = &probe->pr_sb->sb_pci_ch[0];
2606 
2607 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2608 
2609 		if (!HWD_STATUS_OK(channels[i].pci_status))
2610 			continue;
2611 
2612 		probe->pr_channel = i;
2613 		probe->pr_channel_status = channels[i].pci_status;
2614 
2615 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2616 
2617 			probe->pr_leaf = j;
2618 			probe->pr_leaf_status =
2619 			    channels[i].pci_leaf[j].leaf_status;
2620 
2621 			if (!HWD_STATUS_OK(probe->pr_leaf_status))
2622 				continue;
2623 
2624 			(void) opl_probe_leaf(probe);
2625 		}
2626 	}
2627 	opl_init_leaves(probe->pr_board);
2628 	return (0);
2629 }
2630 
2631 /*
2632  * Perform the probe in the following order:
2633  *
2634  *	processors
2635  *	memory
2636  *	IO
2637  *
2638  * Each probe function returns 0 on success and a non-zero value on failure.
2639  * What is a failure is determined by the implementor of the probe function.
2640  * For example, while probing CPUs, any error encountered during probe
2641  * is considered a failure and causes the whole probe operation to fail.
2642  * However, for I/O, an error encountered while probing one device
2643  * should not prevent other devices from being probed. It should not cause
2644  * the whole probe operation to fail.
2645  */
2646 int
2647 opl_probe_sb(int board, unsigned *cpu_impl)
2648 {
2649 	opl_probe_t	*probe;
2650 	int		ret;
2651 
2652 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2653 		return (-1);
2654 
2655 	ASSERT(opl_cfg_inited != 0);
2656 
2657 	/*
2658 	 * If the previous probe failed and left a partially configured
2659 	 * board, we need to unprobe the board and start with a clean slate.
2660 	 */
2661 	if ((opl_boards[board].cfg_hwd != NULL) &&
2662 	    (opl_unprobe_sb(board) != 0))
2663 		return (-1);
2664 
2665 	ret = 0;
2666 
2667 	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
2668 	probe->pr_board = board;
2669 
2670 	if ((opl_probe_init(probe) != 0) ||
2671 
2672 	    (opl_probe_cpu_chips(probe) != 0) ||
2673 
2674 	    (opl_probe_memory(probe) != 0) ||
2675 
2676 	    (opl_probe_io(probe) != 0)) {
2677 
2678 		/*
2679 		 * Probe failed. Perform cleanup.
2680 		 */
2681 		(void) opl_unprobe_sb(board);
2682 		ret = -1;
2683 	}
2684 
2685 	*cpu_impl = probe->pr_cpu_impl;
2686 
2687 	kmem_free(probe, sizeof (opl_probe_t));
2688 
2689 	return (ret);
2690 }
2691 
2692 /*
2693  * This I/O unprobe also covers the CMU-CH.
2694  */
2695 /*ARGSUSED*/
2696 static int
2697 opl_unprobe_io(int board)
2698 {
2699 	int		i, j, ret;
2700 	opl_board_cfg_t	*board_cfg;
2701 	dev_info_t	**node;
2702 	fco_handle_t	*hand;
2703 	char		**probe_str;
2704 
2705 	board_cfg = &opl_boards[board];
2706 
2707 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2708 
2709 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2710 
2711 			node = &board_cfg->cfg_pcich_leaf[i][j];
2712 			hand = &board_cfg->cfg_pcich_handle[i][j];
2713 			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];
2714 
2715 			if (*node == NULL)
2716 				continue;
2717 
2718 			if (*hand != NULL) {
2719 				opl_fc_ops_free_handle(*hand);
2720 				*hand = NULL;
2721 			}
2722 
2723 			if (*probe_str != NULL) {
2724 				kmem_free(*probe_str, PROBE_STR_SIZE);
2725 				*probe_str = NULL;
2726 			}
2727 
2728 			ret = opl_destroy_node(*node);
2729 			if (ret != 0) {
2730 
2731 				cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) "
2732 				    "failed", board, i, j);
2733 				return (-1);
2734 			}
2735 
2736 			*node = NULL;
2737 
2738 		}
2739 	}
2740 
2741 	node = &board_cfg->cfg_cmuch_leaf;
2742 	hand = &board_cfg->cfg_cmuch_handle;
2743 	probe_str = &board_cfg->cfg_cmuch_probe_str;
2744 
2745 	if (*node == NULL)
2746 		return (0);
2747 
2748 	if (*hand != NULL) {
2749 		opl_fc_ops_free_handle(*hand);
2750 		*hand = NULL;
2751 	}
2752 
2753 	if (*probe_str != NULL) {
2754 		kmem_free(*probe_str, PROBE_STR_SIZE);
2755 		*probe_str = NULL;
2756 	}
2757 
2758 	if (opl_destroy_node(*node) != 0) {
2759 
2760 		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed", board,
2761 		    OPL_CMU_CHANNEL, 0);
2762 		return (-1);
2763 	}
2764 
2765 	*node = NULL;
2766 
2767 	return (0);
2768 }
2769 
2770 /*
2771  * Destroy the "pseudo-mc" node for a board.
2772  */
2773 static int
2774 opl_unprobe_memory(int board)
2775 {
2776 	opl_board_cfg_t	*board_cfg;
2777 
2778 	board_cfg = &opl_boards[board];
2779 
2780 	if (board_cfg->cfg_pseudo_mc == NULL)
2781 		return (0);
2782 
2783 	if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) {
2784 
2785 		cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board);
2786 		return (-1);
2787 	}
2788 
2789 	board_cfg->cfg_pseudo_mc = NULL;
2790 
2791 	return (0);
2792 }
2793 
2794 /*
2795  * Destroy the "cmp" nodes for a board. This also destroys the "core"
2796  * and "cpu" nodes below the "cmp" nodes.
2797  */
2798 static int
2799 opl_unprobe_processors(int board)
2800 {
2801 	int		i;
2802 	dev_info_t	**cfg_cpu_chips;
2803 
2804 	cfg_cpu_chips = opl_boards[board].cfg_cpu_chips;
2805 
2806 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
2807 
2808 		if (cfg_cpu_chips[i] == NULL)
2809 			continue;
2810 
2811 		if (opl_destroy_node(cfg_cpu_chips[i]) != 0) {
2812 
2813 			cmn_err(CE_WARN, "IKP: destroy chip (%d-%d) failed",
2814 			    board, i);
2815 			return (-1);
2816 		}
2817 
2818 		cfg_cpu_chips[i] = NULL;
2819 	}
2820 
2821 	return (0);
2822 }
2823 
2824 /*
2825  * Perform the unprobe in the following order:
2826  *
2827  *	IO
2828  *	memory
2829  *	processors
2830  */
2831 int
2832 opl_unprobe_sb(int board)
2833 {
2834 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2835 		return (-1);
2836 
2837 	ASSERT(opl_cfg_inited != 0);
2838 
2839 	if ((opl_unprobe_io(board) != 0) ||
2840 
2841 	    (opl_unprobe_memory(board) != 0) ||
2842 
2843 	    (opl_unprobe_processors(board) != 0))
2844 
2845 		return (-1);
2846 
2847 	if (opl_boards[board].cfg_hwd != NULL) {
2848 #ifdef UCTEST
2849 		size_t			size = 0xA000;
2850 #endif
2851 		/* Release the memory for the HWD */
2852 		void *hwdp = opl_boards[board].cfg_hwd;
2853 		opl_boards[board].cfg_hwd = NULL;
2854 #ifdef UCTEST
2855 		hwdp = (void *)((char *)hwdp - 0x1000);
2856 		hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK);
2857 		vmem_free(heap_arena, hwdp, size);
2858 #else
2859 		kmem_free(hwdp, HWD_DATA_SIZE);
2860 #endif
2861 	}
2862 	return (0);
2863 }
2864 
2865 /*
2866  * For MAC patrol support, we need to update the PA-related properties
2867  * when there is a copy-rename event.  This should be called after the
2868  * physical copy and rename has been done by DR, and before the MAC
2869  * patrol is restarted.
2870  */
2871 int
2872 oplcfg_pa_swap(int from, int to)
2873 {
2874 	dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc;
2875 	dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc;
2876 	opl_range_t *rangef, *ranget;
2877 	int elems;
2878 	int ret;
2879 
2880 	if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef,
2881 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2882 		/* XXX -- bad news */
2883 		return (-1);
2884 	}
2885 	if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget,
2886 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2887 		/* XXX -- bad news */
2888 		return (-1);
2889 	}
2890 	OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget,
2891 	    4);
2892 	OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef,
2893 	    4);
2894 
2895 	OPL_FREE_PROP(ranget);
2896 	OPL_FREE_PROP(rangef);
2897 
2898 	return (0);
2899 }
2900