xref: /titanic_44/usr/src/uts/sun4u/io/opl_cfg.c (revision 88294e09b5c27cbb12b6735e2fb247a86b76666d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/kmem.h>
28 #include <sys/debug.h>
29 #include <sys/modctl.h>
30 #include <sys/autoconf.h>
31 #include <sys/hwconf.h>
32 #include <sys/ddi_impldefs.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunndi.h>
36 #include <sys/ndi_impldefs.h>
37 #include <sys/machsystm.h>
38 #include <sys/fcode.h>
39 #include <sys/promif.h>
40 #include <sys/promimpl.h>
41 #include <sys/opl_cfg.h>
42 #include <sys/scfd/scfostoescf.h>
43 
44 static unsigned int		opl_cfg_inited;
45 static opl_board_cfg_t		opl_boards[HWD_SBS_PER_DOMAIN];
46 
47 /*
48  * Module control operations
49  */
50 
51 extern struct mod_ops mod_miscops;
52 
53 static struct modlmisc modlmisc = {
54 	&mod_miscops,				/* Type of module */
55 	"OPL opl_cfg"
56 };
57 
58 static struct modlinkage modlinkage = {
59 	MODREV_1, (void *)&modlmisc, NULL
60 };
61 
62 static int	opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
63 static int	opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
64 static int	opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
65 static int	opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
66 
67 static int	opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
68 static int	opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
69 static int	opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);
70 
71 static int	opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
72 
73 static int	opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
74 static int	opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
75 
76 static int	opl_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
77 				ddi_device_acc_attr_t *, ddi_acc_handle_t *);
78 static void	opl_unmap_phys(ddi_acc_handle_t *);
79 static int	opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);
80 static int	opl_master_interrupt(dev_info_t *, fco_handle_t, fc_ci_t *);
81 
82 extern int	prom_get_fcode_size(char *);
83 extern int	prom_get_fcode(char *, char *);
84 
85 static int	master_interrupt_init(uint32_t, uint32_t);
86 
87 #define	PROBE_STR_SIZE	64
88 #define	UNIT_ADDR_SIZE	64
89 
90 opl_fc_ops_t	opl_fc_ops[] = {
91 
92 	{	FC_MAP_IN,		opl_map_in},
93 	{	FC_MAP_OUT,		opl_map_out},
94 	{	"rx@",			opl_register_fetch},
95 	{	FC_RL_FETCH,		opl_register_fetch},
96 	{	FC_RW_FETCH,		opl_register_fetch},
97 	{	FC_RB_FETCH,		opl_register_fetch},
98 	{	"rx!",			opl_register_store},
99 	{	FC_RL_STORE,		opl_register_store},
100 	{	FC_RW_STORE,		opl_register_store},
101 	{	FC_RB_STORE,		opl_register_store},
102 	{	"claim-memory",		opl_claim_memory},
103 	{	"release-memory",	opl_release_memory},
104 	{	"vtop",			opl_vtop},
105 	{	FC_CONFIG_CHILD,	opl_config_child},
106 	{	FC_GET_FCODE_SIZE,	opl_get_fcode_size},
107 	{	FC_GET_FCODE,		opl_get_fcode},
108 	{	"get-hwd-va",		opl_get_hwd_va},
109 	{	"master-interrupt",	opl_master_interrupt},
110 	{	NULL,			NULL}
111 
112 };
113 
114 extern caddr_t	efcode_vaddr;
115 extern int	efcode_size;
116 
117 #ifdef DEBUG
118 #define	HWDDUMP_OFFSETS		1
119 #define	HWDDUMP_ALL_STATUS	2
120 #define	HWDDUMP_CHUNKS		3
121 #define	HWDDUMP_SBP		4
122 
123 int		hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
124 #endif
125 
126 static int	master_interrupt_inited = 0;
127 
128 int
129 _init()
130 {
131 	int	err = 0;
132 
133 	/*
134 	 * Create a resource map for the contiguous memory allocated
135 	 * at start-of-day in startup.c
136 	 */
137 	err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
138 	if (err == NDI_FAILURE) {
139 		cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
140 		return (1);
141 	}
142 
143 	/*
144 	 * Put the allocated memory into the pool.
145 	 */
146 	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
147 	    (uint64_t)efcode_size, "opl-fcodemem", 0);
148 
149 	if ((err = mod_install(&modlinkage)) != 0) {
150 		cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
151 		(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
152 	}
153 
154 	return (err);
155 }
156 
157 int
158 _fini(void)
159 {
160 	int ret;
161 
162 	ret = (mod_remove(&modlinkage));
163 	if (ret != 0)
164 		return (ret);
165 
166 	(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
167 
168 	return (ret);
169 }
170 
171 int
172 _info(modinfop)
173 struct modinfo *modinfop;
174 {
175 	return (mod_info(&modlinkage, modinfop));
176 }
177 
178 #ifdef DEBUG
179 static void
180 opl_dump_hwd(opl_probe_t *probe)
181 {
182 	hwd_header_t		*hdrp;
183 	hwd_sb_status_t		*statp;
184 	hwd_domain_info_t	*dinfop;
185 	hwd_sb_t		*sbp;
186 	hwd_cpu_chip_t		*chips;
187 	hwd_pci_ch_t		*channels;
188 	int			board, i, status;
189 
190 	board = probe->pr_board;
191 
192 	hdrp = probe->pr_hdr;
193 	statp = probe->pr_sb_status;
194 	dinfop = probe->pr_dinfo;
195 	sbp = probe->pr_sb;
196 
197 	printf("HWD: board %d\n", board);
198 	printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
199 	printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
200 	    hdrp->hdr_version.minor);
201 
202 	if (hwddump_flags & HWDDUMP_OFFSETS) {
203 		printf("HWD:status offset = 0x%x\n",
204 		    hdrp->hdr_sb_status_offset);
205 		printf("HWD:domain offset = 0x%x\n",
206 		    hdrp->hdr_domain_info_offset);
207 		printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
208 	}
209 
210 	if (hwddump_flags & HWDDUMP_SBP)
211 		printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);
212 
213 	if (hwddump_flags & HWDDUMP_ALL_STATUS) {
214 		int bd;
215 		printf("HWD:board status =");
216 		for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
217 			printf("%x ", statp->sb_status[bd]);
218 		printf("\n");
219 	} else {
220 		printf("HWD:board status = %d\n", statp->sb_status[board]);
221 	}
222 
223 	printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
224 	printf("HWD:platform = %s\n", dinfop->dinf_platform_token);
225 
226 	printf("HWD:chip status:\n");
227 	chips = &sbp->sb_cmu.cmu_cpu_chips[0];
228 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
229 
230 		status = chips[i].chip_status;
231 		printf("chip[%d] = ", i);
232 		if (HWD_STATUS_NONE(status))
233 			printf("none");
234 		else if (HWD_STATUS_FAILED(status))
235 			printf("fail");
236 		else if (HWD_STATUS_OK(status))
237 			printf("ok");
238 		printf("\n");
239 	}
240 
241 	if (hwddump_flags & HWDDUMP_CHUNKS) {
242 		int chunk;
243 		hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
244 		printf("HWD:chunks:\n");
245 		for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
246 			printf("\t%d 0x%lx 0x%lx\n", chunk,
247 			    mem->mem_chunks[chunk].chnk_start_address,
248 			    mem->mem_chunks[chunk].chnk_size);
249 	}
250 
251 	printf("HWD:channel status:\n");
252 	channels = &sbp->sb_pci_ch[0];
253 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
254 
255 		status = channels[i].pci_status;
256 		printf("channels[%d] = ", i);
257 		if (HWD_STATUS_NONE(status))
258 			printf("none");
259 		else if (HWD_STATUS_FAILED(status))
260 			printf("fail");
261 		else if (HWD_STATUS_OK(status))
262 			printf("ok");
263 		printf("\n");
264 	}
265 	printf("channels[%d] = ", i);
266 	status = sbp->sb_cmu.cmu_ch.chan_status;
267 	if (HWD_STATUS_NONE(status))
268 		printf("none");
269 	else if (HWD_STATUS_FAILED(status))
270 		printf("fail");
271 	else if (HWD_STATUS_OK(status))
272 		printf("ok");
273 	printf("\n");
274 }
275 #endif /* DEBUG */
276 
277 #ifdef UCTEST
278 	/*
279 	 * For SesamI debugging, just map the SRAM directly to a kernel
280 	 * VA and read it out from there
281 	 */
282 
283 #include <sys/vmem.h>
284 #include <vm/seg_kmem.h>
285 
286 /*
287  * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
288  * at page boundaries. So, we use a base address of 0x4081F1322000LL.
289  * Note that this has to match the HWD base pa set in .sesami-common-defs.
290  *
291  * The size specified for the HWD in the SCF spec is 36K. But since
292  * we adjusted the base address by 4K, we need to use 40K for the
293  * mapping size to cover the HWD. And 40K is also a multiple of the
294  * base page size.
295  */
296 #define	OPL_HWD_BASE(lsb)       \
297 (0x4081F1322000LL | (((uint64_t)(lsb)) << 40))
298 
299 	void    *opl_hwd_vaddr;
300 #endif /* UCTEST */
301 
302 /*
303  * Get the hardware descriptor from SCF.
304  */
305 
306 /*ARGSUSED*/
307 int
308 opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
309 	hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
310 {
311 	static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
312 	    void *) = NULL;
313 	void *hwdp;
314 
315 	uint32_t key = KEY_ESCF;	/* required value */
316 	uint8_t  type = 0x40;		/* SUB_OS_RECEIVE_HWD */
317 	uint32_t transid = board;
318 	uint32_t datasize = HWD_DATA_SIZE;
319 
320 	hwd_header_t		*hd;
321 	hwd_sb_status_t		*st;
322 	hwd_domain_info_t	*di;
323 	hwd_sb_t		*sb;
324 
325 	int	ret;
326 
327 	if (opl_boards[board].cfg_hwd == NULL) {
328 #ifdef UCTEST
329 		/*
330 		 * Just map the HWD in SRAM to a kernel VA
331 		 */
332 
333 		size_t			size;
334 		pfn_t			pfn;
335 
336 		size = 0xA000;
337 
338 		opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
339 		if (opl_hwd_vaddr == NULL) {
340 			cmn_err(CE_NOTE, "No space for HWD");
341 			return (-1);
342 		}
343 
344 		pfn = btop(OPL_HWD_BASE(board));
345 		hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
346 		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
347 
348 		hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
349 		opl_boards[board].cfg_hwd = hwdp;
350 		ret = 0;
351 #else
352 
353 		/* find the scf_service_getinfo() function */
354 		if (getinfop == NULL)
355 			getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
356 			    uint32_t *,
357 			    void *))modgetsymvalue("scf_service_getinfo", 0);
358 
359 		if (getinfop == NULL)
360 			return (-1);
361 
362 		/* allocate memory to receive the data */
363 		hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);
364 
365 		/* get the HWD */
366 		ret = (*getinfop)(key, type, transid, &datasize, hwdp);
367 		if (ret == 0)
368 			opl_boards[board].cfg_hwd = hwdp;
369 		else
370 			kmem_free(hwdp, HWD_DATA_SIZE);
371 #endif
372 	} else {
373 		hwdp = opl_boards[board].cfg_hwd;
374 		ret = 0;
375 	}
376 
377 	/* copy the data to the destination */
378 	if (ret == 0) {
379 		hd = (hwd_header_t *)hwdp;
380 		st = (hwd_sb_status_t *)
381 		    ((char *)hwdp + hd->hdr_sb_status_offset);
382 		di = (hwd_domain_info_t *)
383 		    ((char *)hwdp + hd->hdr_domain_info_offset);
384 		sb = (hwd_sb_t *)
385 		    ((char *)hwdp + hd->hdr_sb_info_offset);
386 		if (hdrp != NULL)
387 			*hdrp = hd;
388 		if (statp != NULL)
389 			*statp = st;
390 		if (dinfop != NULL)
391 			*dinfop = di;
392 		if (sbp != NULL)
393 			*sbp = sb;
394 	}
395 
396 	return (ret);
397 }
398 
399 /*
400  * The opl_probe_t probe structure is used to pass all sorts of parameters
401  * to callback functions during probing. It also contains a snapshot of
402  * the hardware descriptor that is taken at the beginning of a probe.
403  */
404 static int
405 opl_probe_init(opl_probe_t *probe)
406 {
407 	hwd_header_t		**hdrp;
408 	hwd_sb_status_t		**statp;
409 	hwd_domain_info_t	**dinfop;
410 	hwd_sb_t		**sbp;
411 	int			board, ret;
412 
413 	board = probe->pr_board;
414 
415 	hdrp = &probe->pr_hdr;
416 	statp = &probe->pr_sb_status;
417 	dinfop = &probe->pr_dinfo;
418 	sbp = &probe->pr_sb;
419 
420 	/*
421 	 * Read the hardware descriptor.
422 	 */
423 	ret = opl_read_hwd(board, hdrp, statp, dinfop, sbp);
424 	if (ret != 0) {
425 
426 		cmn_err(CE_WARN, "IKP: failed to read HWD header");
427 		return (-1);
428 	}
429 
430 #ifdef DEBUG
431 	opl_dump_hwd(probe);
432 #endif
433 	return (0);
434 }
435 
436 /*
437  * This function is used to obtain pointers to relevant device nodes
438  * which are created by Solaris at boot time.
439  *
440  * This function walks the child nodes of a given node, extracts
441  * the "name" property, if it exists, and passes the node to a
442  * callback init function. The callback determines if this node is
443  * interesting or not. If it is, then a pointer to the node is
444  * stored away by the callback for use during unprobe.
445  *
446  * The DDI get property function allocates storage for the name
447  * property. That needs to be freed within this function.
448  */
449 static int
450 opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
451 {
452 	dev_info_t	*node;
453 	char		*name;
454 	int 		circ, ret;
455 	int		len;
456 
457 	ASSERT(parent != NULL);
458 
459 	/*
460 	 * Hold parent node busy to walk its child list
461 	 */
462 	ndi_devi_enter(parent, &circ);
463 	node = ddi_get_child(parent);
464 
465 	while (node != NULL) {
466 
467 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
468 		if (ret != DDI_PROP_SUCCESS) {
469 			/*
470 			 * The property does not exist for this node.
471 			 */
472 			node = ddi_get_next_sibling(node);
473 			continue;
474 		}
475 
476 		ret = init(node, name, len);
477 		kmem_free(name, len);
478 		if (ret != 0) {
479 
480 			ndi_devi_exit(parent, circ);
481 			return (-1);
482 		}
483 
484 		node = ddi_get_next_sibling(node);
485 	}
486 
487 	ndi_devi_exit(parent, circ);
488 
489 	return (0);
490 }
491 
492 /*
493  * This init function finds all the interesting nodes under the
494  * root node and stores pointers to them. The following nodes
495  * are considered interesting by this implementation:
496  *
497  *	"cmp"
498  *		These are nodes that represent processor chips.
499  *
500  *	"pci"
501  *		These are nodes that represent PCI leaves.
502  *
503  *	"pseudo-mc"
504  *		These are nodes that contain memory information.
505  */
506 static int
507 opl_init_root_nodes(dev_info_t *node, char *name, int len)
508 {
509 	int		portid, board, chip, channel, leaf;
510 	int		ret;
511 
512 	if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {
513 
514 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
515 		if (ret != DDI_PROP_SUCCESS)
516 			return (-1);
517 
518 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
519 		if (ret != DDI_PROP_SUCCESS)
520 			return (-1);
521 
522 		chip = OPL_CPU_CHIP(portid);
523 		opl_boards[board].cfg_cpu_chips[chip] = node;
524 
525 	} else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
526 
527 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
528 		if (ret != DDI_PROP_SUCCESS)
529 			return (-1);
530 
531 		board = OPL_IO_PORTID_TO_LSB(portid);
532 		channel = OPL_PORTID_TO_CHANNEL(portid);
533 
534 		if (channel == OPL_CMU_CHANNEL) {
535 
536 			opl_boards[board].cfg_cmuch_leaf = node;
537 
538 		} else {
539 
540 			leaf = OPL_PORTID_TO_LEAF(portid);
541 			opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
542 		}
543 	} else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {
544 
545 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
546 		if (ret != DDI_PROP_SUCCESS)
547 			return (-1);
548 
549 		ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));
550 
551 		opl_boards[board].cfg_pseudo_mc = node;
552 	}
553 
554 	return (0);
555 }
556 
557 /*
558  * This function initializes the OPL IKP feature. Currently, all it does
559  * is find the interesting nodes that Solaris has created at boot time
560  * for boards present at boot time and store pointers to them. This
561  * is useful if those boards are unprobed by DR.
562  */
563 int
564 opl_init_cfg()
565 {
566 	dev_info_t	*root;
567 
568 	if (opl_cfg_inited == 0) {
569 
570 		root = ddi_root_node();
571 		if ((opl_init_nodes(root, opl_init_root_nodes) != 0)) {
572 			cmn_err(CE_WARN, "IKP: init failed");
573 			return (1);
574 		}
575 
576 		opl_cfg_inited = 1;
577 	}
578 
579 	return (0);
580 }
581 
582 /*
583  * When DR is initialized, we walk the device tree and acquire a hold on
584  * all the nodes that are interesting to IKP. This is so that the corresponding
585  * branches cannot be deleted.
586  *
587  * The following function informs the walk about which nodes are interesting
588  * so that it can hold the corresponding branches.
589  */
590 static int
591 opl_hold_node(char *name)
592 {
593 	/*
594 	 * We only need to hold/release the following nodes which
595 	 * represent separate branches that must be managed.
596 	 */
597 	return ((strcmp(name, OPL_CPU_CHIP_NODE) == 0) ||
598 	    (strcmp(name, OPL_PSEUDO_MC_NODE) == 0) ||
599 	    (strcmp(name, OPL_PCI_LEAF_NODE) == 0));
600 }
601 
602 static int
603 opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
604 {
605 
606 	int	*holdp = (int *)arg;
607 	char	*name = ddi_node_name(rdip);
608 
609 	/*
610 	 * We only need to hold/release the following nodes which
611 	 * represent separate branches that must be managed.
612 	 */
613 	if (opl_hold_node(name) == 0) {
614 		/* Not of interest to us */
615 		return (DDI_WALK_PRUNECHILD);
616 	}
617 	if (*holdp) {
618 		ASSERT(!e_ddi_branch_held(rdip));
619 		e_ddi_branch_hold(rdip);
620 	} else {
621 		ASSERT(e_ddi_branch_held(rdip));
622 		e_ddi_branch_rele(rdip);
623 	}
624 
625 	return (DDI_WALK_PRUNECHILD);
626 }
627 
628 void
629 opl_hold_devtree()
630 {
631 	dev_info_t *dip;
632 	int circ;
633 	int hold = 1;
634 
635 	dip = ddi_root_node();
636 	ndi_devi_enter(dip, &circ);
637 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
638 	ndi_devi_exit(dip, circ);
639 }
640 
641 void
642 opl_release_devtree()
643 {
644 	dev_info_t *dip;
645 	int circ;
646 	int hold = 0;
647 
648 	dip = ddi_root_node();
649 	ndi_devi_enter(dip, &circ);
650 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
651 	ndi_devi_exit(dip, circ);
652 }
653 
654 /*
655  * This is a helper function that allows opl_create_node() to return a
656  * pointer to a newly created node to its caller.
657  */
658 /*ARGSUSED*/
659 static void
660 opl_set_node(dev_info_t *node, void *arg, uint_t flags)
661 {
662 	opl_probe_t	*probe;
663 
664 	probe = arg;
665 	probe->pr_node = node;
666 }
667 
668 /*
669  * Function to create a node in the device tree under a specified parent.
670  *
671  * e_ddi_branch_create() allows the creation of a whole branch with a
672  * single call of the function. However, we only use it to create one node
673  * at a time in the case of non-I/O device nodes. In other words, we
674  * create branches by repeatedly using this function. This makes the
675  * code more readable.
676  *
677  * The branch descriptor passed to e_ddi_branch_create() takes two
678  * callbacks. The create() callback is used to set the properties of a
679  * newly created node. The other callback is used to return a pointer
680  * to the newly created node. The create() callback is passed by the
681  * caller of this function based on the kind of node he wishes to
682  * create.
683  *
684  * e_ddi_branch_create() returns with the newly created node held. We
685  * only need to hold the top nodes of the branches we create. We release
686  * the hold for the others. E.g., the "cmp" node needs to be held. Since
687  * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
688  * nodes below it.
689  */
690 static dev_info_t *
691 opl_create_node(opl_probe_t *probe)
692 {
693 	devi_branch_t	branch;
694 
695 	probe->pr_node = NULL;
696 
697 	branch.arg = probe;
698 	branch.type = DEVI_BRANCH_SID;
699 	branch.create.sid_branch_create = probe->pr_create;
700 	branch.devi_branch_callback = opl_set_node;
701 
702 	if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
703 		return (NULL);
704 
705 	ASSERT(probe->pr_node != NULL);
706 
707 	if (probe->pr_hold == 0)
708 		e_ddi_branch_rele(probe->pr_node);
709 
710 	return (probe->pr_node);
711 }
712 
713 /*
714  * Function to tear down a whole branch rooted at the specified node.
715  *
716  * Although we create each node of a branch individually, we destroy
717  * a whole branch in one call. This is more efficient.
718  */
719 static int
720 opl_destroy_node(dev_info_t *node)
721 {
722 	if (e_ddi_branch_destroy(node, NULL, 0) != 0) {
723 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
724 		(void) ddi_pathname(node, path);
725 		cmn_err(CE_WARN, "OPL node removal failed: %s (%p)", path,
726 		    (void *)node);
727 		kmem_free(path, MAXPATHLEN);
728 		return (-1);
729 	}
730 
731 	return (0);
732 }
733 
734 /*
735  * Set the properties for a "cpu" node.
736  */
737 /*ARGSUSED*/
738 static int
739 opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
740 {
741 	opl_probe_t	*probe;
742 	hwd_cpu_chip_t	*chip;
743 	hwd_core_t	*core;
744 	hwd_cpu_t	*cpu;
745 	int		ret;
746 
747 	probe = arg;
748 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
749 	core = &chip->chip_cores[probe->pr_core];
750 	cpu = &core->core_cpus[probe->pr_cpu];
751 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
752 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);
753 
754 	OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
755 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);
756 
757 	OPL_UPDATE_PROP(string, node, "status", "okay");
758 
759 	return (DDI_WALK_TERMINATE);
760 }
761 
762 /*
763  * Create "cpu" nodes as child nodes of a given "core" node.
764  */
765 static int
766 opl_probe_cpus(opl_probe_t *probe)
767 {
768 	int		i;
769 	hwd_cpu_chip_t	*chip;
770 	hwd_core_t	*core;
771 	hwd_cpu_t	*cpus;
772 
773 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
774 	core = &chip->chip_cores[probe->pr_core];
775 	cpus = &core->core_cpus[0];
776 
777 	for (i = 0; i < HWD_CPUS_PER_CORE; i++) {
778 
779 		/*
780 		 * Olympus-C has 2 cpus per core.
781 		 * Jupiter has 4 cpus per core.
782 		 * For the Olympus-C based platform, we expect the cpu_status
783 		 * of the non-existent cpus to be set to missing.
784 		 */
785 		if (!HWD_STATUS_OK(cpus[i].cpu_status))
786 			continue;
787 
788 		probe->pr_create = opl_create_cpu;
789 		probe->pr_cpu = i;
790 		if (opl_create_node(probe) == NULL) {
791 
792 			cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
793 			    probe->pr_board, probe->pr_cpu_chip, probe->pr_core,
794 			    probe->pr_cpu);
795 			return (-1);
796 		}
797 	}
798 
799 	return (0);
800 }
801 
802 /*
803  * Set the properties for a "core" node.
804  */
805 /*ARGSUSED*/
806 static int
807 opl_create_core(dev_info_t *node, void *arg, uint_t flags)
808 {
809 	opl_probe_t	*probe;
810 	hwd_cpu_chip_t	*chip;
811 	hwd_core_t	*core;
812 	int		sharing[2];
813 	int		ret;
814 
815 	probe = arg;
816 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
817 	core = &chip->chip_cores[probe->pr_core];
818 
819 	OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
820 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
821 	OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);
822 
823 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
824 	OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
825 	OPL_UPDATE_PROP(int, node, "implementation#",
826 	    core->core_implementation);
827 	OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);
828 
829 	OPL_UPDATE_PROP(int, node, "sparc-version", 9);
830 	OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);
831 
832 	OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
833 	OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
834 	    core->core_l1_icache_line_size);
835 	OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
836 	    core->core_l1_icache_associativity);
837 	OPL_UPDATE_PROP(int, node, "#itlb-entries",
838 	    core->core_num_itlb_entries);
839 
840 	OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
841 	OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
842 	    core->core_l1_dcache_line_size);
843 	OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
844 	    core->core_l1_dcache_associativity);
845 	OPL_UPDATE_PROP(int, node, "#dtlb-entries",
846 	    core->core_num_dtlb_entries);
847 
848 	OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
849 	OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
850 	    core->core_l2_cache_line_size);
851 	OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
852 	    core->core_l2_cache_associativity);
853 	sharing[0] = 0;
854 	sharing[1] = core->core_l2_cache_sharing;
855 	OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);
856 
857 	OPL_UPDATE_PROP(string, node, "status", "okay");
858 
859 	return (DDI_WALK_TERMINATE);
860 }
861 
862 /*
863  * Create "core" nodes as child nodes of a given "cmp" node.
864  *
865  * Create the branch below each "core" node.
866  */
867 static int
868 opl_probe_cores(opl_probe_t *probe)
869 {
870 	int		i;
871 	hwd_cpu_chip_t	*chip;
872 	hwd_core_t	*cores;
873 	dev_info_t	*parent, *node;
874 
875 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
876 	cores = &chip->chip_cores[0];
877 	parent = probe->pr_parent;
878 
879 	for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {
880 
881 		if (!HWD_STATUS_OK(cores[i].core_status))
882 			continue;
883 
884 		probe->pr_parent = parent;
885 		probe->pr_create = opl_create_core;
886 		probe->pr_core = i;
887 		node = opl_create_node(probe);
888 		if (node == NULL) {
889 
890 			cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
891 			    probe->pr_board, probe->pr_cpu_chip,
892 			    probe->pr_core);
893 			return (-1);
894 		}
895 
896 		/*
897 		 * Create "cpu" nodes below "core".
898 		 */
899 		probe->pr_parent = node;
900 		if (opl_probe_cpus(probe) != 0)
901 			return (-1);
902 		probe->pr_cpu_impl |= (1 << cores[i].core_implementation);
903 	}
904 
905 	return (0);
906 }
907 
908 /*
909  * Set the properties for a "cmp" node.
910  */
911 /*ARGSUSED*/
912 static int
913 opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
914 {
915 	opl_probe_t	*probe;
916 	hwd_cpu_chip_t	*chip;
917 	opl_range_t	range;
918 	uint64_t	dummy_addr;
919 	int		ret;
920 
921 	probe = arg;
922 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
923 
924 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);
925 
926 	OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
927 	OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);
928 
929 	dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
930 	range.rg_addr_hi = OPL_HI(dummy_addr);
931 	range.rg_addr_lo = OPL_LO(dummy_addr);
932 	range.rg_size_hi = 0;
933 	range.rg_size_lo = 0;
934 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
935 
936 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
937 	OPL_UPDATE_PROP(int, node, "#size-cells", 0);
938 
939 	OPL_UPDATE_PROP(string, node, "status", "okay");
940 
941 	return (DDI_WALK_TERMINATE);
942 }
943 
944 /*
945  * Create "cmp" nodes as child nodes of the root node.
946  *
947  * Create the branch below each "cmp" node.
948  */
949 static int
950 opl_probe_cpu_chips(opl_probe_t *probe)
951 {
952 	int		i;
953 	dev_info_t	**cfg_cpu_chips;
954 	hwd_cpu_chip_t	*chips;
955 	dev_info_t	*node;
956 
957 	cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
958 	chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];
959 
960 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
961 
962 		ASSERT(cfg_cpu_chips[i] == NULL);
963 
964 		if (!HWD_STATUS_OK(chips[i].chip_status))
965 			continue;
966 
967 		probe->pr_parent = ddi_root_node();
968 		probe->pr_create = opl_create_cpu_chip;
969 		probe->pr_cpu_chip = i;
970 		probe->pr_hold = 1;
971 		node = opl_create_node(probe);
972 		if (node == NULL) {
973 
974 			cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
975 			    probe->pr_board, probe->pr_cpu_chip);
976 			return (-1);
977 		}
978 
979 		cfg_cpu_chips[i] = node;
980 
981 		/*
982 		 * Create "core" nodes below "cmp".
983 		 * We hold the "cmp" node. So, there is no need to hold
984 		 * the "core" and "cpu" nodes below it.
985 		 */
986 		probe->pr_parent = node;
987 		probe->pr_hold = 0;
988 		if (opl_probe_cores(probe) != 0)
989 			return (-1);
990 	}
991 
992 	return (0);
993 }
994 
995 /*
996  * Set the properties for a "pseudo-mc" node.
997  */
998 /*ARGSUSED*/
999 static int
1000 opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
1001 {
1002 	opl_probe_t	*probe;
1003 	int		board, portid;
1004 	hwd_bank_t	*bank;
1005 	hwd_memory_t	*mem;
1006 	opl_range_t	range;
1007 	opl_mc_addr_t	mc[HWD_BANKS_PER_CMU];
1008 	int		status[2][7];
1009 	int		i, j;
1010 	int		ret;
1011 
1012 	probe = arg;
1013 	board = probe->pr_board;
1014 
1015 	OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
1016 	OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
1017 	OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");
1018 
1019 	portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
1020 	OPL_UPDATE_PROP(int, node, "portid", portid);
1021 
1022 	range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
1023 	range.rg_addr_lo = 0x200;
1024 	range.rg_size_hi = 0;
1025 	range.rg_size_lo = 0;
1026 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
1027 
1028 	OPL_UPDATE_PROP(int, node, "board#", board);
1029 	OPL_UPDATE_PROP(int, node, "physical-board#",
1030 	    probe->pr_sb->sb_psb_number);
1031 
1032 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
1033 	OPL_UPDATE_PROP(int, node, "#size-cells", 2);
1034 
1035 	mem = &probe->pr_sb->sb_cmu.cmu_memory;
1036 
1037 	range.rg_addr_hi = OPL_HI(mem->mem_start_address);
1038 	range.rg_addr_lo = OPL_LO(mem->mem_start_address);
1039 	range.rg_size_hi = OPL_HI(mem->mem_size);
1040 	range.rg_size_lo = OPL_LO(mem->mem_size);
1041 	OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);
1042 
1043 	bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
1044 	for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {
1045 
1046 		if (!HWD_STATUS_OK(bank[i].bank_status))
1047 			continue;
1048 
1049 		mc[j].mc_bank = i;
1050 		mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
1051 		mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
1052 		j++;
1053 	}
1054 
1055 	if (j > 0) {
1056 		OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);
1057 	} else {
1058 		/*
1059 		 * If there is no memory, we need the mc-addr property, but
1060 		 * it is length 0.  The only way to do this using ndi seems
1061 		 * to be by creating a boolean property.
1062 		 */
1063 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node, "mc-addr");
1064 		OPL_UPDATE_PROP_ERR(ret, "mc-addr");
1065 	}
1066 
1067 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
1068 	    mem->mem_cs[0].cs_pa_mac_table, 64);
1069 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
1070 	    mem->mem_cs[1].cs_pa_mac_table, 64);
1071 
1072 #define	CS_PER_MEM 2
1073 
1074 	for (i = 0, j = 0; i < CS_PER_MEM; i++) {
1075 		if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
1076 		    HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
1077 			status[j][0] = i;
1078 			if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
1079 				status[j][1] = 0;
1080 			else
1081 				status[j][1] = 1;
1082 			status[j][2] =
1083 			    OPL_HI(mem->mem_cs[i].cs_available_capacity);
1084 			status[j][3] =
1085 			    OPL_LO(mem->mem_cs[i].cs_available_capacity);
1086 			status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
1087 			status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
1088 			status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
1089 			j++;
1090 		}
1091 	}
1092 
1093 	if (j > 0) {
1094 		OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
1095 		    j*7);
1096 	} else {
1097 		/*
1098 		 * If there is no memory, we need the cs-status property, but
1099 		 * it is length 0.  The only way to do this using ndi seems
1100 		 * to be by creating a boolean property.
1101 		 */
1102 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node,
1103 		    "cs-status");
1104 		OPL_UPDATE_PROP_ERR(ret, "cs-status");
1105 	}
1106 
1107 	return (DDI_WALK_TERMINATE);
1108 }
1109 
1110 /*
1111  * Create "pseudo-mc" nodes
1112  */
1113 static int
1114 opl_probe_memory(opl_probe_t *probe)
1115 {
1116 	int		board;
1117 	opl_board_cfg_t	*board_cfg;
1118 	dev_info_t	*node;
1119 
1120 	board = probe->pr_board;
1121 	board_cfg = &opl_boards[board];
1122 
1123 	ASSERT(board_cfg->cfg_pseudo_mc == NULL);
1124 
1125 	probe->pr_parent = ddi_root_node();
1126 	probe->pr_create = opl_create_pseudo_mc;
1127 	probe->pr_hold = 1;
1128 	node = opl_create_node(probe);
1129 	if (node == NULL) {
1130 
1131 		cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed", board);
1132 		return (-1);
1133 	}
1134 
1135 	board_cfg->cfg_pseudo_mc = node;
1136 
1137 	return (0);
1138 }
1139 
1140 /*
1141  * Allocate the fcode ops handle.
1142  */
1143 /*ARGSUSED*/
1144 static
1145 fco_handle_t
1146 opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
1147 			void *fcode, size_t fcode_size, char *unit_address,
1148 			char *my_args)
1149 {
1150 	fco_handle_t	rp;
1151 	phandle_t	h;
1152 	char		*buf;
1153 
1154 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
1155 	rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
1156 	    unit_address, NULL);
1157 	rp->ap = parent;
1158 	rp->child = child;
1159 	rp->fcode = fcode;
1160 	rp->fcode_size = fcode_size;
1161 	rp->my_args = my_args;
1162 
1163 	if (unit_address) {
1164 		buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
1165 		(void) strcpy(buf, unit_address);
1166 		rp->unit_address = buf;
1167 	}
1168 
1169 	/*
1170 	 * Add the child's nodeid to our table...
1171 	 */
1172 	h = ddi_get_nodeid(rp->child);
1173 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
1174 
1175 	return (rp);
1176 }
1177 
1178 
1179 static void
1180 opl_fc_ops_free_handle(fco_handle_t rp)
1181 {
1182 	struct fc_resource	*resp, *nresp;
1183 
1184 	ASSERT(rp);
1185 
1186 	if (rp->next_handle)
1187 		fc_ops_free_handle(rp->next_handle);
1188 	if (rp->unit_address)
1189 		kmem_free(rp->unit_address, UNIT_ADDR_SIZE);
1190 
1191 	/*
1192 	 * Release all the resources from the resource list
1193 	 */
1194 	for (resp = rp->head; resp != NULL; resp = nresp) {
1195 		nresp = resp->next;
1196 		switch (resp->type) {
1197 
1198 		case RT_MAP:
1199 			/*
1200 			 * If this is still mapped, we'd better unmap it now,
1201 			 * or all our structures that are tracking it will
1202 			 * be leaked.
1203 			 */
1204 			if (resp->fc_map_handle != NULL)
1205 				opl_unmap_phys(&resp->fc_map_handle);
1206 			break;
1207 
1208 		case RT_DMA:
1209 			/*
1210 			 * DMA has to be freed up at exit time.
1211 			 */
1212 			cmn_err(CE_CONT,
1213 			    "opl_fc_ops_free_handle: Unexpected DMA seen!");
1214 			break;
1215 
1216 		case RT_CONTIGIOUS:
1217 			FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
1218 			    "Free claim-memory resource 0x%lx size 0x%x\n",
1219 			    resp->fc_contig_virt, resp->fc_contig_len);
1220 
1221 			(void) ndi_ra_free(ddi_root_node(),
1222 			    (uint64_t)resp->fc_contig_virt,
1223 			    resp->fc_contig_len, "opl-fcodemem",
1224 			    NDI_RA_PASS);
1225 
1226 			break;
1227 
1228 		default:
1229 			cmn_err(CE_CONT, "opl_fc_ops_free: "
1230 			    "unknown resource type %d", resp->type);
1231 			break;
1232 		}
1233 		fc_rem_resource(rp, resp);
1234 		kmem_free(resp, sizeof (struct fc_resource));
1235 	}
1236 
1237 	kmem_free(rp, sizeof (struct fc_resource_list));
1238 }
1239 
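/*
 * Dispatch an fcode client interface request for this platform.  The
 * generic fc_ops() services are tried first; if none of them handles
 * the request, the OPL-specific handlers in opl_fc_ops[] are searched
 * by service name.
 */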
1240 int
1241 opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1242 {
1243 	opl_fc_ops_t	*op;
1244 	char		*service = fc_cell2ptr(cp->svc_name);
1245 
1246 	ASSERT(rp);
1247 
1248 	FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);
1249 
1250 	/*
1251 	 * First try the generic fc_ops.
1252 	 */
1253 	if (fc_ops(ap, rp->next_handle, cp) == 0)
1254 		return (0);
1255 
1256 	/*
1257 	 * Now try the Jupiter-specific ops.
1258 	 */
1259 	for (op = opl_fc_ops; op->fc_service != NULL; ++op)
1260 		if (strcmp(op->fc_service, service) == 0)
1261 			return (op->fc_op(ap, rp, cp));
1262 
1263 	FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);
1264 
1265 	return (-1);
1266 }
1267 
1268 /*
1269  * map-in  (phys.lo phys.hi size -- virt)
1270  */
1271 static int
1272 opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1273 {
1274 	size_t			len;
1275 	int			error;
1276 	caddr_t			virt;
1277 	struct fc_resource	*resp;
1278 	struct regspec		rspec;
1279 	ddi_device_acc_attr_t	acc;
1280 	ddi_acc_handle_t	h;
1281 
1282 	if (fc_cell2int(cp->nargs) != 3)
1283 		return (fc_syntax_error(cp, "nargs must be 3"));
1284 
1285 	if (fc_cell2int(cp->nresults) < 1)
1286 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1287 
1288 	rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
1289 	rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
1290 	rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));
1291 
1292 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1293 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1294 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1295 
1296 	FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
1297 	    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1298 	    rspec.regspec_addr, rspec.regspec_size);
1299 
1300 	error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);
1301 
1302 	if (error)  {
1303 		FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
1304 		    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1305 		    rspec.regspec_addr, rspec.regspec_size);
1306 
1307 		return (fc_priv_error(cp, "opl map-in failed"));
1308 	}
1309 
1310 	FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);
1311 
1312 	cp->nresults = fc_int2cell(1);
1313 	fc_result(cp, 0) = fc_ptr2cell(virt);
1314 
1315 	/*
1316 	 * Log this resource ...
1317 	 */
1318 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1319 	resp->type = RT_MAP;
1320 	resp->fc_map_virt = virt;
1321 	resp->fc_map_len = len;
1322 	resp->fc_map_handle = h;
1323 	fc_add_resource(rp, resp);
1324 
1325 	return (fc_success_op(ap, rp, cp));
1326 }
1327 
1328 /*
1329  * map-out (virt size -- )
1330  */
1331 static int
1332 opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1333 {
1334 	caddr_t			virt;
1335 	size_t			len;
1336 	struct fc_resource	*resp;
1337 
1338 	if (fc_cell2int(cp->nargs) != 2)
1339 		return (fc_syntax_error(cp, "nargs must be 2"));
1340 
1341 	virt = fc_cell2ptr(fc_arg(cp, 1));
1342 
1343 	len = fc_cell2size(fc_arg(cp, 0));
1344 
1345 	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
1346 	    virt, len);
1347 
1348 	/*
1349 	 * Find if this request matches a mapping resource we set up.
1350 	 */
1351 	fc_lock_resource_list(rp);
1352 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1353 		if (resp->type != RT_MAP)
1354 			continue;
1355 		if (resp->fc_map_virt != virt)
1356 			continue;
1357 		if (resp->fc_map_len == len)
1358 			break;
1359 	}
1360 	fc_unlock_resource_list(rp);
1361 
1362 	if (resp == NULL)
1363 		return (fc_priv_error(cp, "request doesn't match a "
1364 		    "known mapping"));
1365 
1366 	opl_unmap_phys(&resp->fc_map_handle);
1367 
1368 	/*
1369 	 * remove the resource from the list and release it.
1370 	 */
1371 	fc_rem_resource(rp, resp);
1372 	kmem_free(resp, sizeof (struct fc_resource));
1373 
1374 	cp->nresults = fc_int2cell(0);
1375 	return (fc_success_op(ap, rp, cp));
1376 }
1377 
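/*
 * rx@ / rl@ / rw@ / rb@
 *
 * Fetch a 64-, 32-, 16- or 8-bit value (the width is selected by the
 * second character of the service name) from a virtual address that
 * lies within a mapping or claimed-memory region previously recorded
 * for this handle.
 */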
1378 static int
1379 opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1380 {
1381 	size_t			len;
1382 	caddr_t			virt;
1383 	int			error = 0;
1384 	uint64_t		v;
1385 	uint64_t		x;
1386 	uint32_t		l;
1387 	uint16_t		w;
1388 	uint8_t			b;
1389 	char			*service = fc_cell2ptr(cp->svc_name);
1390 	struct fc_resource	*resp;
1391 
1392 	if (fc_cell2int(cp->nargs) != 1)
1393 		return (fc_syntax_error(cp, "nargs must be 1"));
1394 
1395 	if (fc_cell2int(cp->nresults) < 1)
1396 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1397 
1398 	virt = fc_cell2ptr(fc_arg(cp, 0));
1399 
1400 	/*
1401 	 * Determine the access width .. we can switch on the 2nd
1402 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
1403 	 */
1404 	switch (*(service + 1)) {
1405 	case 'x':	len = sizeof (x); break;
1406 	case 'l':	len = sizeof (l); break;
1407 	case 'w':	len = sizeof (w); break;
1408 	case 'b':	len = sizeof (b); break;
1409 	}
1410 
1411 	/*
1412 	 * Check the alignment ...
1413 	 */
1414 	if (((intptr_t)virt & (len - 1)) != 0)
1415 		return (fc_priv_error(cp, "unaligned access"));
1416 
1417 	/*
1418 	 * Find if this virt is 'within' a request we know about
1419 	 */
1420 	fc_lock_resource_list(rp);
1421 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1422 		if (resp->type == RT_MAP) {
1423 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1424 			    ((virt + len) <=
1425 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1426 				break;
1427 		} else if (resp->type == RT_CONTIGIOUS) {
1428 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1429 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1430 			    resp->fc_contig_len)))
1431 				break;
1432 		}
1433 	}
1434 	fc_unlock_resource_list(rp);
1435 
1436 	if (resp == NULL) {
1437 		return (fc_priv_error(cp, "request not within "
1438 		    "known mappings"));
1439 	}
1440 
1441 	switch (len) {
1442 	case sizeof (x):
1443 		if (resp->type == RT_MAP)
1444 			error = ddi_peek64(rp->child, (int64_t *)virt,
1445 			    (int64_t *)&x);
1446 		else /* RT_CONTIGIOUS */
1447 			x = *(int64_t *)virt;
1448 		v = x;
1449 		break;
1450 	case sizeof (l):
1451 		if (resp->type == RT_MAP)
1452 			error = ddi_peek32(rp->child, (int32_t *)virt,
1453 			    (int32_t *)&l);
1454 		else /* RT_CONTIGIOUS */
1455 			l = *(int32_t *)virt;
1456 		v = l;
1457 		break;
1458 	case sizeof (w):
1459 		if (resp->type == RT_MAP)
1460 			error = ddi_peek16(rp->child, (int16_t *)virt,
1461 			    (int16_t *)&w);
1462 		else /* RT_CONTIGIOUS */
1463 			w = *(int16_t *)virt;
1464 		v = w;
1465 		break;
1466 	case sizeof (b):
1467 		if (resp->type == RT_MAP)
1468 			error = ddi_peek8(rp->child, (int8_t *)virt,
1469 			    (int8_t *)&b);
1470 		else /* RT_CONTIGIOUS */
1471 			b = *(int8_t *)virt;
1472 		v = b;
1473 		break;
1474 	}
1475 
1476 	if (error == DDI_FAILURE) {
1477 		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
1478 		    "accessing virt %p len %d\n", virt, len);
1479 		return (fc_priv_error(cp, "access error"));
1480 	}
1481 
1482 	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
1483 	    service, virt, v);
1484 
1485 	cp->nresults = fc_int2cell(1);
1486 	switch (len) {
1487 	case sizeof (x): fc_result(cp, 0) = x; break;
1488 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
1489 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
1490 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
1491 	}
1492 	return (fc_success_op(ap, rp, cp));
1493 }
1494 
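/*
 * rx! / rl! / rw! / rb!
 *
 * Store a 64-, 32-, 16- or 8-bit value (the width is selected by the
 * second character of the service name) to a virtual address that
 * lies within a mapping or claimed-memory region previously recorded
 * for this handle.
 */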
1495 static int
1496 opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1497 {
1498 	size_t			len;
1499 	caddr_t			virt;
1500 	uint64_t		v;
1501 	uint64_t		x;
1502 	uint32_t		l;
1503 	uint16_t		w;
1504 	uint8_t			b;
1505 	char			*service = fc_cell2ptr(cp->svc_name);
1506 	struct fc_resource	*resp;
1507 	int			error = 0;
1508 
1509 	if (fc_cell2int(cp->nargs) != 2)
1510 		return (fc_syntax_error(cp, "nargs must be 2"));
1511 
1512 	virt = fc_cell2ptr(fc_arg(cp, 0));
1513 
1514 	/*
1515 	 * Determine the access width .. we can switch on the 2nd
1516 	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
1517 	 */
1518 	switch (*(service + 1)) {
1519 	case 'x':
1520 		len = sizeof (x);
1521 		x = fc_arg(cp, 1);
1522 		v = x;
1523 		break;
1524 	case 'l':
1525 		len = sizeof (l);
1526 		l = fc_cell2uint32_t(fc_arg(cp, 1));
1527 		v = l;
1528 		break;
1529 	case 'w':
1530 		len = sizeof (w);
1531 		w = fc_cell2uint16_t(fc_arg(cp, 1));
1532 		v = w;
1533 		break;
1534 	case 'b':
1535 		len = sizeof (b);
1536 		b = fc_cell2uint8_t(fc_arg(cp, 1));
1537 		v = b;
1538 		break;
1539 	}
1540 
1541 	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
1542 	    service, virt, v);
1543 
1544 	/*
1545 	 * Check the alignment ...
1546 	 */
1547 	if (((intptr_t)virt & (len - 1)) != 0)
1548 		return (fc_priv_error(cp, "unaligned access"));
1549 
1550 	/*
1551 	 * Find if this virt is 'within' a request we know about
1552 	 */
1553 	fc_lock_resource_list(rp);
1554 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1555 		if (resp->type == RT_MAP) {
1556 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1557 			    ((virt + len) <=
1558 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1559 				break;
1560 		} else if (resp->type == RT_CONTIGIOUS) {
1561 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1562 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1563 			    resp->fc_contig_len)))
1564 				break;
1565 		}
1566 	}
1567 	fc_unlock_resource_list(rp);
1568 
1569 	if (resp == NULL)
1570 		return (fc_priv_error(cp, "request not within "
1571 		    "known mappings"));
1572 
1573 	switch (len) {
1574 	case sizeof (x):
1575 		if (resp->type == RT_MAP)
1576 			error = ddi_poke64(rp->child, (int64_t *)virt, x);
1577 		else if (resp->type == RT_CONTIGIOUS)
1578 			*(uint64_t *)virt = x;
1579 		break;
1580 	case sizeof (l):
1581 		if (resp->type == RT_MAP)
1582 			error = ddi_poke32(rp->child, (int32_t *)virt, l);
1583 		else if (resp->type == RT_CONTIGIOUS)
1584 			*(uint32_t *)virt = l;
1585 		break;
1586 	case sizeof (w):
1587 		if (resp->type == RT_MAP)
1588 			error = ddi_poke16(rp->child, (int16_t *)virt, w);
1589 		else if (resp->type == RT_CONTIGIOUS)
1590 			*(uint16_t *)virt = w;
1591 		break;
1592 	case sizeof (b):
1593 		if (resp->type == RT_MAP)
1594 			error = ddi_poke8(rp->child, (int8_t *)virt, b);
1595 		else if (resp->type == RT_CONTIGIOUS)
1596 			*(uint8_t *)virt = b;
1597 		break;
1598 	}
1599 
1600 	if (error == DDI_FAILURE) {
1601 		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
1602 		    "accessing virt %p len %d\n", virt, len);
1603 		return (fc_priv_error(cp, "access error"));
1604 	}
1605 
1606 	cp->nresults = fc_int2cell(0);
1607 	return (fc_success_op(ap, rp, cp));
1608 }
1609 
1610 /*
1611  * opl_claim_memory
1612  *
1613  * claim-memory (align size vhint -- vaddr)
1614  */
1615 static int
1616 opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1617 {
1618 	int			align, size, vhint;
1619 	uint64_t		answer, alen;
1620 	ndi_ra_request_t	request;
1621 	struct fc_resource	*resp;
1622 
1623 	if (fc_cell2int(cp->nargs) != 3)
1624 		return (fc_syntax_error(cp, "nargs must be 3"));
1625 
1626 	if (fc_cell2int(cp->nresults) < 1)
1627 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1628 
1629 	vhint = fc_cell2int(fc_arg(cp, 2));
1630 	size  = fc_cell2int(fc_arg(cp, 1));
1631 	align = fc_cell2int(fc_arg(cp, 0));
1632 
1633 	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
1634 	    "vhint=0x%x\n", align, size, vhint);
1635 
1636 	if (size == 0) {
1637 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1638 		    "contiguous memory of size zero\n");
1639 		return (fc_priv_error(cp, "allocation error"));
1640 	}
1641 
1642 	if (vhint) {
1643 		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
1644 		    "vhint=0x%x - Ignoring Argument\n", vhint);
1645 	}
1646 
1647 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1648 	request.ra_flags	= NDI_RA_ALLOC_BOUNDED;
1649 	request.ra_boundbase	= 0;
1650 	request.ra_boundlen	= 0xffffffff;
1651 	request.ra_len		= size;
1652 	request.ra_align_mask	= align - 1;
1653 
1654 	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
1655 	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
1656 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1657 		    "contiguous memory\n");
1658 		return (fc_priv_error(cp, "allocation error"));
1659 	}
1660 
1661 	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
1662 	    "size=0x%x\n", answer, alen);
1663 
1664 	cp->nresults = fc_int2cell(1);
1665 	fc_result(cp, 0) = answer;
1666 
1667 	/*
1668 	 * Log this resource ...
1669 	 */
1670 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1671 	resp->type = RT_CONTIGIOUS;
1672 	resp->fc_contig_virt = (void *)answer;
1673 	resp->fc_contig_len = size;
1674 	fc_add_resource(rp, resp);
1675 
1676 	return (fc_success_op(ap, rp, cp));
1677 }
1678 
1679 /*
1680  * opl_release_memory
1681  *
1682  * release-memory (size vaddr -- )
1683  */
1684 static int
1685 opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1686 {
1687 	int32_t			vaddr, size;
1688 	struct fc_resource	*resp;
1689 
1690 	if (fc_cell2int(cp->nargs) != 2)
1691 		return (fc_syntax_error(cp, "nargs must be 2"));
1692 
1693 	if (fc_cell2int(cp->nresults) != 0)
1694 		return (fc_syntax_error(cp, "nresults must be 0"));
1695 
1696 	vaddr = fc_cell2int(fc_arg(cp, 1));
1697 	size  = fc_cell2int(fc_arg(cp, 0));
1698 
1699 	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
1700 	    vaddr, size);
1701 
1702 	/*
1703 	 * Find if this request matches a mapping resource we set up.
1704 	 */
1705 	fc_lock_resource_list(rp);
1706 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1707 		if (resp->type != RT_CONTIGIOUS)
1708 			continue;
1709 		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
1710 			continue;
1711 		if (resp->fc_contig_len == size)
1712 			break;
1713 	}
1714 	fc_unlock_resource_list(rp);
1715 
1716 	if (resp == NULL)
1717 		return (fc_priv_error(cp, "request doesn't match a "
1718 		    "known mapping"));
1719 
1720 	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
1721 	    "opl-fcodemem", NDI_RA_PASS);
1722 
1723 	/*
1724 	 * remove the resource from the list and release it.
1725 	 */
1726 	fc_rem_resource(rp, resp);
1727 	kmem_free(resp, sizeof (struct fc_resource));
1728 
1729 	cp->nresults = fc_int2cell(0);
1730 
1731 	return (fc_success_op(ap, rp, cp));
1732 }
1733 
1734 /*
1735  * opl_vtop
1736  *
1737  * vtop (vaddr -- paddr.lo paddr.hi)
1738  */
1739 static int
1740 opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1741 {
1742 	int			vaddr;
1743 	uint64_t		paddr;
1744 	struct fc_resource	*resp;
1745 
1746 	if (fc_cell2int(cp->nargs) != 1)
1747 		return (fc_syntax_error(cp, "nargs must be 1"));
1748 
1749 	if (fc_cell2int(cp->nresults) >= 3)
1750 		return (fc_syntax_error(cp, "nresults must be less than 3"));
1751 
1752 	vaddr = fc_cell2int(fc_arg(cp, 0));
1753 
1754 	/*
1755 	 * Find if this request matches a mapping resource we set up.
1756 	 */
1757 	fc_lock_resource_list(rp);
1758 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1759 		if (resp->type != RT_CONTIGIOUS)
1760 			continue;
1761 		if (((uint64_t)resp->fc_contig_virt <= vaddr) &&
1762 		    (vaddr < (uint64_t)resp->fc_contig_virt +
1763 		    resp->fc_contig_len))
1764 			break;
1765 	}
1766 	fc_unlock_resource_list(rp);
1767 
1768 	if (resp == NULL)
1769 		return (fc_priv_error(cp, "request doesn't match a "
1770 		    "known mapping"));
1771 
1772 	paddr = va_to_pa((void *)(uintptr_t)vaddr);
1773 
1774 	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
1775 	    vaddr, paddr);
1776 
1777 	cp->nresults = fc_int2cell(2);
1778 
1779 	fc_result(cp, 0) = paddr;
1780 	fc_result(cp, 1) = 0;
1781 
1782 	return (fc_success_op(ap, rp, cp));
1783 }
1784 
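/*
 * FC_CONFIG_CHILD ( -- phandle )
 *
 * Return the phandle of the child node being configured.
 */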
1785 static int
1786 opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1787 {
1788 	fc_phandle_t h;
1789 
1790 	if (fc_cell2int(cp->nargs) != 0)
1791 		return (fc_syntax_error(cp, "nargs must be 0"));
1792 
1793 	if (fc_cell2int(cp->nresults) < 1)
1794 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1795 
1796 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1797 
1798 	cp->nresults = fc_int2cell(1);
1799 	fc_result(cp, 0) = fc_phandle2cell(h);
1800 
1801 	return (fc_success_op(ap, rp, cp));
1802 }
1803 
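/*
 * opl_get_fcode: copy in a drop-in name, fetch the matching fcode
 * image via prom_get_fcode() and copy it out to the caller-supplied
 * buffer.  The single result cell carries the status.
 */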
1804 static int
1805 opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1806 {
1807 	caddr_t		dropin_name_virt, fcode_virt;
1808 	char		*dropin_name, *fcode;
1809 	int		fcode_len, status;
1810 
1811 	if (fc_cell2int(cp->nargs) != 3)
1812 		return (fc_syntax_error(cp, "nargs must be 3"));
1813 
1814 	if (fc_cell2int(cp->nresults) < 1)
1815 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1816 
1817 	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));
1818 
1819 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1820 
1821 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1822 
1823 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1824 
1825 	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);
1826 
1827 	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
1828 	    FC_SVC_NAME_LEN - 1, NULL))  {
1829 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
1830 		    "fault copying in drop in name %p\n", dropin_name_virt);
1831 		status = 0;
1832 	} else {
1833 		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);
1834 
1835 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1836 
1837 		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {
1838 
1839 			if (copyout((void *)fcode, (void *)fcode_virt,
1840 			    fcode_len)) {
1841 				cmn_err(CE_WARN, " opl_get_fcode: Unable "
1842 				    "to copy out fcode image");
1843 				status = 0;
1844 			}
1845 		}
1846 
1847 		kmem_free(fcode, fcode_len);
1848 	}
1849 
1850 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1851 
1852 	cp->nresults = fc_int2cell(1);
1853 	fc_result(cp, 0) = status;
1854 
1855 	return (fc_success_op(ap, rp, cp));
1856 }
1857 
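/*
 * opl_get_fcode_size: copy in a drop-in name and return the size of
 * the matching fcode image as reported by prom_get_fcode_size().
 */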
1858 static int
1859 opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1860 {
1861 	caddr_t		virt;
1862 	char		*dropin_name;
1863 	int		len;
1864 
1865 	if (fc_cell2int(cp->nargs) != 1)
1866 		return (fc_syntax_error(cp, "nargs must be 1"));
1867 
1868 	if (fc_cell2int(cp->nresults) < 1)
1869 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1870 
1871 	virt = fc_cell2ptr(fc_arg(cp, 0));
1872 
1873 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1874 
1875 	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");
1876 
1877 	if (copyinstr(fc_cell2ptr(virt), dropin_name,
1878 	    FC_SVC_NAME_LEN - 1, NULL))  {
1879 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
1880 		    "fault copying in drop in name %p\n", virt);
1881 		len = 0;
1882 	} else {
1883 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);
1884 
1885 		len = prom_get_fcode_size(dropin_name);
1886 	}
1887 
1888 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1889 
1890 	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);
1891 
1892 	cp->nresults = fc_int2cell(1);
1893 	fc_result(cp, 0) = len;
1894 
1895 	return (fc_success_op(ap, rp, cp));
1896 }
1897 
1898 static int
1899 opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
1900     caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1901     ddi_acc_handle_t *handlep)
1902 {
1903 	ddi_map_req_t 	mapreq;
1904 	ddi_acc_hdl_t	*acc_handlep;
1905 	int		result;
1906 	struct regspec	*rspecp;
1907 
1908 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1909 	acc_handlep = impl_acc_hdl_get(*handlep);
1910 	acc_handlep->ah_vers = VERS_ACCHDL;
1911 	acc_handlep->ah_dip = dip;
1912 	acc_handlep->ah_rnumber = 0;
1913 	acc_handlep->ah_offset = 0;
1914 	acc_handlep->ah_len = 0;
1915 	acc_handlep->ah_acc = *accattrp;
1916 	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
1917 	*rspecp = *phys_spec;
1918 	/*
1919 	 * cache a copy of the reg spec
1920 	 */
1921 	acc_handlep->ah_bus_private = rspecp;
1922 
1923 	mapreq.map_op = DDI_MO_MAP_LOCKED;
1924 	mapreq.map_type = DDI_MT_REGSPEC;
1925 	mapreq.map_obj.rp = (struct regspec *)phys_spec;
1926 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1927 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1928 	mapreq.map_handlep = acc_handlep;
1929 	mapreq.map_vers = DDI_MAP_VERSION;
1930 
1931 	result = ddi_map(dip, &mapreq, 0, 0, addrp);
1932 
1933 	if (result != DDI_SUCCESS) {
1934 		impl_acc_hdl_free(*handlep);
1935 		kmem_free(rspecp, sizeof (struct regspec));
1936 		*handlep = (ddi_acc_handle_t)NULL;
1937 	} else {
1938 		acc_handlep->ah_addr = *addrp;
1939 	}
1940 
1941 	return (result);
1942 }
1943 
1944 static void
1945 opl_unmap_phys(ddi_acc_handle_t *handlep)
1946 {
1947 	ddi_map_req_t	mapreq;
1948 	ddi_acc_hdl_t	*acc_handlep;
1949 	struct regspec	*rspecp;
1950 
1951 	acc_handlep = impl_acc_hdl_get(*handlep);
1952 	ASSERT(acc_handlep);
1953 	rspecp = acc_handlep->ah_bus_private;
1954 
1955 	mapreq.map_op = DDI_MO_UNMAP;
1956 	mapreq.map_type = DDI_MT_REGSPEC;
1957 	mapreq.map_obj.rp = (struct regspec *)rspecp;
1958 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1959 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1960 	mapreq.map_handlep = acc_handlep;
1961 	mapreq.map_vers = DDI_MAP_VERSION;
1962 
1963 	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
1964 	    acc_handlep->ah_len, &acc_handlep->ah_addr);
1965 
1966 	impl_acc_hdl_free(*handlep);
1967 	/*
1968 	 * Free the cached copy
1969 	 */
1970 	kmem_free(rspecp, sizeof (struct regspec));
1971 	*handlep = (ddi_acc_handle_t)NULL;
1972 }
1973 
1974 static int
1975 opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1976 {
1977 	uint32_t	portid;
1978 	void		*hwd_virt;
1979 	hwd_header_t	*hwd_h = NULL;
1980 	hwd_sb_t	*hwd_sb = NULL;
1981 	int		lsb, ch, leaf;
1982 	int		status = 1;
1983 
1984 	/* Check the argument */
1985 	if (fc_cell2int(cp->nargs) != 2)
1986 		return (fc_syntax_error(cp, "nargs must be 2"));
1987 
1988 	if (fc_cell2int(cp->nresults) < 1)
1989 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1990 
1991 	/* Get the parameters */
1992 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
1993 	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));
1994 
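	/*
	 * An I/O port id encodes the leaf in bit 0, the channel in
	 * bits 3:1 and the LSB in bits 8:4; the Forth handlers defined
	 * later in this file decode the same layout.
	 */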
1995 	/* Get the ID numbers */
1996 	lsb  = OPL_IO_PORTID_TO_LSB(portid);
1997 	ch   = OPL_PORTID_TO_CHANNEL(portid);
1998 	leaf = OPL_PORTID_TO_LEAF(portid);
1999 	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);
2000 
2001 	/* Set the pointer of hwd. */
2002 	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
2003 		return (fc_priv_error(cp, "null hwd header"));
2004 	}
2005 	/* Set the pointer of hwd sb. */
2006 	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
2007 	    == NULL) {
2008 		return (fc_priv_error(cp, "null hwd sb"));
2009 	}
2010 
2011 	if (ch == OPL_CMU_CHANNEL) {
2012 		/* Copyout CMU-CH HW Descriptor */
2013 		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
2014 		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
2015 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2016 			"Unable to copy out cmuch descriptor for %x",
2017 			    portid);
2018 			status = 0;
2019 		}
2020 	} else {
2021 		/* Copyout PCI-CH HW Descriptor */
2022 		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
2023 		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
2024 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2025 			"Unable to copy out pcich descriptor for %x",
2026 			    portid);
2027 			status = 0;
2028 		}
2029 	}
2030 
2031 	cp->nresults = fc_int2cell(1);
2032 	fc_result(cp, 0) = status;
2033 
2034 	return (fc_success_op(ap, rp, cp));
2035 }
2036 
2037 /*
2038  * After Solaris boots, a user can enter OBP using L1A, etc. While in OBP,
2039  * interrupts may be received from PCI devices. These interrupts
2040  * cannot be handled meaningfully since the system is in OBP. These
2041  * interrupts need to be cleared on the CPU side so that the CPU may
2042  * continue with whatever it is doing. Devices that have raised the
2043  * interrupts are expected to re-raise the interrupts after some time
2044  * as they have not been handled. At that time, Solaris will have a
2045  * chance to properly service the interrupts.
2046  *
2047  * The location of the interrupt registers depends on what is present
2048  * at a port. OPL currently supports the Oberon and the CMU channel.
2049  * The following handler handles both kinds of ports and computes
2050  * interrupt register addresses from the specifications and Jupiter Bus
2051  * device bindings.
2052  *
2053  * Fcode drivers install their interrupt handler via a "master-interrupt"
2054  * service. For boot time devices, this takes place within OBP. In the case
2055  * of DR, OPL uses IKP. The Fcode drivers that run within the efcode framework
2056  * attempt to install their handler via the "master-interrupt" service.
2057  * However, we cannot meaningfully install the Fcode driver's handler.
2058  * Instead, we install our own handler in OBP which does the same thing.
2059  *
2060  * Note that the only handling done for interrupts here is to clear them
2061  * on the CPU side. If any device in the future requires more special
2062  * handling, we would have to put in some kind of framework for adding
2063  * device-specific handlers. This is *highly* unlikely, but possible.
2064  *
2065  * Finally, OBP provides a hook called "unix-interrupt-handler" to install
2066  * a Solaris-defined master-interrupt handler for a port. The default
2067  * definition for this method does nothing. Solaris may override this
2068  * with its own definition. This is the way the following handler gets
2069  * control from OBP when interrupts happen at a port after L1A, etc.
2070  */
2071 
2072 static char define_master_interrupt_handler[] =
2073 
2074 /*
2075  * This method translates an Oberon port id to the base (physical) address
2076  * of the interrupt clear registers for that port id.
2077  */
2078 
2079 ": pcich-mid>clear-int-pa   ( mid -- pa ) "
2080 "   dup 1 >> 7 and          ( mid ch# ) "
2081 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2082 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2083 "   swap d# 40 << or        ( mid ch# pa ) "
2084 "   swap d# 37 << or        ( mid pa ) "
2085 "   swap 1 and if h# 70.0000 else h# 60.0000 then "
2086 "   or h# 1400 or           ( pa ) "
2087 "; "
2088 
2089 /*
2090  * This method translates a CMU channel port id to the base (physical) address
2091  * of the interrupt clear registers for that port id. There are two classes of
2092  * interrupts that need to be handled for a CMU channel:
2093  *	- obio interrupts
2094  *	- pci interrupts
2095  * So, there are two addresses that need to be computed.
2096  */
2097 
2098 ": cmuch-mid>clear-int-pa   ( mid -- obio-pa pci-pa ) "
2099 "   dup 1 >> 7 and          ( mid ch# ) "
2100 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2101 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2102 "   swap d# 40 << or        ( mid ch# pa ) "
2103 "   swap d# 37 << or        ( mid pa ) "
2104 "   nip dup h# 1800 +       ( pa obio-pa ) "
2105 "   swap h# 1400 +          ( obio-pa pci-pa ) "
2106 "; "
2107 
2108 /*
2109  * This method checks if a given I/O port ID is valid or not.
2110  * For a given LSB,
2111  *	Oberon ports range from 0 - 3
2112  *	CMU ch has only port 4
2113  *
2114  * Also, the Oberon supports leaves 0 and 1.
2115  * The CMU ch supports only one leaf, leaf 0.
2116  */
2117 
2118 ": valid-io-mid? ( mid -- flag ) "
2119 "   dup 1 >> 7 and                     ( mid ch# ) "
2120 "   dup 4 > if 2drop false exit then   ( mid ch# ) "
2121 "   4 = swap 1 and 1 = and not "
2122 "; "
2123 
2124 /*
2125  * This method checks if a given port id is a CMU ch.
2126  */
2127 
2128 ": cmuch? ( mid -- flag ) 1 >> 7 and 4 = ; "
2129 
2130 /*
2131  * Given the base address of the array of interrupt clear registers for
2132  * a port id, this method iterates over the given interrupt number bitmap
2133  * and resets the interrupt on the CPU side for every interrupt number
2134  * in the bitmap. Note that physical addresses are used to perform the
2135  * writes, not virtual addresses. This allows the handler to work without
2136  * any involvement from Solaris.
2137  */
2138 
2139 ": clear-ints ( pa bitmap count -- ) "
2140 "   0 do                            ( pa bitmap ) "
2141 "      dup 0= if 2drop unloop exit then "
2142 "      tuck                         ( bitmap pa bitmap ) "
2143 "      1 and if                     ( bitmap pa ) "
2144 "	 dup i 8 * + 0 swap         ( bitmap pa 0 pa' ) "
2145 "	 h# 15 spacex!              ( bitmap pa ) "
2146 "      then                         ( bitmap pa ) "
2147 "      swap 1 >>                    ( pa bitmap ) "
2148 "   loop "
2149 "; "
2150 
2151 /*
2152  * This method replaces the master-interrupt handler in OBP. Once
2153  * this method is plumbed into OBP, OBP transfers control to this
2154  * handler while returning to Solaris from OBP after L1A. This method's
2155  * task is to simply reset received interrupts on the CPU side.
2156  * When the devices reassert the interrupts later, Solaris will
2157  * be able to see them and handle them.
2158  *
2159  * For each port ID that has interrupts, this method is called
2160  * once by OBP. The input arguments are:
2161  *	mid	portid
2162  *	bitmap	bitmap of interrupts that have happened
2163  *
2164  * This method returns true, if it is able to handle the interrupts.
2165  * OBP does nothing further.
2166  *
2167  * This method returns false, if it encountered a problem. Currently,
2168  * the only problem could be an invalid port id. OBP needs to do
2169  * its own processing in that case. If this method returns false,
2170  * it preserves the mid and bitmap arguments for OBP.
2171  */
2172 
2173 ": unix-resend-mondos ( mid bitmap -- [ mid bitmap false ] | true ) "
2174 
2175 /*
2176  * Uncomment the following line if you want to display the input arguments.
2177  * This is meant for debugging.
2178  * "   .\" Bitmap=\" dup u. .\" MID=\" over u. cr "
2179  */
2180 
2181 /*
2182  * If the port id is not valid (according to the Oberon and CMU ch
2183  * specifications), then return false to OBP to continue further
2184  * processing.
2185  */
2186 
2187 "   over valid-io-mid? not if       ( mid bitmap ) "
2188 "      false exit "
2189 "   then "
2190 
2191 /*
2192  * If the port is a CMU ch, then the 64-bit bitmap represents
2193  * 2 32-bit bitmaps:
2194  *	- obio interrupt bitmap (20 bits)
2195  *	- pci interrupt bitmap (32 bits)
2196  *
2197  * - Split the bitmap into two
2198  * - Compute the base addresses of the interrupt clear registers
2199  *   for both pci interrupts and obio interrupts
2200  * - Clear obio interrupts
2201  * - Clear pci interrupts
2202  */
2203 
2204 "   over cmuch? if                  ( mid bitmap ) "
2205 "      xlsplit                      ( mid pci-bit obio-bit ) "
2206 "      rot cmuch-mid>clear-int-pa   ( pci-bit obio-bit obio-pa pci-pa ) "
2207 "      >r                           ( pci-bit obio-bit obio-pa ) ( r: pci-pa ) "
2208 "      swap d# 20 clear-ints        ( pci-bit ) ( r: pci-pa ) "
2209 "      r> swap d# 32 clear-ints     (  ) ( r: ) "
2210 
2211 /*
2212  * If the port is an Oberon, then the 64-bit bitmap is used fully.
2213  *
2214  * - Compute the base address of the interrupt clear registers
2215  * - Clear interrupts
2216  */
2217 
2218 "   else                            ( mid bitmap ) "
2219 "      swap pcich-mid>clear-int-pa  ( bitmap pa ) "
2220 "      swap d# 64 clear-ints        (  ) "
2221 "   then "
2222 
2223 /*
2224  * Always return true from here.
2225  */
2226 
2227 "   true                            ( true ) "
2228 "; "
2229 ;
2230 
2231 static char	install_master_interrupt_handler[] =
2232 	"' unix-resend-mondos to unix-interrupt-handler";
2233 static char	handler[] = "unix-interrupt-handler";
2234 static char	handler_defined[] = "p\" %s\" find nip swap l! ";
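/*
 * When interpreted by OBP, handler_defined expands to
 * p" unix-interrupt-handler" find nip swap l!, which looks up the word and
 * stores the resulting found/not-found flag into the 32-bit cell whose
 * address is passed as the first argument to prom_interpret() below.
 * install_master_interrupt_handler simply points the unix-interrupt-handler
 * defer word at unix-resend-mondos.
 */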
2235 
2236 /*ARGSUSED*/
2237 static int
2238 master_interrupt_init(uint32_t portid, uint32_t xt)
2239 {
2240 	uint_t	defined;
2241 	char	buf[sizeof (handler) + sizeof (handler_defined)];
2242 
2243 	if (master_interrupt_inited)
2244 		return (1);
2245 
2246 	/*
2247 	 * Check if the defer word "unix-interrupt-handler" is defined.
2248 	 * This must be defined for OPL systems. So, this is only a
2249 	 * sanity check.
2250 	 */
2251 	(void) sprintf(buf, handler_defined, handler);
2252 	prom_interpret(buf, (uintptr_t)&defined, 0, 0, 0, 0);
2253 	if (!defined) {
2254 		cmn_err(CE_WARN, "master_interrupt_init: "
2255 		    "%s is not defined\n", handler);
2256 		return (0);
2257 	}
2258 
2259 	/*
2260 	 * Install the generic master-interrupt handler. Note that
2261 	 * this is only done one time on the first DR operation.
2262 	 * This is because, for OPL, one, single generic handler
2263 	 * handles all ports (Oberon and CMU channel) and all
2264 	 * interrupt sources within each port.
2265 	 *
2266 	 * The current support is only for the Oberon and CMU-channel.
2267 	 * If any others need to be supported, the handler has to be
2268 	 * modified accordingly.
2269 	 */
2270 
2271 	/*
2272 	 * Define the OPL master interrupt handler
2273 	 */
2274 	prom_interpret(define_master_interrupt_handler, 0, 0, 0, 0, 0);
2275 
2276 	/*
2277 	 * Take over the master interrupt handler from OBP.
2278 	 */
2279 	prom_interpret(install_master_interrupt_handler, 0, 0, 0, 0, 0);
2280 
2281 	master_interrupt_inited = 1;
2282 
2283 	/*
2284 	 * prom_interpret() does not return a status. So, we assume
2285 	 * that the calls succeeded. In reality, the calls may fail
2286 	 * if there is a syntax error, etc in the strings.
2287 	 */
2288 
2289 	return (1);
2290 }
2291 
2292 /*
2293  * Install the master-interrupt handler for a device.
2294  */
2295 static int
2296 opl_master_interrupt(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
2297 {
2298 	uint32_t	portid, xt;
2299 	int		board, channel, leaf;
2300 	int		status;
2301 
2302 	/* Check the argument */
2303 	if (fc_cell2int(cp->nargs) != 2)
2304 		return (fc_syntax_error(cp, "nargs must be 2"));
2305 
2306 	if (fc_cell2int(cp->nresults) < 1)
2307 		return (fc_syntax_error(cp, "nresults must be >= 1"));
2308 
2309 	/* Get the parameters */
2310 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
2311 	xt = fc_cell2uint32_t(fc_arg(cp, 1));
2312 
2313 	board = OPL_IO_PORTID_TO_LSB(portid);
2314 	channel = OPL_PORTID_TO_CHANNEL(portid);
2315 	leaf = OPL_PORTID_TO_LEAF(portid);
2316 
2317 	if ((board >= HWD_SBS_PER_DOMAIN) || !OPL_VALID_CHANNEL(channel) ||
2318 	    (OPL_OBERON_CHANNEL(channel) && !OPL_VALID_LEAF(leaf)) ||
2319 	    ((channel == OPL_CMU_CHANNEL) && (leaf != 0))) {
2320 		FC_DEBUG1(1, CE_CONT, "opl_master_interrupt: invalid port %x\n",
2321 		    portid);
2322 		status = 0;
2323 	} else {
2324 		status = master_interrupt_init(portid, xt);
2325 	}
2326 
2327 	cp->nresults = fc_int2cell(1);
2328 	fc_result(cp, 0) = status;
2329 
2330 	return (fc_success_op(ap, rp, cp));
2331 }
2332 
2333 /*
2334  * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
2335  */
2336 /*ARGSUSED*/
2337 static int
2338 opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
2339 {
2340 	int ret;
2341 
2342 	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);
2343 
2344 	OPL_UPDATE_PROP(string, node, "status", "okay");
2345 
2346 	return (DDI_WALK_TERMINATE);
2347 }
2348 
2349 static char *
2350 opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
2351 {
2352 	char 		*probe_string;
2353 	int		portid;
2354 
2355 	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);
2356 
2357 	if (channel == OPL_CMU_CHANNEL)
2358 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2359 	else
2360 		portid = probe->
2361 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2362 
2363 	(void) sprintf(probe_string, "%x", portid);
2364 
2365 	return (probe_string);
2366 }
2367 
2368 static int
2369 opl_probe_leaf(opl_probe_t *probe)
2370 {
2371 	int		channel, leaf, portid, error, circ;
2372 	int		board;
2373 	fco_handle_t	fco_handle, *cfg_handle;
2374 	dev_info_t	*parent, *leaf_node;
2375 	char		unit_address[UNIT_ADDR_SIZE];
2376 	char		*probe_string;
2377 	opl_board_cfg_t	*board_cfg;
2378 
2379 	board = probe->pr_board;
2380 	channel = probe->pr_channel;
2381 	leaf = probe->pr_leaf;
2382 	parent = ddi_root_node();
2383 	board_cfg = &opl_boards[board];
2384 
2385 	ASSERT(OPL_VALID_CHANNEL(channel));
2386 	ASSERT(OPL_VALID_LEAF(leaf));
2387 
2388 	if (channel == OPL_CMU_CHANNEL) {
2389 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2390 		cfg_handle = &board_cfg->cfg_cmuch_handle;
2391 	} else {
2392 		portid = probe->
2393 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2394 		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
2395 	}
2396 
2397 	/*
2398 	 * Prevent any changes to leaf_node until we have bound
2399 	 * it to the correct driver.
2400 	 */
2401 	ndi_devi_enter(parent, &circ);
2402 
2403 	/*
2404 	 * Ideally, fcode would be run from the "sid_branch_create"
2405 	 * callback (that is the primary purpose of that callback).
2406 	 * However, the fcode interpreter was written with the
2407 	 * assumption that the "new_child" was linked into the
2408 	 * device tree. The callback is invoked with the devinfo node
2409 	 * in the DS_PROTO state. More investigation is needed before
2410 	 * we can invoke the interpreter from the callback. For now,
2411 	 * we create the "new_child" in the BOUND state, invoke the
2412 	 * fcode interpreter and then rebind the dip to use any
2413 	 * compatible properties created by fcode.
2414 	 */
2415 
2416 	probe->pr_parent = parent;
2417 	probe->pr_create = opl_create_leaf;
2418 	probe->pr_hold = 1;
2419 
2420 	leaf_node = opl_create_node(probe);
2421 	if (leaf_node == NULL) {
2422 
2423 		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
2424 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2425 		ndi_devi_exit(parent, circ);
2426 		return (-1);
2427 	}
2428 
2429 	/*
2430 	 * The platform DR interfaces created the dip in
2431 	 * bound state. Bring devinfo node down to linked
2432 	 * state and hold it there until compatible
2433 	 * properties are created.
2434 	 */
2435 	e_ddi_branch_rele(leaf_node);
2436 	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
2437 	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
2438 	e_ddi_branch_hold(leaf_node);
2439 
2440 	mutex_enter(&DEVI(leaf_node)->devi_lock);
2441 	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
2442 	mutex_exit(&DEVI(leaf_node)->devi_lock);
2443 
2444 	/*
2445 	 * Drop the busy-hold on parent before calling
2446 	 * fcode_interpreter to prevent potential deadlocks
2447 	 */
2448 	ndi_devi_exit(parent, circ);
2449 
2450 	(void) sprintf(unit_address, "%x", portid);
2451 
2452 	/*
2453 	 * Get the probe string
2454 	 */
2455 	probe_string = opl_get_probe_string(probe, channel, leaf);
2456 
2457 	/*
2458 	 * The fcode pointer specified here is NULL and the fcode
2459 	 * size specified here is 0. This causes the user-level
2460 	 * fcode interpreter to issue a request to the fcode
2461 	 * driver to get the Oberon/cmu-ch fcode.
2462 	 */
2463 	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
2464 	    NULL, 0, unit_address, probe_string);
2465 
2466 	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);
2467 
2468 	if (error != 0) {
2469 		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
2470 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2471 
2472 		opl_fc_ops_free_handle(fco_handle);
2473 
2474 		if (probe_string != NULL)
2475 			kmem_free(probe_string, PROBE_STR_SIZE);
2476 
2477 		(void) opl_destroy_node(leaf_node);
2478 	} else {
2479 		*cfg_handle = fco_handle;
2480 
2481 		if (channel == OPL_CMU_CHANNEL)
2482 			board_cfg->cfg_cmuch_probe_str = probe_string;
2483 		else
2484 			board_cfg->cfg_pcich_probe_str[channel][leaf]
2485 			    = probe_string;
2486 
2487 		/*
2488 		 * Compatible properties (if any) have been created,
2489 		 * so bind driver.
2490 		 */
2491 		ndi_devi_enter(parent, &circ);
2492 		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);
2493 
2494 		mutex_enter(&DEVI(leaf_node)->devi_lock);
2495 		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
2496 		mutex_exit(&DEVI(leaf_node)->devi_lock);
2497 
2498 		ndi_devi_exit(parent, circ);
2499 
2500 		if (ndi_devi_bind_driver(leaf_node, 0) != DDI_SUCCESS) {
2501 			cmn_err(CE_WARN, "IKP: Unable to bind PCI leaf "
2502 			    "(%d-%d-%d)", probe->pr_board, probe->pr_channel,
2503 			    probe->pr_leaf);
2504 		}
2505 	}
2506 
2507 	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
2508 		return (-1);
2509 
2510 	return (0);
2511 }
2512 
2513 static void
2514 opl_init_leaves(int myboard)
2515 {
2516 	dev_info_t	*parent, *node;
2517 	char		*name;
2518 	int 		circ, ret;
2519 	int		len, portid, board, channel, leaf;
2520 	opl_board_cfg_t	*cfg;
2521 
2522 	parent = ddi_root_node();
2523 
2524 	/*
2525 	 * Hold parent node busy to walk its child list
2526 	 */
2527 	ndi_devi_enter(parent, &circ);
2528 
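	/*
	 * Find the leaf nodes that were created for this board and record
	 * them in the board configuration so that they can be located and
	 * destroyed later at unprobe time.
	 */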
2529 	for (node = ddi_get_child(parent); (node != NULL); node =
2530 	    ddi_get_next_sibling(node)) {
2531 
2532 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
2533 		if (ret != DDI_PROP_SUCCESS) {
2534 			/*
2535 			 * The property does not exist for this node.
2536 			 */
2537 			continue;
2538 		}
2539 
2540 		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
2541 
2542 			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
2543 			if (ret == DDI_PROP_SUCCESS) {
2544 
2545 				ret = OPL_GET_PROP(int, node, "board#",
2546 				    &board, -1);
2547 				if ((ret != DDI_PROP_SUCCESS) ||
2548 				    (board != myboard)) {
2549 					kmem_free(name, len);
2550 					continue;
2551 				}
2552 
2553 				cfg = &opl_boards[board];
2554 				channel = OPL_PORTID_TO_CHANNEL(portid);
2555 				if (channel == OPL_CMU_CHANNEL) {
2556 
2557 					if (cfg->cfg_cmuch_handle != NULL)
2558 						cfg->cfg_cmuch_leaf = node;
2559 
2560 				} else {
2561 
2562 					leaf = OPL_PORTID_TO_LEAF(portid);
2563 					if (cfg->cfg_pcich_handle[
2564 					    channel][leaf] != NULL)
2565 						cfg->cfg_pcich_leaf[
2566 						    channel][leaf] = node;
2567 				}
2568 			}
2569 		}
2570 
2571 		kmem_free(name, len);
2572 		if (ret != DDI_PROP_SUCCESS)
2573 			break;
2574 	}
2575 
2576 	ndi_devi_exit(parent, circ);
2577 }
2578 
2579 /*
2580  * Create "pci" node and hierarchy for the Oberon channels and the
2581  * CMU channel.
2582  */
2583 /*ARGSUSED*/
2584 static int
2585 opl_probe_io(opl_probe_t *probe)
2586 {
2587 
2588 	int		i, j;
2589 	hwd_pci_ch_t	*channels;
2590 
2591 	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {
2592 
2593 		probe->pr_channel = HWD_CMU_CHANNEL;
2594 		probe->pr_channel_status =
2595 		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
2596 		probe->pr_leaf = 0;
2597 		probe->pr_leaf_status = probe->pr_channel_status;
2598 
2599 		if (opl_probe_leaf(probe) != 0)
2600 			return (-1);
2601 	}
2602 
2603 	channels = &probe->pr_sb->sb_pci_ch[0];
2604 
2605 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2606 
2607 		if (!HWD_STATUS_OK(channels[i].pci_status))
2608 			continue;
2609 
2610 		probe->pr_channel = i;
2611 		probe->pr_channel_status = channels[i].pci_status;
2612 
2613 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2614 
2615 			probe->pr_leaf = j;
2616 			probe->pr_leaf_status =
2617 			    channels[i].pci_leaf[j].leaf_status;
2618 
2619 			if (!HWD_STATUS_OK(probe->pr_leaf_status))
2620 				continue;
2621 
2622 			(void) opl_probe_leaf(probe);
2623 		}
2624 	}
2625 	opl_init_leaves(probe->pr_board);
2626 	return (0);
2627 }
2628 
2629 /*
2630  * Perform the probe in the following order:
2631  *
2632  *	processors
2633  *	memory
2634  *	IO
2635  *
2636  * Each probe function returns 0 on success and a non-zero value on failure.
2637  * What is a failure is determined by the implementor of the probe function.
2638  * For example, while probing CPUs, any error encountered during probe
2639  * is considered a failure and causes the whole probe operation to fail.
2640  * However, for I/O, an error encountered while probing one device
2641  * should not prevent other devices from being probed. It should not cause
2642  * the whole probe operation to fail.
2643  */
2644 int
2645 opl_probe_sb(int board, unsigned *cpu_impl)
2646 {
2647 	opl_probe_t	*probe;
2648 	int		ret;
2649 
2650 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2651 		return (-1);
2652 
2653 	ASSERT(opl_cfg_inited != 0);
2654 
2655 	/*
2656 	 * If the previous probe failed and left a partially configured
2657 	 * board, we need to unprobe the board and start with a clean slate.
2658 	 */
2659 	if ((opl_boards[board].cfg_hwd != NULL) &&
2660 	    (opl_unprobe_sb(board) != 0))
2661 		return (-1);
2662 
2663 	ret = 0;
2664 
2665 	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
2666 	probe->pr_board = board;
2667 
2668 	if ((opl_probe_init(probe) != 0) ||
2669 
2670 	    (opl_probe_cpu_chips(probe) != 0) ||
2671 
2672 	    (opl_probe_memory(probe) != 0) ||
2673 
2674 	    (opl_probe_io(probe) != 0)) {
2675 
2676 		/*
2677 		 * Probe failed. Perform cleanup.
2678 		 */
2679 		(void) opl_unprobe_sb(board);
2680 		ret = -1;
2681 	}
2682 
2683 	*cpu_impl = probe->pr_cpu_impl;
2684 
2685 	kmem_free(probe, sizeof (opl_probe_t));
2686 
2687 	return (ret);
2688 }
2689 
2690 /*
2691  * This unprobing also includes CMU-CH.
2692  */
2693 /*ARGSUSED*/
2694 static int
2695 opl_unprobe_io(int board)
2696 {
2697 	int		i, j, ret;
2698 	opl_board_cfg_t	*board_cfg;
2699 	dev_info_t	**node;
2700 	fco_handle_t	*hand;
2701 	char		**probe_str;
2702 
2703 	board_cfg = &opl_boards[board];
2704 
2705 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2706 
2707 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2708 
2709 			node = &board_cfg->cfg_pcich_leaf[i][j];
2710 			hand = &board_cfg->cfg_pcich_handle[i][j];
2711 			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];
2712 
2713 			if (*node == NULL)
2714 				continue;
2715 
2716 			if (*hand != NULL) {
2717 				opl_fc_ops_free_handle(*hand);
2718 				*hand = NULL;
2719 			}
2720 
2721 			if (*probe_str != NULL) {
2722 				kmem_free(*probe_str, PROBE_STR_SIZE);
2723 				*probe_str = NULL;
2724 			}
2725 
2726 			ret = opl_destroy_node(*node);
2727 			if (ret != 0) {
2728 
2729 				cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) "
2730 				    "failed", board, i, j);
2731 				return (-1);
2732 			}
2733 
2734 			*node = NULL;
2735 
2736 		}
2737 	}
2738 
2739 	node = &board_cfg->cfg_cmuch_leaf;
2740 	hand = &board_cfg->cfg_cmuch_handle;
2741 	probe_str = &board_cfg->cfg_cmuch_probe_str;
2742 
2743 	if (*node == NULL)
2744 		return (0);
2745 
2746 	if (*hand != NULL) {
2747 		opl_fc_ops_free_handle(*hand);
2748 		*hand = NULL;
2749 	}
2750 
2751 	if (*probe_str != NULL) {
2752 		kmem_free(*probe_str, PROBE_STR_SIZE);
2753 		*probe_str = NULL;
2754 	}
2755 
2756 	if (opl_destroy_node(*node) != 0) {
2757 
2758 		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed", board,
2759 		    OPL_CMU_CHANNEL, 0);
2760 		return (-1);
2761 	}
2762 
2763 	*node = NULL;
2764 
2765 	return (0);
2766 }
2767 
2768 /*
2769  * Destroy the "pseudo-mc" node for a board.
2770  */
2771 static int
2772 opl_unprobe_memory(int board)
2773 {
2774 	opl_board_cfg_t	*board_cfg;
2775 
2776 	board_cfg = &opl_boards[board];
2777 
2778 	if (board_cfg->cfg_pseudo_mc == NULL)
2779 		return (0);
2780 
2781 	if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) {
2782 
2783 		cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board);
2784 		return (-1);
2785 	}
2786 
2787 	board_cfg->cfg_pseudo_mc = NULL;
2788 
2789 	return (0);
2790 }
2791 
2792 /*
2793  * Destroy the "cmp" nodes for a board. This also destroys the "core"
2794  * and "cpu" nodes below the "cmp" nodes.
2795  */
2796 static int
2797 opl_unprobe_processors(int board)
2798 {
2799 	int		i;
2800 	dev_info_t	**cfg_cpu_chips;
2801 
2802 	cfg_cpu_chips = opl_boards[board].cfg_cpu_chips;
2803 
2804 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
2805 
2806 		if (cfg_cpu_chips[i] == NULL)
2807 			continue;
2808 
2809 		if (opl_destroy_node(cfg_cpu_chips[i]) != 0) {
2810 
2811 			cmn_err(CE_WARN, "IKP: destroy chip (%d-%d) failed",
2812 			    board, i);
2813 			return (-1);
2814 		}
2815 
2816 		cfg_cpu_chips[i] = NULL;
2817 	}
2818 
2819 	return (0);
2820 }
2821 
2822 /*
2823  * Perform the unprobe in the following order:
2824  *
2825  *	IO
2826  *	memory
2827  *	processors
2828  */
2829 int
2830 opl_unprobe_sb(int board)
2831 {
2832 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2833 		return (-1);
2834 
2835 	ASSERT(opl_cfg_inited != 0);
2836 
2837 	if ((opl_unprobe_io(board) != 0) ||
2838 
2839 	    (opl_unprobe_memory(board) != 0) ||
2840 
2841 	    (opl_unprobe_processors(board) != 0))
2842 
2843 		return (-1);
2844 
2845 	if (opl_boards[board].cfg_hwd != NULL) {
2846 #ifdef UCTEST
2847 		size_t			size = 0xA000;
2848 #endif
2849 		/* Release the memory for the HWD */
2850 		void *hwdp = opl_boards[board].cfg_hwd;
2851 		opl_boards[board].cfg_hwd = NULL;
2852 #ifdef UCTEST
2853 		hwdp = (void *)((char *)hwdp - 0x1000);
2854 		hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK);
2855 		vmem_free(heap_arena, hwdp, size);
2856 #else
2857 		kmem_free(hwdp, HWD_DATA_SIZE);
2858 #endif
2859 	}
2860 	return (0);
2861 }
2862 
2863 /*
2864  * For MAC patrol support, we need to update the PA-related properties
2865  * when there is a copy-rename event.  This should be called after the
2866  * physical copy and rename has been done by DR, and before the MAC
2867  * patrol is restarted.
2868  */
2869 int
2870 oplcfg_pa_swap(int from, int to)
2871 {
2872 	dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc;
2873 	dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc;
2874 	opl_range_t *rangef, *ranget;
2875 	int elems;
2876 	int ret;
2877 
2878 	if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef,
2879 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2880 		/* XXX -- bad news */
2881 		return (-1);
2882 	}
2883 	if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget,
2884 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2885 		/* XXX -- bad news */
2886 		return (-1);
2887 	}
2888 	OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget,
2889 	    4);
2890 	OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef,
2891 	    4);
2892 
2893 	OPL_FREE_PROP(ranget);
2894 	OPL_FREE_PROP(rangef);
2895 
2896 	return (0);
2897 }
2898