1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  * Copyright 2023 Oxide Computer Company
26  */
27 
28 #include <sys/conf.h>
29 #include <sys/kmem.h>
30 #include <sys/debug.h>
31 #include <sys/modctl.h>
32 #include <sys/autoconf.h>
33 #include <sys/hwconf.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/sunndi.h>
38 #include <sys/ndi_impldefs.h>
39 #include <sys/machsystm.h>
40 #include <sys/fcode.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/opl_cfg.h>
44 #include <sys/scfd/scfostoescf.h>
45 
46 static unsigned int		opl_cfg_inited;
47 static opl_board_cfg_t		opl_boards[HWD_SBS_PER_DOMAIN];
48 
49 /*
50  * Module control operations
51  */
52 
53 extern struct mod_ops mod_miscops;
54 
55 static struct modlmisc modlmisc = {
56 	&mod_miscops,				/* Type of module */
57 	"OPL opl_cfg"
58 };
59 
60 static struct modlinkage modlinkage = {
61 	MODREV_1, (void *)&modlmisc, NULL
62 };
63 
64 static int	opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
65 static int	opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
66 static int	opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
67 static int	opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
68 
69 static int	opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
70 static int	opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
71 static int	opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);
72 
73 static int	opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
74 
75 static int	opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
76 static int	opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
77 
78 static int	opl_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
79 				ddi_device_acc_attr_t *, ddi_acc_handle_t *);
80 static void	opl_unmap_phys(ddi_acc_handle_t *);
81 static int	opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);
82 static int	opl_master_interrupt(dev_info_t *, fco_handle_t, fc_ci_t *);
83 
84 extern int	prom_get_fcode_size(char *);
85 extern int	prom_get_fcode(char *, char *);
86 
87 static int	master_interrupt_init(uint32_t, uint32_t);
88 
89 #define	PROBE_STR_SIZE	64
90 #define	UNIT_ADDR_SIZE	64
91 
92 opl_fc_ops_t	opl_fc_ops[] = {
93 
94 	{	FC_MAP_IN,		opl_map_in},
95 	{	FC_MAP_OUT,		opl_map_out},
96 	{	"rx@",			opl_register_fetch},
97 	{	FC_RL_FETCH,		opl_register_fetch},
98 	{	FC_RW_FETCH,		opl_register_fetch},
99 	{	FC_RB_FETCH,		opl_register_fetch},
100 	{	"rx!",			opl_register_store},
101 	{	FC_RL_STORE,		opl_register_store},
102 	{	FC_RW_STORE,		opl_register_store},
103 	{	FC_RB_STORE,		opl_register_store},
104 	{	"claim-memory",		opl_claim_memory},
105 	{	"release-memory",	opl_release_memory},
106 	{	"vtop",			opl_vtop},
107 	{	FC_CONFIG_CHILD,	opl_config_child},
108 	{	FC_GET_FCODE_SIZE,	opl_get_fcode_size},
109 	{	FC_GET_FCODE,		opl_get_fcode},
110 	{	"get-hwd-va",		opl_get_hwd_va},
111 	{	"master-interrupt",	opl_master_interrupt},
112 	{	NULL,			NULL}
113 
114 };
115 
116 extern caddr_t	efcode_vaddr;
117 extern int	efcode_size;
118 
119 #ifdef DEBUG
120 #define	HWDDUMP_OFFSETS		1
121 #define	HWDDUMP_ALL_STATUS	2
122 #define	HWDDUMP_CHUNKS		3
123 #define	HWDDUMP_SBP		4
124 
125 int		hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
126 #endif
127 
128 static int	master_interrupt_inited = 0;
129 
130 int
131 _init()
132 {
133 	int	err = 0;
134 
135 	/*
136 	 * Create a resource map for the contiguous memory allocated
137 	 * at start-of-day in startup.c
138 	 */
139 	err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
140 	if (err == NDI_FAILURE) {
141 		cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
142 		return (1);
143 	}
144 
145 	/*
146 	 * Put the allocated memory into the pool.
147 	 */
148 	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
149 	    (uint64_t)efcode_size, "opl-fcodemem", 0);
150 
151 	if ((err = mod_install(&modlinkage)) != 0) {
152 		cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
153 		(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
154 	}
155 
156 	return (err);
157 }
158 
159 int
160 _fini(void)
161 {
162 	int ret;
163 
164 	ret = (mod_remove(&modlinkage));
165 	if (ret != 0)
166 		return (ret);
167 
168 	(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
169 
170 	return (ret);
171 }
172 
173 int
174 _info(struct modinfo *modinfop)
175 {
176 	return (mod_info(&modlinkage, modinfop));
177 }
178 
179 #ifdef DEBUG
180 static void
181 opl_dump_hwd(opl_probe_t *probe)
182 {
183 	hwd_header_t		*hdrp;
184 	hwd_sb_status_t		*statp;
185 	hwd_domain_info_t	*dinfop;
186 	hwd_sb_t		*sbp;
187 	hwd_cpu_chip_t		*chips;
188 	hwd_pci_ch_t		*channels;
189 	int			board, i, status;
190 
191 	board = probe->pr_board;
192 
193 	hdrp = probe->pr_hdr;
194 	statp = probe->pr_sb_status;
195 	dinfop = probe->pr_dinfo;
196 	sbp = probe->pr_sb;
197 
198 	printf("HWD: board %d\n", board);
199 	printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
200 	printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
201 	    hdrp->hdr_version.minor);
202 
203 	if (hwddump_flags & HWDDUMP_OFFSETS) {
204 		printf("HWD:status offset = 0x%x\n",
205 		    hdrp->hdr_sb_status_offset);
206 		printf("HWD:domain offset = 0x%x\n",
207 		    hdrp->hdr_domain_info_offset);
208 		printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
209 	}
210 
211 	if (hwddump_flags & HWDDUMP_SBP)
212 		printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);
213 
214 	if (hwddump_flags & HWDDUMP_ALL_STATUS) {
215 		int bd;
216 		printf("HWD:board status =");
217 		for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
218 			printf("%x ", statp->sb_status[bd]);
219 		printf("\n");
220 	} else {
221 		printf("HWD:board status = %d\n", statp->sb_status[board]);
222 	}
223 
224 	printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
225 	printf("HWD:platform = %s\n", dinfop->dinf_platform_token);
226 
227 	printf("HWD:chip status:\n");
228 	chips = &sbp->sb_cmu.cmu_cpu_chips[0];
229 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
230 
231 		status = chips[i].chip_status;
232 		printf("chip[%d] = ", i);
233 		if (HWD_STATUS_NONE(status))
234 			printf("none");
235 		else if (HWD_STATUS_FAILED(status))
236 			printf("fail");
237 		else if (HWD_STATUS_OK(status))
238 			printf("ok");
239 		printf("\n");
240 	}
241 
242 	if (hwddump_flags & HWDDUMP_CHUNKS) {
243 		int chunk;
244 		hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
245 		printf("HWD:chunks:\n");
246 		for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
247 			printf("\t%d 0x%lx 0x%lx\n", chunk,
248 			    mem->mem_chunks[chunk].chnk_start_address,
249 			    mem->mem_chunks[chunk].chnk_size);
250 	}
251 
252 	printf("HWD:channel status:\n");
253 	channels = &sbp->sb_pci_ch[0];
254 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
255 
256 		status = channels[i].pci_status;
257 		printf("channels[%d] = ", i);
258 		if (HWD_STATUS_NONE(status))
259 			printf("none");
260 		else if (HWD_STATUS_FAILED(status))
261 			printf("fail");
262 		else if (HWD_STATUS_OK(status))
263 			printf("ok");
264 		printf("\n");
265 	}
266 	printf("channels[%d] = ", i);
267 	status = sbp->sb_cmu.cmu_ch.chan_status;
268 	if (HWD_STATUS_NONE(status))
269 		printf("none");
270 	else if (HWD_STATUS_FAILED(status))
271 		printf("fail");
272 	else if (HWD_STATUS_OK(status))
273 		printf("ok");
274 	printf("\n");
275 }
276 #endif /* DEBUG */
277 
278 #ifdef UCTEST
279 	/*
280 	 * For SesamI debugging, just map the SRAM directly to a kernel
281 	 * VA and read it out from there
282 	 */
283 
284 #include <sys/vmem.h>
285 #include <vm/seg_kmem.h>
286 
287 /*
288  * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
289  * at page boundaries. So, we use a base address of 0x4081F1322000LL.
290  * Note that this has to match the HWD base pa set in .sesami-common-defs.
291  *
292  * The size specified for the HWD in the SCF spec is 36K. But since
293  * we adjusted the base address by 4K, we need to use 40K for the
294  * mapping size to cover the HWD. And 40K is also a multiple of the
295  * base page size.
296  */
297 #define	OPL_HWD_BASE(lsb)       \
298 (0x4081F1322000LL | (((uint64_t)(lsb)) << 40))
299 
300 	void    *opl_hwd_vaddr;
301 #endif /* UCTEST */
302 
303 /*
304  * Get the hardware descriptor from SCF.
305  */
306 
307 /*ARGSUSED*/
308 int
309 opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
310     hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
311 {
312 	static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
313 	    void *) = NULL;
314 	void *hwdp;
315 
316 	uint32_t key = KEY_ESCF;	/* required value */
317 	uint8_t  type = 0x40;		/* SUB_OS_RECEIVE_HWD */
318 	uint32_t transid = board;
319 	uint32_t datasize = HWD_DATA_SIZE;
320 
321 	hwd_header_t		*hd;
322 	hwd_sb_status_t		*st;
323 	hwd_domain_info_t	*di;
324 	hwd_sb_t		*sb;
325 
326 	int	ret;
327 
328 	if (opl_boards[board].cfg_hwd == NULL) {
329 #ifdef UCTEST
330 		/*
331 		 * Just map the HWD in SRAM to a kernel VA
332 		 */
333 
334 		size_t			size;
335 		pfn_t			pfn;
336 
337 		size = 0xA000;
338 
339 		opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
340 		if (opl_hwd_vaddr == NULL) {
341 			cmn_err(CE_NOTE, "No space for HWD");
342 			return (-1);
343 		}
344 
345 		pfn = btop(OPL_HWD_BASE(board));
346 		hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
347 		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
348 
349 		hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
350 		opl_boards[board].cfg_hwd = hwdp;
351 		ret = 0;
352 #else
353 
354 		/* find the scf_service_getinfo() function */
355 		if (getinfop == NULL)
356 			getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
357 			    uint32_t *,
358 			    void *))modgetsymvalue("scf_service_getinfo", 0);
359 
360 		if (getinfop == NULL)
361 			return (-1);
362 
363 		/* allocate memory to receive the data */
364 		hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);
365 
366 		/* get the HWD */
367 		ret = (*getinfop)(key, type, transid, &datasize, hwdp);
368 		if (ret == 0)
369 			opl_boards[board].cfg_hwd = hwdp;
370 		else
371 			kmem_free(hwdp, HWD_DATA_SIZE);
372 #endif
373 	} else {
374 		hwdp = opl_boards[board].cfg_hwd;
375 		ret = 0;
376 	}
377 
378 	/* copy the data to the destination */
379 	if (ret == 0) {
380 		hd = (hwd_header_t *)hwdp;
381 		st = (hwd_sb_status_t *)
382 		    ((char *)hwdp + hd->hdr_sb_status_offset);
383 		di = (hwd_domain_info_t *)
384 		    ((char *)hwdp + hd->hdr_domain_info_offset);
385 		sb = (hwd_sb_t *)
386 		    ((char *)hwdp + hd->hdr_sb_info_offset);
387 		if (hdrp != NULL)
388 			*hdrp = hd;
389 		if (statp != NULL)
390 			*statp = st;
391 		if (dinfop != NULL)
392 			*dinfop = di;
393 		if (sbp != NULL)
394 			*sbp = sb;
395 	}
396 
397 	return (ret);
398 }
399 
400 /*
401  * The opl_probe_t probe structure is used to pass all sorts of parameters
402  * to callback functions during probing. It also contains a snapshot of
403  * the hardware descriptor that is taken at the beginning of a probe.
404  */
405 static int
406 opl_probe_init(opl_probe_t *probe)
407 {
408 	hwd_header_t		**hdrp;
409 	hwd_sb_status_t		**statp;
410 	hwd_domain_info_t	**dinfop;
411 	hwd_sb_t		**sbp;
412 	int			board, ret;
413 
414 	board = probe->pr_board;
415 
416 	hdrp = &probe->pr_hdr;
417 	statp = &probe->pr_sb_status;
418 	dinfop = &probe->pr_dinfo;
419 	sbp = &probe->pr_sb;
420 
421 	/*
422 	 * Read the hardware descriptor.
423 	 */
424 	ret = opl_read_hwd(board, hdrp, statp, dinfop, sbp);
425 	if (ret != 0) {
426 
427 		cmn_err(CE_WARN, "IKP: failed to read HWD header");
428 		return (-1);
429 	}
430 
431 #ifdef DEBUG
432 	opl_dump_hwd(probe);
433 #endif
434 	return (0);
435 }
436 
437 /*
438  * This function is used to obtain pointers to relevant device nodes
439  * which are created by Solaris at boot time.
440  *
441  * This function walks the child nodes of a given node, extracts
442  * the "name" property, if it exists, and passes the node to a
443  * callback init function. The callback determines if this node is
444  * interesting or not. If it is, then a pointer to the node is
445  * stored away by the callback for use during unprobe.
446  *
447  * The DDI get property function allocates storage for the name
448  * property. That needs to be freed within this function.
449  */
450 static int
451 opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
452 {
453 	dev_info_t	*node;
454 	char		*name;
455 	int		ret;
456 	int		len;
457 
458 	ASSERT(parent != NULL);
459 
460 	/*
461 	 * Hold parent node busy to walk its child list
462 	 */
463 	ndi_devi_enter(parent);
464 	node = ddi_get_child(parent);
465 
466 	while (node != NULL) {
467 
468 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
469 		if (ret != DDI_PROP_SUCCESS) {
470 			/*
471 			 * The property does not exist for this node.
472 			 */
473 			node = ddi_get_next_sibling(node);
474 			continue;
475 		}
476 
477 		ret = init(node, name, len);
478 		kmem_free(name, len);
479 		if (ret != 0) {
480 
481 			ndi_devi_exit(parent);
482 			return (-1);
483 		}
484 
485 		node = ddi_get_next_sibling(node);
486 	}
487 
488 	ndi_devi_exit(parent);
489 
490 	return (0);
491 }
492 
493 /*
494  * This init function finds all the interesting nodes under the
495  * root node and stores pointers to them. The following nodes
496  * are considered interesting by this implementation:
497  *
498  *	"cmp"
499  *		These are nodes that represent processor chips.
500  *
501  *	"pci"
502  *		These are nodes that represent PCI leaves.
503  *
504  *	"pseudo-mc"
505  *		These are nodes that contain memory information.
506  */
507 static int
508 opl_init_root_nodes(dev_info_t *node, char *name, int len)
509 {
510 	int		portid, board, chip, channel, leaf;
511 	int		ret;
512 
513 	if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {
514 
515 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
516 		if (ret != DDI_PROP_SUCCESS)
517 			return (-1);
518 
519 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
520 		if (ret != DDI_PROP_SUCCESS)
521 			return (-1);
522 
523 		chip = OPL_CPU_CHIP(portid);
524 		opl_boards[board].cfg_cpu_chips[chip] = node;
525 
526 	} else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
527 
528 		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
529 		if (ret != DDI_PROP_SUCCESS)
530 			return (-1);
531 
532 		board = OPL_IO_PORTID_TO_LSB(portid);
533 		channel = OPL_PORTID_TO_CHANNEL(portid);
534 
535 		if (channel == OPL_CMU_CHANNEL) {
536 
537 			opl_boards[board].cfg_cmuch_leaf = node;
538 
539 		} else {
540 
541 			leaf = OPL_PORTID_TO_LEAF(portid);
542 			opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
543 		}
544 	} else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {
545 
546 		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
547 		if (ret != DDI_PROP_SUCCESS)
548 			return (-1);
549 
550 		ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));
551 
552 		opl_boards[board].cfg_pseudo_mc = node;
553 	}
554 
555 	return (0);
556 }
557 
558 /*
559  * This function initializes the OPL IKP feature. Currently, all it does
560  * is find the interesting nodes that Solaris has created at boot time
561  * for boards present at boot time and store pointers to them. This
562  * is useful if those boards are unprobed by DR.
563  */
564 int
565 opl_init_cfg()
566 {
567 	dev_info_t	*root;
568 
569 	if (opl_cfg_inited == 0) {
570 
571 		root = ddi_root_node();
572 		if ((opl_init_nodes(root, opl_init_root_nodes) != 0)) {
573 			cmn_err(CE_WARN, "IKP: init failed");
574 			return (1);
575 		}
576 
577 		opl_cfg_inited = 1;
578 	}
579 
580 	return (0);
581 }
582 
583 /*
584  * When DR is initialized, we walk the device tree and acquire a hold on
585  * all the nodes that are interesting to IKP. This is so that the corresponding
586  * branches cannot be deleted.
587  *
588  * The following function informs the walk about which nodes are interesting
589  * so that it can hold the corresponding branches.
590  */
591 static int
592 opl_hold_node(char *name)
593 {
594 	/*
595 	 * We only need to hold/release the following nodes which
596 	 * represent separate branches that must be managed.
597 	 */
598 	return ((strcmp(name, OPL_CPU_CHIP_NODE) == 0) ||
599 	    (strcmp(name, OPL_PSEUDO_MC_NODE) == 0) ||
600 	    (strcmp(name, OPL_PCI_LEAF_NODE) == 0));
601 }
602 
603 static int
604 opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
605 {
606 
607 	int	*holdp = (int *)arg;
608 	char	*name = ddi_node_name(rdip);
609 
610 	/*
611 	 * We only need to hold/release the following nodes which
612 	 * represent separate branches that must be managed.
613 	 */
614 	if (opl_hold_node(name) == 0) {
615 		/* Not of interest to us */
616 		return (DDI_WALK_PRUNECHILD);
617 	}
618 	if (*holdp) {
619 		ASSERT(!e_ddi_branch_held(rdip));
620 		e_ddi_branch_hold(rdip);
621 	} else {
622 		ASSERT(e_ddi_branch_held(rdip));
623 		e_ddi_branch_rele(rdip);
624 	}
625 
626 	return (DDI_WALK_PRUNECHILD);
627 }
628 
629 void
630 opl_hold_devtree()
631 {
632 	dev_info_t *dip;
633 	int hold = 1;
634 
635 	dip = ddi_root_node();
636 	ndi_devi_enter(dip);
637 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
638 	ndi_devi_exit(dip);
639 }
640 
641 void
642 opl_release_devtree()
643 {
644 	dev_info_t *dip;
645 	int hold = 0;
646 
647 	dip = ddi_root_node();
648 	ndi_devi_enter(dip);
649 	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
650 	ndi_devi_exit(dip);
651 }
652 
653 /*
654  * This is a helper function that allows opl_create_node() to return a
655  * pointer to a newly created node to its caller.
656  */
657 /*ARGSUSED*/
658 static void
659 opl_set_node(dev_info_t *node, void *arg, uint_t flags)
660 {
661 	opl_probe_t	*probe;
662 
663 	probe = arg;
664 	probe->pr_node = node;
665 }
666 
667 /*
668  * Function to create a node in the device tree under a specified parent.
669  *
670  * e_ddi_branch_create() allows the creation of a whole branch with a
671  * single call of the function. However, we only use it to create one node
672  * at a time in the case of non-I/O device nodes. In other words, we
673  * create branches by repeatedly using this function. This makes the
674  * code more readable.
675  *
676  * The branch descriptor passed to e_ddi_branch_create() takes two
677  * callbacks. The create() callback is used to set the properties of a
678  * newly created node. The other callback is used to return a pointer
679  * to the newly created node. The create() callback is passed by the
680  * caller of this function based on the kind of node it wishes to
681  * create.
682  *
683  * e_ddi_branch_create() returns with the newly created node held. We
684  * only need to hold the top nodes of the branches we create. We release
685  * the hold for the others. E.g., the "cmp" node needs to be held. Since
686  * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
687  * nodes below it.
688  */
689 static dev_info_t *
690 opl_create_node(opl_probe_t *probe)
691 {
692 	devi_branch_t	branch;
693 
694 	probe->pr_node = NULL;
695 
696 	branch.arg = probe;
697 	branch.type = DEVI_BRANCH_SID;
698 	branch.create.sid_branch_create = probe->pr_create;
699 	branch.devi_branch_callback = opl_set_node;
700 
701 	if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
702 		return (NULL);
703 
704 	ASSERT(probe->pr_node != NULL);
705 
706 	if (probe->pr_hold == 0)
707 		e_ddi_branch_rele(probe->pr_node);
708 
709 	return (probe->pr_node);
710 }
711 
712 /*
713  * Function to tear down a whole branch rooted at the specified node.
714  *
715  * Although we create each node of a branch individually, we destroy
716  * a whole branch in one call. This is more efficient.
717  */
718 static int
719 opl_destroy_node(dev_info_t *node)
720 {
721 	if (e_ddi_branch_destroy(node, NULL, 0) != 0) {
722 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
723 		(void) ddi_pathname(node, path);
724 		cmn_err(CE_WARN, "OPL node removal failed: %s (%p)", path,
725 		    (void *)node);
726 		kmem_free(path, MAXPATHLEN);
727 		return (-1);
728 	}
729 
730 	return (0);
731 }
732 
733 /*
734  * Set the properties for a "cpu" node.
735  */
736 /*ARGSUSED*/
737 static int
738 opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
739 {
740 	opl_probe_t	*probe;
741 	hwd_cpu_chip_t	*chip;
742 	hwd_core_t	*core;
743 	hwd_cpu_t	*cpu;
744 	int		ret;
745 
746 	probe = arg;
747 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
748 	core = &chip->chip_cores[probe->pr_core];
749 	cpu = &core->core_cpus[probe->pr_cpu];
750 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
751 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);
752 
753 	OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
754 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);
755 
756 	OPL_UPDATE_PROP(string, node, "status", "okay");
757 
758 	return (DDI_WALK_TERMINATE);
759 }
760 
761 /*
762  * Create "cpu" nodes as child nodes of a given "core" node.
763  */
764 static int
765 opl_probe_cpus(opl_probe_t *probe)
766 {
767 	int		i;
768 	hwd_cpu_chip_t	*chip;
769 	hwd_core_t	*core;
770 	hwd_cpu_t	*cpus;
771 
772 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
773 	core = &chip->chip_cores[probe->pr_core];
774 	cpus = &core->core_cpus[0];
775 
776 	for (i = 0; i < HWD_CPUS_PER_CORE; i++) {
777 
778 		/*
779 		 * Olympus-C has 2 cpus per core.
780 		 * Jupiter has 4 cpus per core.
781 		 * For the Olympus-C based platform, we expect the cpu_status
782 		 * of the non-existent cpus to be set to missing.
783 		 */
784 		if (!HWD_STATUS_OK(cpus[i].cpu_status))
785 			continue;
786 
787 		probe->pr_create = opl_create_cpu;
788 		probe->pr_cpu = i;
789 		if (opl_create_node(probe) == NULL) {
790 
791 			cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
792 			    probe->pr_board, probe->pr_cpu_chip, probe->pr_core,
793 			    probe->pr_cpu);
794 			return (-1);
795 		}
796 	}
797 
798 	return (0);
799 }
800 
801 /*
802  * Set the properties for a "core" node.
803  */
804 /*ARGSUSED*/
805 static int
806 opl_create_core(dev_info_t *node, void *arg, uint_t flags)
807 {
808 	opl_probe_t	*probe;
809 	hwd_cpu_chip_t	*chip;
810 	hwd_core_t	*core;
811 	int		sharing[2];
812 	int		ret;
813 
814 	probe = arg;
815 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
816 	core = &chip->chip_cores[probe->pr_core];
817 
818 	OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
819 	OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
820 	OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);
821 
822 	OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
823 	OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
824 	OPL_UPDATE_PROP(int, node, "implementation#",
825 	    core->core_implementation);
826 	OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);
827 
828 	OPL_UPDATE_PROP(int, node, "sparc-version", 9);
829 	OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);
830 
831 	OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
832 	OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
833 	    core->core_l1_icache_line_size);
834 	OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
835 	    core->core_l1_icache_associativity);
836 	OPL_UPDATE_PROP(int, node, "#itlb-entries",
837 	    core->core_num_itlb_entries);
838 
839 	OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
840 	OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
841 	    core->core_l1_dcache_line_size);
842 	OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
843 	    core->core_l1_dcache_associativity);
844 	OPL_UPDATE_PROP(int, node, "#dtlb-entries",
845 	    core->core_num_dtlb_entries);
846 
847 	OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
848 	OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
849 	    core->core_l2_cache_line_size);
850 	OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
851 	    core->core_l2_cache_associativity);
852 	sharing[0] = 0;
853 	sharing[1] = core->core_l2_cache_sharing;
854 	OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);
855 
856 	OPL_UPDATE_PROP(string, node, "status", "okay");
857 
858 	return (DDI_WALK_TERMINATE);
859 }
860 
861 /*
862  * Create "core" nodes as child nodes of a given "cmp" node.
863  *
864  * Create the branch below each "core" node.
865  */
866 static int
867 opl_probe_cores(opl_probe_t *probe)
868 {
869 	int		i;
870 	hwd_cpu_chip_t	*chip;
871 	hwd_core_t	*cores;
872 	dev_info_t	*parent, *node;
873 
874 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
875 	cores = &chip->chip_cores[0];
876 	parent = probe->pr_parent;
877 
878 	for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {
879 
880 		if (!HWD_STATUS_OK(cores[i].core_status))
881 			continue;
882 
883 		probe->pr_parent = parent;
884 		probe->pr_create = opl_create_core;
885 		probe->pr_core = i;
886 		node = opl_create_node(probe);
887 		if (node == NULL) {
888 
889 			cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
890 			    probe->pr_board, probe->pr_cpu_chip,
891 			    probe->pr_core);
892 			return (-1);
893 		}
894 
895 		/*
896 		 * Create "cpu" nodes below "core".
897 		 */
898 		probe->pr_parent = node;
899 		if (opl_probe_cpus(probe) != 0)
900 			return (-1);
901 		probe->pr_cpu_impl |= (1 << cores[i].core_implementation);
902 	}
903 
904 	return (0);
905 }
906 
907 /*
908  * Set the properties for a "cmp" node.
909  */
910 /*ARGSUSED*/
911 static int
912 opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
913 {
914 	opl_probe_t	*probe;
915 	hwd_cpu_chip_t	*chip;
916 	opl_range_t	range;
917 	uint64_t	dummy_addr;
918 	int		ret;
919 
920 	probe = arg;
921 	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
922 
923 	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);
924 
925 	OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
926 	OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);
927 
928 	dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
929 	range.rg_addr_hi = OPL_HI(dummy_addr);
930 	range.rg_addr_lo = OPL_LO(dummy_addr);
931 	range.rg_size_hi = 0;
932 	range.rg_size_lo = 0;
933 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
934 
935 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
936 	OPL_UPDATE_PROP(int, node, "#size-cells", 0);
937 
938 	OPL_UPDATE_PROP(string, node, "status", "okay");
939 
940 	return (DDI_WALK_TERMINATE);
941 }
942 
943 /*
944  * Create "cmp" nodes as child nodes of the root node.
945  *
946  * Create the branch below each "cmp" node.
947  */
948 static int
949 opl_probe_cpu_chips(opl_probe_t *probe)
950 {
951 	int		i;
952 	dev_info_t	**cfg_cpu_chips;
953 	hwd_cpu_chip_t	*chips;
954 	dev_info_t	*node;
955 
956 	cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
957 	chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];
958 
959 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
960 
961 		ASSERT(cfg_cpu_chips[i] == NULL);
962 
963 		if (!HWD_STATUS_OK(chips[i].chip_status))
964 			continue;
965 
966 		probe->pr_parent = ddi_root_node();
967 		probe->pr_create = opl_create_cpu_chip;
968 		probe->pr_cpu_chip = i;
969 		probe->pr_hold = 1;
970 		node = opl_create_node(probe);
971 		if (node == NULL) {
972 
973 			cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
974 			    probe->pr_board, probe->pr_cpu_chip);
975 			return (-1);
976 		}
977 
978 		cfg_cpu_chips[i] = node;
979 
980 		/*
981 		 * Create "core" nodes below "cmp".
982 		 * We hold the "cmp" node. So, there is no need to hold
983 		 * the "core" and "cpu" nodes below it.
984 		 */
985 		probe->pr_parent = node;
986 		probe->pr_hold = 0;
987 		if (opl_probe_cores(probe) != 0)
988 			return (-1);
989 	}
990 
991 	return (0);
992 }
993 
994 /*
995  * Set the properties for a "pseudo-mc" node.
996  */
997 /*ARGSUSED*/
998 static int
999 opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
1000 {
1001 	opl_probe_t	*probe;
1002 	int		board, portid;
1003 	hwd_bank_t	*bank;
1004 	hwd_memory_t	*mem;
1005 	opl_range_t	range;
1006 	opl_mc_addr_t	mc[HWD_BANKS_PER_CMU];
1007 	int		status[2][7];
1008 	int		i, j;
1009 	int		ret;
1010 
1011 	probe = arg;
1012 	board = probe->pr_board;
1013 
1014 	OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
1015 	OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
1016 	OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");
1017 
1018 	portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
1019 	OPL_UPDATE_PROP(int, node, "portid", portid);
1020 
1021 	range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
1022 	range.rg_addr_lo = 0x200;
1023 	range.rg_size_hi = 0;
1024 	range.rg_size_lo = 0;
1025 	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
1026 
1027 	OPL_UPDATE_PROP(int, node, "board#", board);
1028 	OPL_UPDATE_PROP(int, node, "physical-board#",
1029 	    probe->pr_sb->sb_psb_number);
1030 
1031 	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
1032 	OPL_UPDATE_PROP(int, node, "#size-cells", 2);
1033 
1034 	mem = &probe->pr_sb->sb_cmu.cmu_memory;
1035 
1036 	range.rg_addr_hi = OPL_HI(mem->mem_start_address);
1037 	range.rg_addr_lo = OPL_LO(mem->mem_start_address);
1038 	range.rg_size_hi = OPL_HI(mem->mem_size);
1039 	range.rg_size_lo = OPL_LO(mem->mem_size);
1040 	OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);
1041 
1042 	bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
1043 	for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {
1044 
1045 		if (!HWD_STATUS_OK(bank[i].bank_status))
1046 			continue;
1047 
1048 		mc[j].mc_bank = i;
1049 		mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
1050 		mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
1051 		j++;
1052 	}
1053 
1054 	if (j > 0) {
1055 		OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);
1056 	} else {
1057 		/*
1058 		 * If there is no memory, we need the mc-addr property, but
1059 		 * it is length 0.  The only way to do this using ndi seems
1060 		 * to be by creating a boolean property.
1061 		 */
1062 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node, "mc-addr");
1063 		OPL_UPDATE_PROP_ERR(ret, "mc-addr");
1064 	}
1065 
1066 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
1067 	    mem->mem_cs[0].cs_pa_mac_table, 64);
1068 	OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
1069 	    mem->mem_cs[1].cs_pa_mac_table, 64);
1070 
1071 #define	CS_PER_MEM 2
1072 
1073 	for (i = 0, j = 0; i < CS_PER_MEM; i++) {
1074 		if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
1075 		    HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
1076 			status[j][0] = i;
1077 			if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
1078 				status[j][1] = 0;
1079 			else
1080 				status[j][1] = 1;
1081 			status[j][2] =
1082 			    OPL_HI(mem->mem_cs[i].cs_available_capacity);
1083 			status[j][3] =
1084 			    OPL_LO(mem->mem_cs[i].cs_available_capacity);
1085 			status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
1086 			status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
1087 			status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
1088 			j++;
1089 		}
1090 	}
1091 
1092 	if (j > 0) {
1093 		OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
1094 		    j*7);
1095 	} else {
1096 		/*
1097 		 * If there is no memory, we need the cs-status property, but
1098 		 * it is length 0.  The only way to do this using ndi seems
1099 		 * to be by creating a boolean property.
1100 		 */
1101 		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node,
1102 		    "cs-status");
1103 		OPL_UPDATE_PROP_ERR(ret, "cs-status");
1104 	}
1105 
1106 	return (DDI_WALK_TERMINATE);
1107 }
1108 
1109 /*
1110  * Create "pseudo-mc" nodes
1111  */
1112 static int
1113 opl_probe_memory(opl_probe_t *probe)
1114 {
1115 	int		board;
1116 	opl_board_cfg_t	*board_cfg;
1117 	dev_info_t	*node;
1118 
1119 	board = probe->pr_board;
1120 	board_cfg = &opl_boards[board];
1121 
1122 	ASSERT(board_cfg->cfg_pseudo_mc == NULL);
1123 
1124 	probe->pr_parent = ddi_root_node();
1125 	probe->pr_create = opl_create_pseudo_mc;
1126 	probe->pr_hold = 1;
1127 	node = opl_create_node(probe);
1128 	if (node == NULL) {
1129 
1130 		cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed", board);
1131 		return (-1);
1132 	}
1133 
1134 	board_cfg->cfg_pseudo_mc = node;
1135 
1136 	return (0);
1137 }
1138 
1139 /*
1140  * Allocate the fcode ops handle.
1141  */
1142 /*ARGSUSED*/
1143 static
1144 fco_handle_t
1145 opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
1146     void *fcode, size_t fcode_size, char *unit_address,
1147     char *my_args)
1148 {
1149 	fco_handle_t	rp;
1150 	phandle_t	h;
1151 	char		*buf;
1152 
1153 	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
1154 	rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
1155 	    unit_address, NULL);
1156 	rp->ap = parent;
1157 	rp->child = child;
1158 	rp->fcode = fcode;
1159 	rp->fcode_size = fcode_size;
1160 	rp->my_args = my_args;
1161 
1162 	if (unit_address) {
1163 		buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
1164 		(void) strcpy(buf, unit_address);
1165 		rp->unit_address = buf;
1166 	}
1167 
1168 	/*
1169 	 * Add the child's nodeid to our table...
1170 	 */
1171 	h = ddi_get_nodeid(rp->child);
1172 	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
1173 
1174 	return (rp);
1175 }
1176 
1177 
1178 static void
1179 opl_fc_ops_free_handle(fco_handle_t rp)
1180 {
1181 	struct fc_resource	*resp, *nresp;
1182 
1183 	ASSERT(rp);
1184 
1185 	if (rp->next_handle)
1186 		fc_ops_free_handle(rp->next_handle);
1187 	if (rp->unit_address)
1188 		kmem_free(rp->unit_address, UNIT_ADDR_SIZE);
1189 
1190 	/*
1191 	 * Release all the resources from the resource list
1192 	 */
1193 	for (resp = rp->head; resp != NULL; resp = nresp) {
1194 		nresp = resp->next;
1195 		switch (resp->type) {
1196 
1197 		case RT_MAP:
1198 			/*
1199 			 * If this is still mapped, we'd better unmap it now,
1200 			 * or all our structures that are tracking it will
1201 			 * be leaked.
1202 			 */
1203 			if (resp->fc_map_handle != NULL)
1204 				opl_unmap_phys(&resp->fc_map_handle);
1205 			break;
1206 
1207 		case RT_DMA:
1208 			/*
1209 			 * DMA has to be freed up at exit time.
1210 			 */
1211 			cmn_err(CE_CONT,
1212 			    "opl_fc_ops_free_handle: Unexpected DMA seen!");
1213 			break;
1214 
1215 		case RT_CONTIGIOUS:
1216 			FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
1217 			    "Free claim-memory resource 0x%lx size 0x%x\n",
1218 			    resp->fc_contig_virt, resp->fc_contig_len);
1219 
1220 			(void) ndi_ra_free(ddi_root_node(),
1221 			    (uint64_t)resp->fc_contig_virt,
1222 			    resp->fc_contig_len, "opl-fcodemem",
1223 			    NDI_RA_PASS);
1224 
1225 			break;
1226 
1227 		default:
1228 			cmn_err(CE_CONT, "opl_fc_ops_free: "
1229 			    "unknown resource type %d", resp->type);
1230 			break;
1231 		}
1232 		fc_rem_resource(rp, resp);
1233 		kmem_free(resp, sizeof (struct fc_resource));
1234 	}
1235 
1236 	kmem_free(rp, sizeof (struct fc_resource_list));
1237 }
1238 
1239 int
1240 opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1241 {
1242 	opl_fc_ops_t	*op;
1243 	char		*service = fc_cell2ptr(cp->svc_name);
1244 
1245 	ASSERT(rp);
1246 
1247 	FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);
1248 
1249 	/*
1250 	 * First try the generic fc_ops.
1251 	 */
1252 	if (fc_ops(ap, rp->next_handle, cp) == 0)
1253 		return (0);
1254 
1255 	/*
1256 	 * Now try the Jupiter-specific ops.
1257 	 */
1258 	for (op = opl_fc_ops; op->fc_service != NULL; ++op)
1259 		if (strcmp(op->fc_service, service) == 0)
1260 			return (op->fc_op(ap, rp, cp));
1261 
1262 	FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);
1263 
1264 	return (-1);
1265 }
1266 
1267 /*
1268  * map-in  (phys.lo phys.hi size -- virt)
1269  */
1270 static int
1271 opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1272 {
1273 	size_t			len;
1274 	int			error;
1275 	caddr_t			virt;
1276 	struct fc_resource	*resp;
1277 	struct regspec		rspec;
1278 	ddi_device_acc_attr_t	acc;
1279 	ddi_acc_handle_t	h;
1280 
1281 	if (fc_cell2int(cp->nargs) != 3)
1282 		return (fc_syntax_error(cp, "nargs must be 3"));
1283 
1284 	if (fc_cell2int(cp->nresults) < 1)
1285 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1286 
1287 	rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
1288 	rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
1289 	rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));
1290 
1291 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1292 	acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1293 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1294 
1295 	FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
1296 	    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1297 	    rspec.regspec_addr, rspec.regspec_size);
1298 
1299 	error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);
1300 
1301 	if (error)  {
1302 		FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
1303 		    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1304 		    rspec.regspec_addr, rspec.regspec_size);
1305 
1306 		return (fc_priv_error(cp, "opl map-in failed"));
1307 	}
1308 
1309 	FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);
1310 
1311 	cp->nresults = fc_int2cell(1);
1312 	fc_result(cp, 0) = fc_ptr2cell(virt);
1313 
1314 	/*
1315 	 * Log this resource ...
1316 	 */
1317 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1318 	resp->type = RT_MAP;
1319 	resp->fc_map_virt = virt;
1320 	resp->fc_map_len = len;
1321 	resp->fc_map_handle = h;
1322 	fc_add_resource(rp, resp);
1323 
1324 	return (fc_success_op(ap, rp, cp));
1325 }
1326 
1327 /*
1328  * map-out (virt size -- )
1329  */
1330 static int
1331 opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1332 {
1333 	caddr_t			virt;
1334 	size_t			len;
1335 	struct fc_resource	*resp;
1336 
1337 	if (fc_cell2int(cp->nargs) != 2)
1338 		return (fc_syntax_error(cp, "nargs must be 2"));
1339 
1340 	virt = fc_cell2ptr(fc_arg(cp, 1));
1341 
1342 	len = fc_cell2size(fc_arg(cp, 0));
1343 
1344 	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
1345 	    virt, len);
1346 
1347 	/*
1348 	 * Find if this request matches a mapping resource we set up.
1349 	 */
1350 	fc_lock_resource_list(rp);
1351 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1352 		if (resp->type != RT_MAP)
1353 			continue;
1354 		if (resp->fc_map_virt != virt)
1355 			continue;
1356 		if (resp->fc_map_len == len)
1357 			break;
1358 	}
1359 	fc_unlock_resource_list(rp);
1360 
1361 	if (resp == NULL)
1362 		return (fc_priv_error(cp, "request doesn't match a "
1363 		    "known mapping"));
1364 
1365 	opl_unmap_phys(&resp->fc_map_handle);
1366 
1367 	/*
1368 	 * remove the resource from the list and release it.
1369 	 */
1370 	fc_rem_resource(rp, resp);
1371 	kmem_free(resp, sizeof (struct fc_resource));
1372 
1373 	cp->nresults = fc_int2cell(0);
1374 	return (fc_success_op(ap, rp, cp));
1375 }
1376 
1377 static int
1378 opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1379 {
1380 	size_t			len;
1381 	caddr_t			virt;
1382 	int			error = 0;
1383 	uint64_t		v;
1384 	uint64_t		x;
1385 	uint32_t		l;
1386 	uint16_t		w;
1387 	uint8_t			b;
1388 	char			*service = fc_cell2ptr(cp->svc_name);
1389 	struct fc_resource	*resp;
1390 
1391 	if (fc_cell2int(cp->nargs) != 1)
1392 		return (fc_syntax_error(cp, "nargs must be 1"));
1393 
1394 	if (fc_cell2int(cp->nresults) < 1)
1395 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1396 
1397 	virt = fc_cell2ptr(fc_arg(cp, 0));
1398 
1399 	/*
1400 	 * Determine the access width .. we can switch on the 2nd
1401 	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
1402 	 */
1403 	switch (*(service + 1)) {
1404 	case 'x':	len = sizeof (x); break;
1405 	case 'l':	len = sizeof (l); break;
1406 	case 'w':	len = sizeof (w); break;
1407 	case 'b':	len = sizeof (b); break;
1408 	}
1409 
1410 	/*
1411 	 * Check the alignment ...
1412 	 */
1413 	if (((intptr_t)virt & (len - 1)) != 0)
1414 		return (fc_priv_error(cp, "unaligned access"));
1415 
1416 	/*
1417 	 * Find if this virt is 'within' a request we know about
1418 	 */
1419 	fc_lock_resource_list(rp);
1420 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1421 		if (resp->type == RT_MAP) {
1422 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1423 			    ((virt + len) <=
1424 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1425 				break;
1426 		} else if (resp->type == RT_CONTIGIOUS) {
1427 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1428 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1429 			    resp->fc_contig_len)))
1430 				break;
1431 		}
1432 	}
1433 	fc_unlock_resource_list(rp);
1434 
1435 	if (resp == NULL) {
1436 		return (fc_priv_error(cp, "request not within "
1437 		    "known mappings"));
1438 	}
1439 
1440 	switch (len) {
1441 	case sizeof (x):
1442 		if (resp->type == RT_MAP)
1443 			error = ddi_peek64(rp->child, (int64_t *)virt,
1444 			    (int64_t *)&x);
1445 		else /* RT_CONTIGIOUS */
1446 			x = *(int64_t *)virt;
1447 		v = x;
1448 		break;
1449 	case sizeof (l):
1450 		if (resp->type == RT_MAP)
1451 			error = ddi_peek32(rp->child, (int32_t *)virt,
1452 			    (int32_t *)&l);
1453 		else /* RT_CONTIGIOUS */
1454 			l = *(int32_t *)virt;
1455 		v = l;
1456 		break;
1457 	case sizeof (w):
1458 		if (resp->type == RT_MAP)
1459 			error = ddi_peek16(rp->child, (int16_t *)virt,
1460 			    (int16_t *)&w);
1461 		else /* RT_CONTIGIOUS */
1462 			w = *(int16_t *)virt;
1463 		v = w;
1464 		break;
1465 	case sizeof (b):
1466 		if (resp->type == RT_MAP)
1467 			error = ddi_peek8(rp->child, (int8_t *)virt,
1468 			    (int8_t *)&b);
1469 		else /* RT_CONTIGIOUS */
1470 			b = *(int8_t *)virt;
1471 		v = b;
1472 		break;
1473 	}
1474 
1475 	if (error == DDI_FAILURE) {
1476 		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
1477 		    "accessing virt %p len %d\n", virt, len);
1478 		return (fc_priv_error(cp, "access error"));
1479 	}
1480 
1481 	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
1482 	    service, virt, v);
1483 
1484 	cp->nresults = fc_int2cell(1);
1485 	switch (len) {
1486 	case sizeof (x): fc_result(cp, 0) = x; break;
1487 	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
1488 	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
1489 	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
1490 	}
1491 	return (fc_success_op(ap, rp, cp));
1492 }
1493 
1494 static int
1495 opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1496 {
1497 	size_t			len;
1498 	caddr_t			virt;
1499 	uint64_t		v;
1500 	uint64_t		x;
1501 	uint32_t		l;
1502 	uint16_t		w;
1503 	uint8_t			b;
1504 	char			*service = fc_cell2ptr(cp->svc_name);
1505 	struct fc_resource	*resp;
1506 	int			error = 0;
1507 
1508 	if (fc_cell2int(cp->nargs) != 2)
1509 		return (fc_syntax_error(cp, "nargs must be 2"));
1510 
1511 	virt = fc_cell2ptr(fc_arg(cp, 0));
1512 
1513 	/*
1514 	 * Determine the access width .. we can switch on the 2nd
1515 	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
1516 	 */
1517 	switch (*(service + 1)) {
1518 	case 'x':
1519 		len = sizeof (x);
1520 		x = fc_arg(cp, 1);
1521 		v = x;
1522 		break;
1523 	case 'l':
1524 		len = sizeof (l);
1525 		l = fc_cell2uint32_t(fc_arg(cp, 1));
1526 		v = l;
1527 		break;
1528 	case 'w':
1529 		len = sizeof (w);
1530 		w = fc_cell2uint16_t(fc_arg(cp, 1));
1531 		v = w;
1532 		break;
1533 	case 'b':
1534 		len = sizeof (b);
1535 		b = fc_cell2uint8_t(fc_arg(cp, 1));
1536 		v = b;
1537 		break;
1538 	}
1539 
1540 	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
1541 	    service, virt, v);
1542 
1543 	/*
1544 	 * Check the alignment ...
1545 	 */
1546 	if (((intptr_t)virt & (len - 1)) != 0)
1547 		return (fc_priv_error(cp, "unaligned access"));
1548 
1549 	/*
1550 	 * Find if this virt is 'within' a request we know about
1551 	 */
1552 	fc_lock_resource_list(rp);
1553 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1554 		if (resp->type == RT_MAP) {
1555 			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1556 			    ((virt + len) <=
1557 			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1558 				break;
1559 		} else if (resp->type == RT_CONTIGIOUS) {
1560 			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1561 			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1562 			    resp->fc_contig_len)))
1563 				break;
1564 		}
1565 	}
1566 	fc_unlock_resource_list(rp);
1567 
1568 	if (resp == NULL)
1569 		return (fc_priv_error(cp, "request not within "
1570 		    "known mappings"));
1571 
1572 	switch (len) {
1573 	case sizeof (x):
1574 		if (resp->type == RT_MAP)
1575 			error = ddi_poke64(rp->child, (int64_t *)virt, x);
1576 		else if (resp->type == RT_CONTIGIOUS)
1577 			*(uint64_t *)virt = x;
1578 		break;
1579 	case sizeof (l):
1580 		if (resp->type == RT_MAP)
1581 			error = ddi_poke32(rp->child, (int32_t *)virt, l);
1582 		else if (resp->type == RT_CONTIGIOUS)
1583 			*(uint32_t *)virt = l;
1584 		break;
1585 	case sizeof (w):
1586 		if (resp->type == RT_MAP)
1587 			error = ddi_poke16(rp->child, (int16_t *)virt, w);
1588 		else if (resp->type == RT_CONTIGIOUS)
1589 			*(uint16_t *)virt = w;
1590 		break;
1591 	case sizeof (b):
1592 		if (resp->type == RT_MAP)
1593 			error = ddi_poke8(rp->child, (int8_t *)virt, b);
1594 		else if (resp->type == RT_CONTIGIOUS)
1595 			*(uint8_t *)virt = b;
1596 		break;
1597 	}
1598 
1599 	if (error == DDI_FAILURE) {
1600 		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
1601 		    "accessing virt %p len %d\n", virt, len);
1602 		return (fc_priv_error(cp, "access error"));
1603 	}
1604 
1605 	cp->nresults = fc_int2cell(0);
1606 	return (fc_success_op(ap, rp, cp));
1607 }
1608 
1609 /*
1610  * opl_claim_memory
1611  *
1612  * claim-memory (align size vhint -- vaddr)
1613  */
1614 static int
1615 opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1616 {
1617 	int			align, size, vhint;
1618 	uint64_t		answer, alen;
1619 	ndi_ra_request_t	request;
1620 	struct fc_resource	*resp;
1621 
1622 	if (fc_cell2int(cp->nargs) != 3)
1623 		return (fc_syntax_error(cp, "nargs must be 3"));
1624 
1625 	if (fc_cell2int(cp->nresults) < 1)
1626 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1627 
1628 	vhint = fc_cell2int(fc_arg(cp, 2));
1629 	size  = fc_cell2int(fc_arg(cp, 1));
1630 	align = fc_cell2int(fc_arg(cp, 0));
1631 
1632 	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
1633 	    "vhint=0x%x\n", align, size, vhint);
1634 
1635 	if (size == 0) {
1636 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1637 		    "contiguous memory of size zero\n");
1638 		return (fc_priv_error(cp, "allocation error"));
1639 	}
1640 
1641 	if (vhint) {
1642 		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
1643 		    "vhint=0x%x - Ignoring Argument\n", vhint);
1644 	}
1645 
1646 	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1647 	request.ra_flags	= NDI_RA_ALLOC_BOUNDED;
1648 	request.ra_boundbase	= 0;
1649 	request.ra_boundlen	= 0xffffffff;
1650 	request.ra_len		= size;
1651 	request.ra_align_mask	= align - 1;
1652 
1653 	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
1654 	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
1655 		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1656 		    "contiguous memory\n");
1657 		return (fc_priv_error(cp, "allocation error"));
1658 	}
1659 
1660 	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
1661 	    "size=0x%x\n", answer, alen);
1662 
1663 	cp->nresults = fc_int2cell(1);
1664 	fc_result(cp, 0) = answer;
1665 
1666 	/*
1667 	 * Log this resource ...
1668 	 */
1669 	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1670 	resp->type = RT_CONTIGIOUS;
1671 	resp->fc_contig_virt = (void *)answer;
1672 	resp->fc_contig_len = size;
1673 	fc_add_resource(rp, resp);
1674 
1675 	return (fc_success_op(ap, rp, cp));
1676 }
1677 
1678 /*
1679  * opl_release_memory
1680  *
1681  * release-memory (size vaddr -- )
1682  */
1683 static int
1684 opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1685 {
1686 	int32_t			vaddr, size;
1687 	struct fc_resource	*resp;
1688 
1689 	if (fc_cell2int(cp->nargs) != 2)
1690 		return (fc_syntax_error(cp, "nargs must be 2"));
1691 
1692 	if (fc_cell2int(cp->nresults) != 0)
1693 		return (fc_syntax_error(cp, "nresults must be 0"));
1694 
1695 	vaddr = fc_cell2int(fc_arg(cp, 1));
1696 	size  = fc_cell2int(fc_arg(cp, 0));
1697 
1698 	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
1699 	    vaddr, size);
1700 
1701 	/*
1702 	 * Find if this request matches a mapping resource we set up.
1703 	 */
1704 	fc_lock_resource_list(rp);
1705 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1706 		if (resp->type != RT_CONTIGIOUS)
1707 			continue;
1708 		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
1709 			continue;
1710 		if (resp->fc_contig_len == size)
1711 			break;
1712 	}
1713 	fc_unlock_resource_list(rp);
1714 
1715 	if (resp == NULL)
1716 		return (fc_priv_error(cp, "request doesn't match a "
1717 		    "known mapping"));
1718 
1719 	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
1720 	    "opl-fcodemem", NDI_RA_PASS);
1721 
1722 	/*
1723 	 * remove the resource from the list and release it.
1724 	 */
1725 	fc_rem_resource(rp, resp);
1726 	kmem_free(resp, sizeof (struct fc_resource));
1727 
1728 	cp->nresults = fc_int2cell(0);
1729 
1730 	return (fc_success_op(ap, rp, cp));
1731 }
1732 
1733 /*
1734  * opl_vtop
1735  *
1736  * vtop (vaddr -- paddr.lo paddr.hi)
1737  */
1738 static int
1739 opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1740 {
1741 	int			vaddr;
1742 	uint64_t		paddr;
1743 	struct fc_resource	*resp;
1744 
1745 	if (fc_cell2int(cp->nargs) != 1)
1746 		return (fc_syntax_error(cp, "nargs must be 1"));
1747 
1748 	if (fc_cell2int(cp->nresults) >= 3)
1749 		return (fc_syntax_error(cp, "nresults must be less than 3"));
1750 
1751 	vaddr = fc_cell2int(fc_arg(cp, 0));
1752 
1753 	/*
1754 	 * Find if this request matches a mapping resource we set up.
1755 	 */
1756 	fc_lock_resource_list(rp);
1757 	for (resp = rp->head; resp != NULL; resp = resp->next) {
1758 		if (resp->type != RT_CONTIGIOUS)
1759 			continue;
1760 		if (((uint64_t)resp->fc_contig_virt <= vaddr) &&
1761 		    (vaddr < (uint64_t)resp->fc_contig_virt +
1762 		    resp->fc_contig_len))
1763 			break;
1764 	}
1765 	fc_unlock_resource_list(rp);
1766 
1767 	if (resp == NULL)
1768 		return (fc_priv_error(cp, "request doesn't match a "
1769 		    "known mapping"));
1770 
1771 	paddr = va_to_pa((void *)(uintptr_t)vaddr);
1772 
1773 	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
1774 	    vaddr, paddr);
1775 
1776 	cp->nresults = fc_int2cell(2);
1777 
1778 	fc_result(cp, 0) = paddr;
1779 	fc_result(cp, 1) = 0;
1780 
1781 	return (fc_success_op(ap, rp, cp));
1782 }
1783 
1784 static int
1785 opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1786 {
1787 	fc_phandle_t h;
1788 
1789 	if (fc_cell2int(cp->nargs) != 0)
1790 		return (fc_syntax_error(cp, "nargs must be 0"));
1791 
1792 	if (fc_cell2int(cp->nresults) < 1)
1793 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1794 
1795 	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1796 
1797 	cp->nresults = fc_int2cell(1);
1798 	fc_result(cp, 0) = fc_phandle2cell(h);
1799 
1800 	return (fc_success_op(ap, rp, cp));
1801 }
1802 
1803 static int
1804 opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1805 {
1806 	caddr_t		dropin_name_virt, fcode_virt;
1807 	char		*dropin_name, *fcode;
1808 	int		fcode_len, status;
1809 
1810 	if (fc_cell2int(cp->nargs) != 3)
1811 		return (fc_syntax_error(cp, "nargs must be 3"));
1812 
1813 	if (fc_cell2int(cp->nresults) < 1)
1814 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1815 
1816 	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));
1817 
1818 	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1819 
1820 	fcode_len = fc_cell2int(fc_arg(cp, 2));
1821 
1822 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1823 
1824 	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);
1825 
1826 	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
1827 	    FC_SVC_NAME_LEN - 1, NULL))  {
1828 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
1829 		    "fault copying in drop in name %p\n", dropin_name_virt);
1830 		status = 0;
1831 	} else {
1832 		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);
1833 
1834 		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1835 
1836 		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {
1837 
1838 			if (copyout((void *)fcode, (void *)fcode_virt,
1839 			    fcode_len)) {
1840 				cmn_err(CE_WARN, " opl_get_fcode: Unable "
1841 				    "to copy out fcode image");
1842 				status = 0;
1843 			}
1844 		}
1845 
1846 		kmem_free(fcode, fcode_len);
1847 	}
1848 
1849 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1850 
1851 	cp->nresults = fc_int2cell(1);
1852 	fc_result(cp, 0) = status;
1853 
1854 	return (fc_success_op(ap, rp, cp));
1855 }
1856 
1857 static int
1858 opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1859 {
1860 	caddr_t		virt;
1861 	char		*dropin_name;
1862 	int		len;
1863 
1864 	if (fc_cell2int(cp->nargs) != 1)
1865 		return (fc_syntax_error(cp, "nargs must be 1"));
1866 
1867 	if (fc_cell2int(cp->nresults) < 1)
1868 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1869 
1870 	virt = fc_cell2ptr(fc_arg(cp, 0));
1871 
1872 	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1873 
1874 	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");
1875 
1876 	if (copyinstr(fc_cell2ptr(virt), dropin_name,
1877 	    FC_SVC_NAME_LEN - 1, NULL))  {
1878 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
1879 		    "fault copying in drop in name %p\n", virt);
1880 		len = 0;
1881 	} else {
1882 		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);
1883 
1884 		len = prom_get_fcode_size(dropin_name);
1885 	}
1886 
1887 	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1888 
1889 	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);
1890 
1891 	cp->nresults = fc_int2cell(1);
1892 	fc_result(cp, 0) = len;
1893 
1894 	return (fc_success_op(ap, rp, cp));
1895 }
1896 
1897 static int
1898 opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
1899     caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1900     ddi_acc_handle_t *handlep)
1901 {
1902 	ddi_map_req_t	mapreq;
1903 	ddi_acc_hdl_t	*acc_handlep;
1904 	int		result;
1905 	struct regspec	*rspecp;
1906 
1907 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1908 	acc_handlep = impl_acc_hdl_get(*handlep);
1909 	acc_handlep->ah_vers = VERS_ACCHDL;
1910 	acc_handlep->ah_dip = dip;
1911 	acc_handlep->ah_rnumber = 0;
1912 	acc_handlep->ah_offset = 0;
1913 	acc_handlep->ah_len = 0;
1914 	acc_handlep->ah_acc = *accattrp;
1915 	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
1916 	*rspecp = *phys_spec;
1917 	/*
1918 	 * cache a copy of the reg spec
1919 	 */
1920 	acc_handlep->ah_bus_private = rspecp;
1921 
1922 	mapreq.map_op = DDI_MO_MAP_LOCKED;
1923 	mapreq.map_type = DDI_MT_REGSPEC;
1924 	mapreq.map_obj.rp = (struct regspec *)phys_spec;
1925 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1926 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1927 	mapreq.map_handlep = acc_handlep;
1928 	mapreq.map_vers = DDI_MAP_VERSION;
1929 
1930 	result = ddi_map(dip, &mapreq, 0, 0, addrp);
1931 
1932 	if (result != DDI_SUCCESS) {
1933 		impl_acc_hdl_free(*handlep);
1934 		kmem_free(rspecp, sizeof (struct regspec));
1935 		*handlep = (ddi_acc_handle_t)NULL;
1936 	} else {
1937 		acc_handlep->ah_addr = *addrp;
1938 	}
1939 
1940 	return (result);
1941 }
1942 
1943 static void
1944 opl_unmap_phys(ddi_acc_handle_t *handlep)
1945 {
1946 	ddi_map_req_t	mapreq;
1947 	ddi_acc_hdl_t	*acc_handlep;
1948 	struct regspec	*rspecp;
1949 
1950 	acc_handlep = impl_acc_hdl_get(*handlep);
1951 	ASSERT(acc_handlep);
1952 	rspecp = acc_handlep->ah_bus_private;
1953 
1954 	mapreq.map_op = DDI_MO_UNMAP;
1955 	mapreq.map_type = DDI_MT_REGSPEC;
1956 	mapreq.map_obj.rp = (struct regspec *)rspecp;
1957 	mapreq.map_prot = PROT_READ | PROT_WRITE;
1958 	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1959 	mapreq.map_handlep = acc_handlep;
1960 	mapreq.map_vers = DDI_MAP_VERSION;
1961 
1962 	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
1963 	    acc_handlep->ah_len, &acc_handlep->ah_addr);
1964 
1965 	impl_acc_hdl_free(*handlep);
1966 	/*
1967 	 * Free the cached copy
1968 	 */
1969 	kmem_free(rspecp, sizeof (struct regspec));
1970 	*handlep = (ddi_acc_handle_t)NULL;
1971 }
1972 
1973 static int
1974 opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1975 {
1976 	uint32_t	portid;
1977 	void		*hwd_virt;
1978 	hwd_header_t	*hwd_h = NULL;
1979 	hwd_sb_t	*hwd_sb = NULL;
1980 	int		lsb, ch, leaf;
1981 	int		status = 1;
1982 
1983 	/* Check the argument */
1984 	if (fc_cell2int(cp->nargs) != 2)
1985 		return (fc_syntax_error(cp, "nargs must be 2"));
1986 
1987 	if (fc_cell2int(cp->nresults) < 1)
1988 		return (fc_syntax_error(cp, "nresults must be >= 1"));
1989 
1990 	/* Get the parameters */
1991 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
1992 	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));
1993 
1994 	/* Get the ID numbers */
1995 	lsb  = OPL_IO_PORTID_TO_LSB(portid);
1996 	ch   = OPL_PORTID_TO_CHANNEL(portid);
1997 	leaf = OPL_PORTID_TO_LEAF(portid);
1998 	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);
1999 
2000 	/* Set the pointer of hwd. */
2001 	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
2002 		return (fc_priv_error(cp, "null hwd header"));
2003 	}
2004 	/* Set the pointer of hwd sb. */
2005 	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
2006 	    == NULL) {
2007 		return (fc_priv_error(cp, "null hwd sb"));
2008 	}
2009 
2010 	if (ch == OPL_CMU_CHANNEL) {
2011 		/* Copyout CMU-CH HW Descriptor */
2012 		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
2013 		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
2014 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2015 			"Unable to copy out cmuch descriptor for %x",
2016 			    portid);
2017 			status = 0;
2018 		}
2019 	} else {
2020 		/* Copyout PCI-CH HW Descriptor */
2021 		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
2022 		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
2023 			cmn_err(CE_WARN, "opl_get_hwd_va: "
2024 			"Unable to copy out pcich descriptor for %x",
2025 			    portid);
2026 			status = 0;
2027 		}
2028 	}
2029 
2030 	cp->nresults = fc_int2cell(1);
2031 	fc_result(cp, 0) = status;
2032 
2033 	return (fc_success_op(ap, rp, cp));
2034 }
2035 
2036 /*
2037  * After Solaris boots, a user can enter OBP using L1A, etc. While in OBP,
2038  * interrupts may be received from PCI devices. These interrupts
2039  * cannot be handled meaningfully since the system is in OBP. These
2040  * interrupts need to be cleared on the CPU side so that the CPU may
2041  * continue with whatever it is doing. Devices that have raised the
2042  * interrupts are expected to re-raise the interrupts after some time
2043  * as they have not been handled. At that time, Solaris will have a
2044  * chance to properly service the interrupts.
2045  *
2046  * The location of the interrupt registers depends on what is present
2047  * at a port. OPL currently supports the Oberon and the CMU channel.
2048  * The following handler handles both kinds of ports and computes
2049  * interrupt register addresses from the specifications and Jupiter Bus
2050  * device bindings.
2051  *
2052  * Fcode drivers install their interrupt handler via a "master-interrupt"
2053  * service. For boot time devices, this takes place within OBP. In the case
2054  * of DR, OPL uses IKP. The Fcode drivers that run within the efcode framework
2055  * attempt to install their handler via the "master-interrupt" service.
2056  * However, we cannot meaningfully install the Fcode driver's handler.
2057  * Instead, we install our own handler in OBP which does the same thing.
2058  *
2059  * Note that the only handling done for interrupts here is to clear it
2060  * on the CPU side. If any device in the future requires more special
2061  * handling, we would have to put in some kind of framework for adding
2062  * device-specific handlers. This is *highly* unlikely, but possible.
2063  *
2064  * Finally, OBP provides a hook called "unix-interrupt-handler" to install
2065  * a Solaris-defined master-interrupt handler for a port. The default
2066  * definition for this method does nothing. Solaris may override this
2067  * with its own definition. This is the way the following handler gets
2068  * control from OBP when interrupts happen at a port after L1A, etc.
2069  */
2070 
2071 static char define_master_interrupt_handler[] =
2072 
2073 /*
2074  * This method translates an Oberon port id to the base (physical) address
2075  * of the interrupt clear registers for that port id.
2076  */
2077 
2078 ": pcich-mid>clear-int-pa   ( mid -- pa ) "
2079 "   dup 1 >> 7 and          ( mid ch# ) "
2080 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2081 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2082 "   swap d# 40 << or        ( mid ch# pa ) "
2083 "   swap d# 37 << or        ( mid pa ) "
2084 "   swap 1 and if h# 70.0000 else h# 60.0000 then "
2085 "   or h# 1400 or           ( pa ) "
2086 "; "
2087 
2088 /*
2089  * This method translates a CMU channel port id to the base (physical) address
2090  * of the interrupt clear registers for that port id. There are two classes of
2091  * interrupts that need to be handled for a CMU channel:
2092  *	- obio interrupts
2093  *	- pci interrupts
2094  * So, there are two addresses that need to be computed.
2095  */
2096 
2097 ": cmuch-mid>clear-int-pa   ( mid -- obio-pa pci-pa ) "
2098 "   dup 1 >> 7 and          ( mid ch# ) "
2099 "   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2100 "   1 d# 46 <<              ( mid ch# lsb# pa ) "
2101 "   swap d# 40 << or        ( mid ch# pa ) "
2102 "   swap d# 37 << or        ( mid pa ) "
2103 "   nip dup h# 1800 +       ( pa obio-pa ) "
2104 "   swap h# 1400 +          ( obio-pa pci-pa ) "
2105 "; "
2106 
2107 /*
2108  * This method checks if a given I/O port ID is valid or not.
2109  * For a given LSB,
2110  *	Oberon ports range from 0 - 3
2111  *	CMU ch ports range from 4 - 4
2112  *
2113  * Also, the Oberon supports leaves 0 and 1.
2114  * The CMU ch supports only one leaf, leaf 0.
2115  */
2116 
2117 ": valid-io-mid? ( mid -- flag ) "
2118 "   dup 1 >> 7 and                     ( mid ch# ) "
2119 "   dup 4 > if 2drop false exit then   ( mid ch# ) "
2120 "   4 = swap 1 and 1 = and not "
2121 "; "
2122 
2123 /*
2124  * This method checks if a given port id is a CMU ch.
2125  */
2126 
2127 ": cmuch? ( mid -- flag ) 1 >> 7 and 4 = ; "
2128 
2129 /*
2130  * Given the base address of the array of interrupt clear registers for
2131  * a port id, this method iterates over the given interrupt number bitmap
2132  * and resets the interrupt on the CPU side for every interrupt number
2133  * in the bitmap. Note that physical addresses are used to perform the
2134  * writes, not virtual addresses. This allows the handler to work without
2135  * any involvement from Solaris.
2136  */
2137 
2138 ": clear-ints ( pa bitmap count -- ) "
2139 "   0 do                            ( pa bitmap ) "
2140 "      dup 0= if 2drop unloop exit then "
2141 "      tuck                         ( bitmap pa bitmap ) "
2142 "      1 and if                     ( bitmap pa ) "
2143 "	 dup i 8 * + 0 swap         ( bitmap pa 0 pa' ) "
2144 "	 h# 15 spacex!              ( bitmap pa ) "
2145 "      then                         ( bitmap pa ) "
2146 "      swap 1 >>                    ( pa bitmap ) "
2147 "   loop "
2148 "; "
2149 
2150 /*
2151  * This method replaces the master-interrupt handler in OBP. Once
2152  * this method is plumbed into OBP, OBP transfers control to this
2153  * handler while returning to Solaris from OBP after L1A. This method's
2154  * task is to simply reset received interrupts on the CPU side.
2155  * When the devices reassert the interrupts later, Solaris will
2156  * be able to see them and handle them.
2157  *
2158  * For each port ID that has interrupts, this method is called
2159  * once by OBP. The input arguments are:
2160  *	mid	portid
2161  *	bitmap	bitmap of interrupts that have happened
2162  *
2163  * This method returns true if it is able to handle the interrupts.
2164  * OBP does nothing further.
2165  *
2166  * This method returns false if it encountered a problem. Currently,
2167  * the only problem could be an invalid port id. OBP needs to do
2168  * its own processing in that case. If this method returns false,
2169  * it preserves the mid and bitmap arguments for OBP.
2170  */
2171 
2172 ": unix-resend-mondos ( mid bitmap -- [ mid bitmap false ] | true ) "
2173 
2174 /*
2175  * Uncomment the following line if you want to display the input arguments.
2176  * This is meant for debugging.
2177  * "   .\" Bitmap=\" dup u. .\" MID=\" over u. cr "
2178  */
2179 
2180 /*
2181  * If the port id is not valid (according to the Oberon and CMU ch
2182  * specifications), then return false to OBP to continue further
2183  * processing.
2184  */
2185 
2186 "   over valid-io-mid? not if       ( mid bitmap ) "
2187 "      false exit "
2188 "   then "
2189 
2190 /*
2191  * If the port is a CMU ch, then the 64-bit bitmap represents
2192  * 2 32-bit bitmaps:
2193  *	- obio interrupt bitmap (20 bits)
2194  *	- pci interrupt bitmap (32 bits)
2195  *
2196  * - Split the bitmap into two
2197  * - Compute the base addresses of the interrupt clear registers
2198  *   for both pci interrupts and obio interrupts
2199  * - Clear obio interrupts
2200  * - Clear pci interrupts
2201  */
2202 
2203 "   over cmuch? if                  ( mid bitmap ) "
2204 "      xlsplit                      ( mid pci-bit obio-bit ) "
2205 "      rot cmuch-mid>clear-int-pa   ( pci-bit obio-bit obio-pa pci-pa ) "
2206 "      >r                           ( pci-bit obio-bit obio-pa ) ( r: pci-pa ) "
2207 "      swap d# 20 clear-ints        ( pci-bit ) ( r: pci-pa ) "
2208 "      r> swap d# 32 clear-ints     (  ) ( r: ) "
2209 
2210 /*
2211  * If the port is an Oberon, then the 64-bit bitmap is used fully.
2212  *
2213  * - Compute the base address of the interrupt clear registers
2214  * - Clear interrupts
2215  */
2216 
2217 "   else                            ( mid bitmap ) "
2218 "      swap pcich-mid>clear-int-pa  ( bitmap pa ) "
2219 "      swap d# 64 clear-ints        (  ) "
2220 "   then "
2221 
2222 /*
2223  * Always return true from here.
2224  */
2225 
2226 "   true                            ( true ) "
2227 "; "
2228 ;
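/*
 * Editorial illustration (not part of the original driver): the C sketch
 * below restates what the Forth words defined above compute, for readers
 * who do not read Forth.  The example_* names are hypothetical, and the
 * block is guarded by #if 0 so it is never compiled and does not change
 * the driver in any way.
 */
#if 0
/* Stand-in for a 64-bit store through ASI 0x15 (physical I/O space). */
static void example_phys_store64(uint64_t pa, uint64_t value);

/* Sketch of "pcich-mid>clear-int-pa" ( mid -- pa ). */
static uint64_t
example_pcich_mid_to_clear_int_pa(uint32_t mid)
{
	uint64_t ch = (mid >> 1) & 0x7;		/* channel number */
	uint64_t lsb = (mid >> 4) & 0x1f;	/* LSB number */
	uint64_t pa = (1ULL << 46) | (lsb << 40) | (ch << 37);

	pa |= (mid & 1) ? 0x700000ULL : 0x600000ULL;	/* leaf 1 : leaf 0 */
	return (pa | 0x1400);		/* interrupt clear registers */
}

/* Sketch of "cmuch-mid>clear-int-pa" ( mid -- obio-pa pci-pa ). */
static void
example_cmuch_mid_to_clear_int_pa(uint32_t mid, uint64_t *obio_pa,
    uint64_t *pci_pa)
{
	uint64_t ch = (mid >> 1) & 0x7;
	uint64_t lsb = (mid >> 4) & 0x1f;
	uint64_t pa = (1ULL << 46) | (lsb << 40) | (ch << 37);

	*obio_pa = pa + 0x1800;		/* obio interrupt clear registers */
	*pci_pa = pa + 0x1400;		/* pci interrupt clear registers */
}

/*
 * Sketch of "clear-ints" ( pa bitmap count -- ): for every bit set in the
 * bitmap, write zero to the corresponding 8-byte interrupt clear register
 * using a physical-address store.
 */
static void
example_clear_ints(uint64_t pa, uint64_t bitmap, int count)
{
	int i;

	for (i = 0; i < count && bitmap != 0; i++, bitmap >>= 1) {
		if (bitmap & 1)
			example_phys_store64(pa + (uint64_t)i * 8, 0);
	}
}
#endif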
2229 
2230 static char	install_master_interrupt_handler[] =
2231 	"' unix-resend-mondos to unix-interrupt-handler";
2232 static char	handler[] = "unix-interrupt-handler";
2233 static char	handler_defined[] = "p\" %s\" find nip swap l! ";
2234 
2235 /*ARGSUSED*/
2236 static int
2237 master_interrupt_init(uint32_t portid, uint32_t xt)
2238 {
2239 	uint_t	defined;
2240 	char	buf[sizeof (handler) + sizeof (handler_defined)];
2241 
2242 	if (master_interrupt_inited)
2243 		return (1);
2244 
2245 	/*
2246 	 * Check if the defer word "unix-interrupt-handler" is defined.
2247 	 * This must be defined for OPL systems. So, this is only a
2248 	 * sanity check.
2249 	 */
2250 	(void) sprintf(buf, handler_defined, handler);
2251 	prom_interpret(buf, (uintptr_t)&defined, 0, 0, 0, 0);
2252 	if (!defined) {
2253 		cmn_err(CE_WARN, "master_interrupt_init: "
2254 		    "%s is not defined\n", handler);
2255 		return (0);
2256 	}
2257 
2258 	/*
2259 	 * Install the generic master-interrupt handler. Note that
2260 	 * this is only done one time on the first DR operation.
2261 	 * This is because, for OPL, one, single generic handler
2262 	 * handles all ports (Oberon and CMU channel) and all
2263 	 * interrupt sources within each port.
2264 	 *
2265 	 * The current support is only for the Oberon and CMU-channel.
2266 	 * If any others need to be supported, the handler has to be
2267 	 * modified accordingly.
2268 	 */
2269 
2270 	/*
2271 	 * Define the OPL master interrupt handler
2272 	 */
2273 	prom_interpret(define_master_interrupt_handler, 0, 0, 0, 0, 0);
2274 
2275 	/*
2276 	 * Take over the master interrupt handler from OBP.
2277 	 */
2278 	prom_interpret(install_master_interrupt_handler, 0, 0, 0, 0, 0);
2279 
2280 	master_interrupt_inited = 1;
2281 
2282 	/*
2283 	 * prom_interpret() does not return a status. So, we assume
2284 	 * that the calls succeeded. In reality, the calls may fail
2285 	 * if there is a syntax error, etc in the strings.
2286 	 */
2287 
2288 	return (1);
2289 }
2290 
2291 /*
2292  * Install the master-interrupt handler for a device.
2293  */
2294 static int
2295 opl_master_interrupt(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
2296 {
2297 	uint32_t	portid, xt;
2298 	int		board, channel, leaf;
2299 	int		status;
2300 
2301 	/* Check the argument */
2302 	if (fc_cell2int(cp->nargs) != 2)
2303 		return (fc_syntax_error(cp, "nargs must be 2"));
2304 
2305 	if (fc_cell2int(cp->nresults) < 1)
2306 		return (fc_syntax_error(cp, "nresults must be >= 1"));
2307 
2308 	/* Get the parameters */
2309 	portid = fc_cell2uint32_t(fc_arg(cp, 0));
2310 	xt = fc_cell2uint32_t(fc_arg(cp, 1));
2311 
2312 	board = OPL_IO_PORTID_TO_LSB(portid);
2313 	channel = OPL_PORTID_TO_CHANNEL(portid);
2314 	leaf = OPL_PORTID_TO_LEAF(portid);
2315 
2316 	if ((board >= HWD_SBS_PER_DOMAIN) || !OPL_VALID_CHANNEL(channel) ||
2317 	    (OPL_OBERON_CHANNEL(channel) && !OPL_VALID_LEAF(leaf)) ||
2318 	    ((channel == OPL_CMU_CHANNEL) && (leaf != 0))) {
2319 		FC_DEBUG1(1, CE_CONT, "opl_master_interrupt: invalid port %x\n",
2320 		    portid);
2321 		status = 0;
2322 	} else {
2323 		status = master_interrupt_init(portid, xt);
2324 	}
2325 
2326 	cp->nresults = fc_int2cell(1);
2327 	fc_result(cp, 0) = status;
2328 
2329 	return (fc_success_op(ap, rp, cp));
2330 }
2331 
2332 /*
2333  * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
2334  */
2335 /*ARGSUSED*/
2336 static int
2337 opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
2338 {
2339 	int ret;
2340 
2341 	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);
2342 
2343 	OPL_UPDATE_PROP(string, node, "status", "okay");
2344 
2345 	return (DDI_WALK_TERMINATE);
2346 }
2347 
2348 static char *
2349 opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
2350 {
2351 	char		*probe_string;
2352 	int		portid;
2353 
2354 	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);
2355 
2356 	if (channel == OPL_CMU_CHANNEL)
2357 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2358 	else
2359 		portid = probe->
2360 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2361 
2362 	(void) sprintf(probe_string, "%x", portid);
2363 
2364 	return (probe_string);
2365 }
2366 
2367 static int
2368 opl_probe_leaf(opl_probe_t *probe)
2369 {
2370 	int		channel, leaf, portid, error;
2371 	int		board;
2372 	fco_handle_t	fco_handle, *cfg_handle;
2373 	dev_info_t	*parent, *leaf_node;
2374 	char		unit_address[UNIT_ADDR_SIZE];
2375 	char		*probe_string;
2376 	opl_board_cfg_t	*board_cfg;
2377 
2378 	board = probe->pr_board;
2379 	channel = probe->pr_channel;
2380 	leaf = probe->pr_leaf;
2381 	parent = ddi_root_node();
2382 	board_cfg = &opl_boards[board];
2383 
2384 	ASSERT(OPL_VALID_CHANNEL(channel));
2385 	ASSERT(OPL_VALID_LEAF(leaf));
2386 
2387 	if (channel == OPL_CMU_CHANNEL) {
2388 		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2389 		cfg_handle = &board_cfg->cfg_cmuch_handle;
2390 	} else {
2391 		portid = probe->
2392 		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2393 		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
2394 	}
2395 
2396 	/*
2397 	 * Prevent any changes to leaf_node until we have bound
2398 	 * it to the correct driver.
2399 	 */
2400 	ndi_devi_enter(parent);
2401 
2402 	/*
2403 	 * Ideally, fcode would be run from the "sid_branch_create"
2404 	 * callback (that is the primary purpose of that callback).
2405 	 * However, the fcode interpreter was written with the
2406 	 * assumption that the "new_child" was linked into the
2407 	 * device tree. The callback is invoked with the devinfo node
2408 	 * in the DS_PROTO state. More investigation is needed before
2409 	 * we can invoke the interpreter from the callback. For now,
2410 	 * we create the "new_child" in the BOUND state, invoke the
2411 	 * fcode interpreter and then rebind the dip to use any
2412 	 * compatible properties created by fcode.
2413 	 */
2414 
2415 	probe->pr_parent = parent;
2416 	probe->pr_create = opl_create_leaf;
2417 	probe->pr_hold = 1;
2418 
2419 	leaf_node = opl_create_node(probe);
2420 	if (leaf_node == NULL) {
2421 
2422 		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
2423 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2424 		ndi_devi_exit(parent);
2425 		return (-1);
2426 	}
2427 
2428 	/*
2429 	 * The platform DR interfaces created the dip in
2430 	 * bound state. Bring devinfo node down to linked
2431 	 * state and hold it there until compatible
2432 	 * properties are created.
2433 	 */
2434 	e_ddi_branch_rele(leaf_node);
2435 	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
2436 	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
2437 	e_ddi_branch_hold(leaf_node);
2438 
2439 	mutex_enter(&DEVI(leaf_node)->devi_lock);
2440 	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
2441 	mutex_exit(&DEVI(leaf_node)->devi_lock);
2442 
2443 	/*
2444 	 * Drop the busy-hold on parent before calling
2445 	 * fcode_interpreter to prevent potential deadlocks
2446 	 */
2447 	ndi_devi_exit(parent);
2448 
2449 	(void) sprintf(unit_address, "%x", portid);
2450 
2451 	/*
2452 	 * Get the probe string
2453 	 */
2454 	probe_string = opl_get_probe_string(probe, channel, leaf);
2455 
2456 	/*
2457 	 * The fcode pointer specified here is NULL and the fcode
2458 	 * size specified here is 0. This causes the user-level
2459 	 * fcode interpreter to issue a request to the fcode
2460 	 * driver to get the Oberon/cmu-ch fcode.
2461 	 */
2462 	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
2463 	    NULL, 0, unit_address, probe_string);
2464 
2465 	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);
2466 
2467 	if (error != 0) {
2468 		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
2469 		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2470 
2471 		opl_fc_ops_free_handle(fco_handle);
2472 
2473 		if (probe_string != NULL)
2474 			kmem_free(probe_string, PROBE_STR_SIZE);
2475 
2476 		(void) opl_destroy_node(leaf_node);
2477 	} else {
2478 		*cfg_handle = fco_handle;
2479 
2480 		if (channel == OPL_CMU_CHANNEL)
2481 			board_cfg->cfg_cmuch_probe_str = probe_string;
2482 		else
2483 			board_cfg->cfg_pcich_probe_str[channel][leaf]
2484 			    = probe_string;
2485 
2486 		/*
2487 		 * Compatible properties (if any) have been created,
2488 		 * so bind driver.
2489 		 */
2490 		ndi_devi_enter(parent);
2491 		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);
2492 
2493 		mutex_enter(&DEVI(leaf_node)->devi_lock);
2494 		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
2495 		mutex_exit(&DEVI(leaf_node)->devi_lock);
2496 
2497 		ndi_devi_exit(parent);
2498 
2499 		if (ndi_devi_bind_driver(leaf_node, 0) != DDI_SUCCESS) {
2500 			cmn_err(CE_WARN, "IKP: Unable to bind PCI leaf "
2501 			    "(%d-%d-%d)", probe->pr_board, probe->pr_channel,
2502 			    probe->pr_leaf);
2503 		}
2504 	}
2505 
2506 	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
2507 		return (-1);
2508 
2509 	return (0);
2510 }
2511 
2512 static void
2513 opl_init_leaves(int myboard)
2514 {
2515 	dev_info_t	*parent, *node;
2516 	char		*name;
2517 	int		ret;
2518 	int		len, portid, board, channel, leaf;
2519 	opl_board_cfg_t	*cfg;
2520 
2521 	parent = ddi_root_node();
2522 
2523 	/*
2524 	 * Hold parent node busy to walk its child list
2525 	 */
2526 	ndi_devi_enter(parent);
2527 
2528 	for (node = ddi_get_child(parent); (node != NULL); node =
2529 	    ddi_get_next_sibling(node)) {
2530 
2531 		ret = OPL_GET_PROP(string, node, "name", &name, &len);
2532 		if (ret != DDI_PROP_SUCCESS) {
2533 			/*
2534 			 * The property does not exist for this node.
2535 			 */
2536 			continue;
2537 		}
2538 
2539 		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
2540 
2541 			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
2542 			if (ret == DDI_PROP_SUCCESS) {
2543 
2544 				ret = OPL_GET_PROP(int, node, "board#",
2545 				    &board, -1);
2546 				if ((ret != DDI_PROP_SUCCESS) ||
2547 				    (board != myboard)) {
2548 					kmem_free(name, len);
2549 					continue;
2550 				}
2551 
2552 				cfg = &opl_boards[board];
2553 				channel = OPL_PORTID_TO_CHANNEL(portid);
2554 				if (channel == OPL_CMU_CHANNEL) {
2555 
2556 					if (cfg->cfg_cmuch_handle != NULL)
2557 						cfg->cfg_cmuch_leaf = node;
2558 
2559 				} else {
2560 
2561 					leaf = OPL_PORTID_TO_LEAF(portid);
2562 					if (cfg->cfg_pcich_handle[
2563 					    channel][leaf] != NULL)
2564 						cfg->cfg_pcich_leaf[
2565 						    channel][leaf] = node;
2566 				}
2567 			}
2568 		}
2569 
2570 		kmem_free(name, len);
2571 		if (ret != DDI_PROP_SUCCESS)
2572 			break;
2573 	}
2574 
2575 	ndi_devi_exit(parent);
2576 }
2577 
2578 /*
2579  * Create "pci" node and hierarchy for the Oberon channels and the
2580  * CMU channel.
2581  */
2582 /*ARGSUSED*/
2583 static int
2584 opl_probe_io(opl_probe_t *probe)
2585 {
2586 
2587 	int		i, j;
2588 	hwd_pci_ch_t	*channels;
2589 
2590 	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {
2591 
2592 		probe->pr_channel = HWD_CMU_CHANNEL;
2593 		probe->pr_channel_status =
2594 		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
2595 		probe->pr_leaf = 0;
2596 		probe->pr_leaf_status = probe->pr_channel_status;
2597 
2598 		if (opl_probe_leaf(probe) != 0)
2599 			return (-1);
2600 	}
2601 
2602 	channels = &probe->pr_sb->sb_pci_ch[0];
2603 
2604 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2605 
2606 		if (!HWD_STATUS_OK(channels[i].pci_status))
2607 			continue;
2608 
2609 		probe->pr_channel = i;
2610 		probe->pr_channel_status = channels[i].pci_status;
2611 
2612 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2613 
2614 			probe->pr_leaf = j;
2615 			probe->pr_leaf_status =
2616 			    channels[i].pci_leaf[j].leaf_status;
2617 
2618 			if (!HWD_STATUS_OK(probe->pr_leaf_status))
2619 				continue;
2620 
2621 			(void) opl_probe_leaf(probe);
2622 		}
2623 	}
2624 	opl_init_leaves(probe->pr_board);
2625 	return (0);
2626 }
2627 
2628 /*
2629  * Perform the probe in the following order:
2630  *
2631  *	processors
2632  *	memory
2633  *	IO
2634  *
2635  * Each probe function returns 0 on success and a non-zero value on failure.
2636  * What is a failure is determined by the implementor of the probe function.
2637  * For example, while probing CPUs, any error encountered during probe
2638  * is considered a failure and causes the whole probe operation to fail.
2639  * However, for I/O, an error encountered while probing one device
2640  * should not prevent other devices from being probed. It should not cause
2641  * the whole probe operation to fail.
2642  */
2643 int
2644 opl_probe_sb(int board, unsigned *cpu_impl)
2645 {
2646 	opl_probe_t	*probe;
2647 	int		ret;
2648 
2649 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2650 		return (-1);
2651 
2652 	ASSERT(opl_cfg_inited != 0);
2653 
2654 	/*
2655 	 * If the previous probe failed and left a partially configured
2656 	 * board, we need to unprobe the board and start with a clean slate.
2657 	 */
2658 	if ((opl_boards[board].cfg_hwd != NULL) &&
2659 	    (opl_unprobe_sb(board) != 0))
2660 		return (-1);
2661 
2662 	ret = 0;
2663 
2664 	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
2665 	probe->pr_board = board;
2666 
2667 	if ((opl_probe_init(probe) != 0) ||
2668 
2669 	    (opl_probe_cpu_chips(probe) != 0) ||
2670 
2671 	    (opl_probe_memory(probe) != 0) ||
2672 
2673 	    (opl_probe_io(probe) != 0)) {
2674 
2675 		/*
2676 		 * Probe failed. Perform cleanup.
2677 		 */
2678 		(void) opl_unprobe_sb(board);
2679 		ret = -1;
2680 	}
2681 
2682 	*cpu_impl = probe->pr_cpu_impl;
2683 
2684 	kmem_free(probe, sizeof (opl_probe_t));
2685 
2686 	return (ret);
2687 }
2688 
2689 /*
2690  * This unprobing also includes CMU-CH.
2691  */
2692 /*ARGSUSED*/
2693 static int
2694 opl_unprobe_io(int board)
2695 {
2696 	int		i, j, ret;
2697 	opl_board_cfg_t	*board_cfg;
2698 	dev_info_t	**node;
2699 	fco_handle_t	*hand;
2700 	char		**probe_str;
2701 
2702 	board_cfg = &opl_boards[board];
2703 
2704 	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2705 
2706 		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2707 
2708 			node = &board_cfg->cfg_pcich_leaf[i][j];
2709 			hand = &board_cfg->cfg_pcich_handle[i][j];
2710 			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];
2711 
2712 			if (*node == NULL)
2713 				continue;
2714 
2715 			if (*hand != NULL) {
2716 				opl_fc_ops_free_handle(*hand);
2717 				*hand = NULL;
2718 			}
2719 
2720 			if (*probe_str != NULL) {
2721 				kmem_free(*probe_str, PROBE_STR_SIZE);
2722 				*probe_str = NULL;
2723 			}
2724 
2725 			ret = opl_destroy_node(*node);
2726 			if (ret != 0) {
2727 
2728 				cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) "
2729 				    "failed", board, i, j);
2730 				return (-1);
2731 			}
2732 
2733 			*node = NULL;
2734 
2735 		}
2736 	}
2737 
2738 	node = &board_cfg->cfg_cmuch_leaf;
2739 	hand = &board_cfg->cfg_cmuch_handle;
2740 	probe_str = &board_cfg->cfg_cmuch_probe_str;
2741 
2742 	if (*node == NULL)
2743 		return (0);
2744 
2745 	if (*hand != NULL) {
2746 		opl_fc_ops_free_handle(*hand);
2747 		*hand = NULL;
2748 	}
2749 
2750 	if (*probe_str != NULL) {
2751 		kmem_free(*probe_str, PROBE_STR_SIZE);
2752 		*probe_str = NULL;
2753 	}
2754 
2755 	if (opl_destroy_node(*node) != 0) {
2756 
2757 		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed", board,
2758 		    OPL_CMU_CHANNEL, 0);
2759 		return (-1);
2760 	}
2761 
2762 	*node = NULL;
2763 
2764 	return (0);
2765 }
2766 
2767 /*
2768  * Destroy the "pseudo-mc" node for a board.
2769  */
2770 static int
2771 opl_unprobe_memory(int board)
2772 {
2773 	opl_board_cfg_t	*board_cfg;
2774 
2775 	board_cfg = &opl_boards[board];
2776 
2777 	if (board_cfg->cfg_pseudo_mc == NULL)
2778 		return (0);
2779 
2780 	if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) {
2781 
2782 		cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board);
2783 		return (-1);
2784 	}
2785 
2786 	board_cfg->cfg_pseudo_mc = NULL;
2787 
2788 	return (0);
2789 }
2790 
2791 /*
2792  * Destroy the "cmp" nodes for a board. This also destroys the "core"
2793  * and "cpu" nodes below the "cmp" nodes.
2794  */
2795 static int
2796 opl_unprobe_processors(int board)
2797 {
2798 	int		i;
2799 	dev_info_t	**cfg_cpu_chips;
2800 
2801 	cfg_cpu_chips = opl_boards[board].cfg_cpu_chips;
2802 
2803 	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
2804 
2805 		if (cfg_cpu_chips[i] == NULL)
2806 			continue;
2807 
2808 		if (opl_destroy_node(cfg_cpu_chips[i]) != 0) {
2809 
2810 			cmn_err(CE_WARN, "IKP: destroy chip (%d-%d) failed",
2811 			    board, i);
2812 			return (-1);
2813 		}
2814 
2815 		cfg_cpu_chips[i] = NULL;
2816 	}
2817 
2818 	return (0);
2819 }
2820 
2821 /*
2822  * Perform the unprobe in the following order:
2823  *
2824  *	IO
2825  *	memory
2826  *	processors
2827  */
2828 int
2829 opl_unprobe_sb(int board)
2830 {
2831 	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2832 		return (-1);
2833 
2834 	ASSERT(opl_cfg_inited != 0);
2835 
2836 	if ((opl_unprobe_io(board) != 0) ||
2837 
2838 	    (opl_unprobe_memory(board) != 0) ||
2839 
2840 	    (opl_unprobe_processors(board) != 0))
2841 
2842 		return (-1);
2843 
2844 	if (opl_boards[board].cfg_hwd != NULL) {
2845 #ifdef UCTEST
2846 		size_t			size = 0xA000;
2847 #endif
2848 		/* Release the memory for the HWD */
2849 		void *hwdp = opl_boards[board].cfg_hwd;
2850 		opl_boards[board].cfg_hwd = NULL;
2851 #ifdef UCTEST
2852 		hwdp = (void *)((char *)hwdp - 0x1000);
2853 		hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK);
2854 		vmem_free(heap_arena, hwdp, size);
2855 #else
2856 		kmem_free(hwdp, HWD_DATA_SIZE);
2857 #endif
2858 	}
2859 	return (0);
2860 }
2861 
2862 /*
2863  * For MAC patrol support, we need to update the PA-related properties
2864  * when there is a copy-rename event.  This should be called after the
2865  * physical copy and rename has been done by DR, and before the MAC
2866  * patrol is restarted.
2867  */
2868 int
2869 oplcfg_pa_swap(int from, int to)
2870 {
2871 	dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc;
2872 	dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc;
2873 	opl_range_t *rangef, *ranget;
2874 	int elems;
2875 	int ret;
2876 
2877 	if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef,
2878 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2879 		/* XXX -- bad news */
2880 		return (-1);
2881 	}
2882 	if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget,
2883 	    elems) != DDI_SUCCESS) || (elems != 4)) {
2884 		/* XXX -- bad news */
2885 		return (-1);
2886 	}
2887 	OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget,
2888 	    4);
2889 	OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef,
2890 	    4);
2891 
2892 	OPL_FREE_PROP(ranget);
2893 	OPL_FREE_PROP(rangef);
2894 
2895 	return (0);
2896 }
2897