xref: /titanic_52/usr/src/uts/sun4u/lw8/os/lw8_platmod.c (revision 03831d35f7499c87d51205817c93e9a8d42c4bae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/time.h>
30 #include <sys/cpuvar.h>
31 #include <sys/dditypes.h>
32 #include <sys/ddipropdefs.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/sunddi.h>
35 #include <sys/esunddi.h>
36 #include <sys/sunndi.h>
37 #include <sys/platform_module.h>
38 #include <sys/errno.h>
39 #include <sys/conf.h>
40 #include <sys/modctl.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/prom_plat.h>
44 #include <sys/cmn_err.h>
45 #include <sys/sysmacros.h>
46 #include <sys/mem_cage.h>
47 #include <sys/kobj.h>
48 #include <sys/utsname.h>
49 #include <sys/cpu_sgnblk_defs.h>
50 #include <sys/atomic.h>
51 #include <sys/kdi_impl.h>
52 
53 #include <sys/sgsbbc.h>
54 #include <sys/sgsbbc_iosram.h>
55 #include <sys/sgsbbc_iosram_priv.h>
56 #include <sys/sgsbbc_mailbox.h>
57 #include <sys/sgsgn.h>
58 #include <sys/serengeti.h>
59 #include <sys/sgfrutypes.h>
60 #include <sys/machsystm.h>
61 #include <sys/sbd_ioctl.h>
62 #include <sys/sbd.h>
63 #include <sys/sbdp_mem.h>
64 #include <sys/sgcn.h>
65 
66 #include <sys/memnode.h>
67 #include <vm/vm_dep.h>
68 #include <vm/page.h>
69 
70 #include <sys/cheetahregs.h>
71 #include <sys/plat_ecc_unum.h>
72 #include <sys/plat_ecc_dimm.h>
73 
74 #include <sys/lgrp.h>
75 
76 static int sg_debug = 0;
77 
78 #ifdef DEBUG
79 #define	DCMNERR if (sg_debug) cmn_err
80 #else
81 #define	DCMNERR
82 #endif
83 
84 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
85 
86 /* local functions */
87 static void cpu_sgn_update(ushort_t sgn, uchar_t state,
88     uchar_t sub_state, int cpuid);
89 
90 
91 /*
92  * Local data.
93  *
94  * iosram_write_ptr is a pointer to iosram_write().  Because of
95  * kernel dynamic linking, we can't get to the function by name,
96  * but we can look up its address, and store it in this variable
97  * instead.
98  *
99  * We include the extern for iosram_write() here not because we call
100  * it, but to force compilation errors if its prototype doesn't
101  * match the prototype of iosram_write_ptr.
102  *
103  * The same issues apply to iosram_read() and iosram_read_ptr.
104  */
105 /*CSTYLED*/
106 extern int   iosram_write     (int, uint32_t, caddr_t, uint32_t);
107 static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
108 /*CSTYLED*/
109 extern int   iosram_read     (int, uint32_t, caddr_t, uint32_t);
110 static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
111 
112 
113 /*
114  * Variable to indicate if the date should be obtained from the SC or not.
115  */
116 int todsg_use_sc = FALSE;	/* set the false at the beginning */
117 
118 /*
119  * Preallocation of spare tsb's for DR
120  *
121  * We don't allocate spares for Wildcat since TSBs should come
122  * out of memory local to the node.
123  */
124 #define	IOMMU_PER_SCHIZO	2
125 int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
126 	IOMMU_PER_SCHIZO);
127 
128 /*
129  * sg_max_ncpus is the maximum number of CPUs supported on Serengeti
130  * and Wildcat at GA.  We assume that the maximum number of SSM nodes
131  * supported at GA is 4.  sg_max_ncpus is set to be smaller than NCPU
132  * to reduce the amount of memory the logs take up until we have a
133  * dynamic log memory allocation solution.
134  */
135 int sg_max_ncpus = (24 * 4);	/* (CPUs per node * max number of nodes) */
136 
137 /*
138  * variables to control mailbox message timeouts.
139  * These can be patched via /etc/system or mdb.
140  */
141 int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
142 int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
143 
144 /* cached 'chosen' node_id */
145 pnode_t chosen_nodeid = (pnode_t)0;
146 
147 /*
148  * Table that maps memory slices to a specific memnode.
149  */
150 int slice_to_memnode[SG_MAX_SLICE];
151 
152 /*
153  * We define and use LW8_MAX_CPU_BDS here instead of SG_MAX_CPU_BDS
154  * since a LW8 machine will never have a CPU/Mem board #5 (SB5).
155  * A LW8 machine can only have a maximum of three CPU/Mem boards, but
156  * the board numbers assigned are 0, 2, and 4.  LW8_MAX_CPU_BDS is
157  * defined to be 5 since the entries in the domain_dimm_sids array
158  * are keyed by board number.  Not perfect but some wasted space
159  * is avoided.
160  */
161 #define	LW8_MAX_CPU_BDS		5
162 
163 plat_dimm_sid_board_t	domain_dimm_sids[LW8_MAX_CPU_BDS];
164 
165 int
166 set_platform_tsb_spares()
167 {
168 	return (MIN(serengeti_tsb_spares, MAX_UPA));
169 }
170 
171 #pragma weak mmu_init_large_pages
172 
173 void
174 set_platform_defaults(void)
175 {
176 	extern int watchdog_enable;
177 	extern uint64_t xc_tick_limit_scale;
178 	extern void mmu_init_large_pages(size_t);
179 
180 #ifdef DEBUG
181 	char *todsg_name = "todsg";
182 	ce_verbose_memory = 2;
183 	ce_verbose_other = 2;
184 #endif /* DEBUG */
185 
186 	watchdog_enable = TRUE;
187 	watchdog_available = TRUE;
188 
189 	cpu_sgn_func = cpu_sgn_update;
190 
191 #ifdef DEBUG
192 	/* tod_module_name should be set to "todsg" from OBP property */
193 	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
194 		prom_printf("Using todsg driver\n");
195 	else {
196 		prom_printf("Force using todsg driver\n");
197 		tod_module_name = todsg_name;
198 	}
199 #endif /* DEBUG */
200 
201 	/* lw8 does not support forthdebug */
202 	forthdebug_supported = 0;
203 
204 
205 	/*
206 	 * Some DR operations require the system to be sync paused.
207 	 * Sync pause on Serengeti could potentially take up to 4
208 	 * seconds to complete depending on the load on the SC.  To
209 	 * avoid send_mond panics during such operations, we need to
210 	 * increase xc_tick_limit to a larger value on Serengeti by
211 	 * setting xc_tick_limit_scale to 5.
212 	 */
213 	xc_tick_limit_scale = 5;
214 
215 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
216 	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
217 		if (&mmu_init_large_pages)
218 			mmu_init_large_pages(mmu_ism_pagesize);
219 	}
220 }
221 
222 void
223 load_platform_modules(void)
224 {
225 	if (modload("misc", "pcihp") < 0) {
226 		cmn_err(CE_NOTE, "pcihp driver failed to load");
227 	}
228 }
229 
230 /*ARGSUSED*/
231 int
232 plat_cpu_poweron(struct cpu *cp)
233 {
234 	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
235 
236 	serengeti_cpu_poweron =
237 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
238 
239 	if (serengeti_cpu_poweron == NULL)
240 		return (ENOTSUP);
241 	else
242 		return ((serengeti_cpu_poweron)(cp));
243 }
244 
245 /*ARGSUSED*/
246 int
247 plat_cpu_poweroff(struct cpu *cp)
248 {
249 	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
250 
251 	serengeti_cpu_poweroff =
252 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
253 
254 	if (serengeti_cpu_poweroff == NULL)
255 		return (ENOTSUP);
256 	else
257 		return ((serengeti_cpu_poweroff)(cp));
258 }
259 
260 #ifdef DEBUG
261 pgcnt_t serengeti_cage_size_limit;
262 #endif
263 
264 /* Preferred minimum cage size (expressed in pages)... for DR */
265 pgcnt_t serengeti_minimum_cage_size = 0;
266 
267 void
268 set_platform_cage_params(void)
269 {
270 	extern pgcnt_t total_pages;
271 	extern struct memlist *phys_avail;
272 	int ret;
273 
274 	if (kernel_cage_enable) {
275 		pgcnt_t preferred_cage_size;
276 
277 		preferred_cage_size =
278 		    MAX(serengeti_minimum_cage_size, total_pages / 256);
279 #ifdef DEBUG
280 		if (serengeti_cage_size_limit)
281 			preferred_cage_size = serengeti_cage_size_limit;
282 #endif
283 		kcage_range_lock();
284 		/*
285 		 * Post copies obp into the lowest slice.  This requires the
286 		 * cage to grow upwards
287 		 */
288 		ret = kcage_range_init(phys_avail, 0);
289 		if (ret == 0)
290 			kcage_init(preferred_cage_size);
291 		kcage_range_unlock();
292 	}
293 
294 	/* Only note when the cage is off since it should always be on. */
295 	if (!kcage_on)
296 		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
297 }
298 
299 #define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
300 	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
301 
302 void
303 update_mem_bounds(int brd, uint64_t base, uint64_t sz)
304 {
305 	uint64_t	end;
306 	int		mnode;
307 
308 	end = base + sz - 1;
309 
310 	/*
311 	 * First see if this board already has a memnode associated
312 	 * with it.  If not, see if this slice has a memnode.  This
313 	 * covers the cases where a single slice covers multiple
314 	 * boards (cross-board interleaving) and where a single
315 	 * board has multiple slices (1+GB DIMMs).
316 	 */
317 	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
318 		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
319 			mnode = mem_node_alloc();
320 		plat_assign_lgrphand_to_mem_node(brd, mnode);
321 	}
322 
323 	/*
324 	 * Align base at 16GB boundary
325 	 */
326 	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
327 
328 	while (base < end) {
329 		slice_to_memnode[PA_2_SLICE(base)] = mnode;
330 		base += (1ul << PA_SLICE_SHIFT);
331 	}
332 }
333 
334 /*
335  * Dynamically detect memory slices in the system by decoding
336  * the cpu memory decoder registers at boot time.
337  */
338 void
339 plat_fill_mc(pnode_t nodeid)
340 {
341 	uint64_t	mc_addr, mask;
342 	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
343 	uint64_t	base, size;
344 	uint32_t	regs[4];
345 	int		len;
346 	int		local_mc;
347 	int		portid;
348 	int		boardid;
349 	int		i;
350 
351 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
352 	    (portid == -1))
353 		return;
354 
355 	/*
356 	 * Decode the board number from the MC portid
357 	 */
358 	boardid = SG_PORTID_TO_BOARD_NUM(portid);
359 
360 	/*
361 	 * The "reg" property returns 4 32-bit values. The first two are
362 	 * combined to form a 64-bit address.  The second two are for a
363 	 * 64-bit size, but we don't actually need to look at that value.
364 	 */
365 	len = prom_getproplen(nodeid, "reg");
366 	if (len != (sizeof (uint32_t) * 4)) {
367 		prom_printf("Warning: malformed 'reg' property\n");
368 		return;
369 	}
370 	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
371 		return;
372 	mc_addr = ((uint64_t)regs[0]) << 32;
373 	mc_addr |= (uint64_t)regs[1];
374 
375 	/*
376 	 * Figure out whether the memory controller we are examining
377 	 * belongs to this CPU or a different one.
378 	 */
379 	if (portid == cpunodes[CPU->cpu_id].portid)
380 		local_mc = 1;
381 	else
382 		local_mc = 0;
383 
384 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
385 		mask = SG_REG_2_OFFSET(i);
386 
387 		/*
388 		 * If the memory controller is local to this CPU, we use
389 		 * the special ASI to read the decode registers.
390 		 * Otherwise, we load the values from a magic address in
391 		 * I/O space.
392 		 */
393 		if (local_mc)
394 			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
395 		else
396 			mc_decode[i] = lddphysio((mc_addr | mask));
397 
398 		if (mc_decode[i] >> MC_VALID_SHIFT) {
399 			/*
400 			 * The memory decode register is a bitmask field,
401 			 * so we can decode that into both a base and
402 			 * a span.
403 			 */
404 			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
405 			size = MC_UK2SPAN(mc_decode[i]);
406 			update_mem_bounds(boardid, base, size);
407 		}
408 	}
409 }
410 
411 /*
412  * This routine is run midway through the boot process.  By the time we get
413  * here, we know about all the active CPU boards in the system, and we have
414  * extracted information about each board's memory from the memory
415  * controllers.  We have also figured out which ranges of memory will be
416  * assigned to which memnodes, so we walk the slice table to build the table
417  * of memnodes.
418  */
419 /* ARGSUSED */
420 void
421 plat_build_mem_nodes(u_longlong_t *list, size_t  nelems)
422 {
423 	int	slice;
424 	pfn_t	basepfn;
425 	pgcnt_t	npgs;
426 
427 	mem_node_pfn_shift = PFN_SLICE_SHIFT;
428 	mem_node_physalign = (1ull << PA_SLICE_SHIFT);
429 
430 	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
431 		if (slice_to_memnode[slice] == -1)
432 			continue;
433 		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
434 		npgs = 1ull << PFN_SLICE_SHIFT;
435 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
436 	}
437 }
438 
439 int
440 plat_pfn_to_mem_node(pfn_t pfn)
441 {
442 	int node;
443 
444 	node = slice_to_memnode[PFN_2_SLICE(pfn)];
445 
446 	ASSERT(node >= 0);
447 	return (node);
448 }
449 
450 /*
451  * Serengeti support for lgroups.
452  *
453  * On Serengeti, an lgroup platform handle == board number.
454  *
455  * Mappings between lgroup handles and memnodes are managed
456  * in addition to mappings between memory slices and memnodes
457  * to support cross-board interleaving as well as multiple
458  * slices per board (e.g. >1GB DIMMs). The initial mapping
459  * of memnodes to lgroup handles is determined at boot time.
460  * A DR addition of memory adds a new mapping. A DR copy-rename
461  * swaps mappings.
462  */
463 
464 /*
465  * Macro for extracting the board number from the CPU id
466  */
467 #define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)
468 
469 /*
470  * Return the platform handle for the lgroup containing the given CPU
471  *
472  * For Serengeti, lgroup platform handle == board number
473  */
474 lgrp_handle_t
475 plat_lgrp_cpu_to_hand(processorid_t id)
476 {
477 	return (CPUID_TO_BOARD(id));
478 }
479 
480 /*
481  * Platform specific lgroup initialization
482  */
483 void
484 plat_lgrp_init(void)
485 {
486 	int i;
487 	extern uint32_t lgrp_expand_proc_thresh;
488 	extern uint32_t lgrp_expand_proc_diff;
489 
490 	/*
491 	 * Initialize lookup tables to invalid values so we catch
492 	 * any illegal use of them.
493 	 */
494 	for (i = 0; i < SG_MAX_SLICE; i++) {
495 		slice_to_memnode[i] = -1;
496 	}
497 
498 	/*
499 	 * Set tuneables for Serengeti architecture
500 	 *
501 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
502 	 * this process is currently running on before considering
503 	 * expanding threads to another lgroup.
504 	 *
505 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
506 	 * must be loaded before expanding to it.
507 	 *
508 	 * Bandwidth is maximized on Serengeti by spreading load across
509 	 * the machine. The impact to inter-thread communication isn't
510 	 * too costly since remote latencies are relatively low.  These
511 	 * values equate to one CPU's load and so attempt to spread the
512 	 * load out across as many lgroups as possible one CPU at a time.
513 	 */
514 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
515 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
516 }
517 
518 /*
519  * Platform notification of lgroup (re)configuration changes
520  */
521 /*ARGSUSED*/
522 void
523 plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
524 {
525 	update_membounds_t	*umb;
526 	lgrp_config_mem_rename_t lmr;
527 	lgrp_handle_t		shand, thand;
528 	int			snode, tnode;
529 
530 	switch (evt) {
531 
532 	case LGRP_CONFIG_MEM_ADD:
533 		umb = (update_membounds_t *)arg;
534 		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
535 
536 		break;
537 
538 	case LGRP_CONFIG_MEM_DEL:
539 		/* We don't have to do anything */
540 
541 		break;
542 
543 	case LGRP_CONFIG_MEM_RENAME:
544 		/*
545 		 * During a DR copy-rename operation, all of the memory
546 		 * on one board is moved to another board -- but the
547 		 * addresses/pfns and memnodes don't change. This means
548 		 * the memory has changed locations without changing identity.
549 		 *
550 		 * Source is where we are copying from and target is where we
551 		 * are copying to.  After source memnode is copied to target
552 		 * memnode, the physical addresses of the target memnode are
553 		 * renamed to match what the source memnode had.  Then target
554 		 * memnode can be removed and source memnode can take its
555 		 * place.
556 		 *
557 		 * To do this, swap the lgroup handle to memnode mappings for
558 		 * the boards, so target lgroup will have source memnode and
559 		 * source lgroup will have empty target memnode which is where
560 		 * its memory will go (if any is added to it later).
561 		 *
562 		 * Then source memnode needs to be removed from its lgroup
563 		 * and added to the target lgroup where the memory was living
564 		 * but under a different name/memnode.  The memory was in the
565 		 * target memnode and now lives in the source memnode with
566 		 * different physical addresses even though it is the same
567 		 * memory.
568 		 */
569 		shand = arg & 0xffff;
570 		thand = (arg & 0xffff0000) >> 16;
571 		snode = plat_lgrphand_to_mem_node(shand);
572 		tnode = plat_lgrphand_to_mem_node(thand);
573 
574 		plat_assign_lgrphand_to_mem_node(thand, snode);
575 		plat_assign_lgrphand_to_mem_node(shand, tnode);
576 
577 		/*
578 		 * Remove source memnode of copy rename from its lgroup
579 		 * and add it to its new target lgroup
580 		 */
581 		lmr.lmem_rename_from = shand;
582 		lmr.lmem_rename_to = thand;
583 
584 		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
585 		    (uintptr_t)&lmr);
586 
587 		break;
588 
589 	default:
590 		break;
591 	}
592 }
593 
594 /*
595  * Return latency between "from" and "to" lgroups
596  *
597  * This latency number can only be used for relative comparison
598  * between lgroups on the running system, cannot be used across platforms,
599  * and may not reflect the actual latency.  It is platform and implementation
600  * specific, so platform gets to decide its value.  It would be nice if the
601  * number was at least proportional to make comparisons more meaningful though.
602  * NOTE: The numbers below are supposed to be load latencies for uncached
603  * memory divided by 10.
604  */
605 int
606 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
607 {
608 	/*
609 	 * Return min remote latency when there are more than two lgroups
610 	 * (root and child) and getting latency between two different lgroups
611 	 * or root is involved
612 	 */
613 	if (lgrp_optimizations() && (from != to ||
614 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
615 		return (28);
616 	else
617 		return (23);
618 }
619 
620 /* ARGSUSED */
621 void
622 plat_freelist_process(int mnode)
623 {
624 }
625 
626 /*
627  * Find dip for chosen IOSRAM
628  */
629 dev_info_t *
630 find_chosen_dip(void)
631 {
632 	dev_info_t	*dip;
633 	char		master_sbbc[MAXNAMELEN];
634 	int		nodeid;
635 	uint_t		tunnel;
636 
637 	/*
638 	 * find the /chosen SBBC node, prom interface will handle errors
639 	 */
640 	nodeid = prom_chosennode();
641 	/*
642 	 * get the 'iosram' property from the /chosen node
643 	 */
644 	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
645 		SBBC_ERR(CE_PANIC, "No iosram property found! \n");
646 	}
647 
648 	if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
649 	    sizeof (master_sbbc)) < 0) {
650 		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
651 		    tunnel);
652 	}
653 
654 	chosen_nodeid = nodeid;
655 
656 	/*
657 	 * load and attach the sgsbbc driver.
658 	 * This will also attach all the sgsbbc driver instances
659 	 */
660 	if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
661 		cmn_err(CE_WARN, "sgsbbc failed to load\n");
662 	}
663 	/* translate a path name to a dev_info_t */
664 	dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
665 	if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
666 		cmn_err(CE_PANIC,
667 			"e_ddi_hold_devi_by_path(%x) failed for SBBC\n",
668 		    tunnel);
669 	}
670 
671 	/* make sure devi_ref is ZERO */
672 	ndi_rele_devi(dip);
673 	DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
674 
675 	return (dip);
676 }
677 
678 void
679 load_platform_drivers(void)
680 {
681 	int ret;
682 
683 	/*
684 	 * Load the mc-us3 memory driver.
685 	 */
686 	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
687 		cmn_err(CE_WARN, "mc-us3 failed to load");
688 	else
689 		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
690 
691 	/*
692 	 * Initialize the chosen IOSRAM before its clients
693 	 * are loaded.
694 	 */
695 	(void) find_chosen_dip();
696 
697 	/*
698 	 * Load the environmentals driver (sgenv)
699 	 *
700 	 * We need this driver to handle events from the SC when state
701 	 * changes occur in the environmental data.
702 	 */
703 	if (i_ddi_attach_hw_nodes("sgenv") != DDI_SUCCESS)
704 		cmn_err(CE_WARN, "sgenv failed to load");
705 
706 	/*
707 	 * Ideally, we'd do this in set_platform_defaults(), but
708 	 * at that point it's too early to look up symbols.
709 	 */
710 	iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
711 	    modgetsymvalue("iosram_write", 0);
712 
713 	if (iosram_write_ptr == NULL) {
714 		DCMNERR(CE_WARN, "load_platform_defaults: iosram_write()"
715 		    " not found; signatures will not be updated\n");
716 	} else {
717 		/*
718 		 * The iosram read ptr is only needed if we can actually
719 		 * write CPU signatures, so only bother setting it if we
720 		 * set a valid write pointer, above.
721 		 */
722 		iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
723 		    modgetsymvalue("iosram_read", 0);
724 
725 		if (iosram_read_ptr == NULL)
726 			DCMNERR(CE_WARN, "load_platform_defaults: iosram_read()"
727 			    " not found\n");
728 	}
729 
730 	/*
731 	 * Set todsg_use_sc to TRUE so that we will be getting date
732 	 * from the SC.
733 	 */
734 	todsg_use_sc = TRUE;
735 
736 	/*
737 	 * Now is a good time to activate hardware watchdog (if one exists).
738 	 */
739 	mutex_enter(&tod_lock);
740 	if (watchdog_enable)
741 		ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
742 	mutex_exit(&tod_lock);
743 	if (ret != 0)
744 		printf("Hardware watchdog enabled\n");
745 
746 	plat_ecc_init();
747 }
748 
749 /*
750  * No platform drivers on this platform
751  */
752 char *platform_module_list[] = {
753 	(char *)0
754 };
755 
756 /*ARGSUSED*/
757 void
758 plat_tod_fault(enum tod_fault_type tod_bad)
759 {
760 }
761 int
762 plat_max_boards()
763 {
764 	return (SG_MAX_BDS);
765 }
766 int
767 plat_max_io_units_per_board()
768 {
769 	return (SG_MAX_IO_PER_BD);
770 }
771 int
772 plat_max_cmp_units_per_board()
773 {
774 	return (SG_MAX_CMPS_PER_BD);
775 }
776 int
777 plat_max_cpu_units_per_board()
778 {
779 	return (SG_MAX_CPUS_PER_BD);
780 }
781 
782 int
783 plat_max_mc_units_per_board()
784 {
785 	return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
786 }
787 
788 int
789 plat_max_mem_units_per_board()
790 {
791 	return (SG_MAX_MEM_PER_BD);
792 }
793 
794 int
795 plat_max_cpumem_boards(void)
796 {
797 	return (LW8_MAX_CPU_BDS);
798 }
799 
800 int
801 set_platform_max_ncpus(void)
802 {
803 	return (sg_max_ncpus);
804 }
805 
806 void
807 plat_dmv_params(uint_t *hwint, uint_t *swint)
808 {
809 	*hwint = MAX_UPA;
810 	*swint = 0;
811 }
812 
813 static int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
814 
815 /*
816  * Our nodename has been set, pass it along to the SC.
817  */
818 void
819 plat_nodename_set(void)
820 {
821 	sbbc_msg_t	req;	/* request */
822 	sbbc_msg_t	resp;	/* response */
823 	int		rv;	/* return value from call to mbox */
824 	struct nodename_info {
825 		int32_t	namelen;
826 		char	nodename[_SYS_NMLN];
827 	} nni;
828 
829 	/*
830 	 * find the symbol for the mailbox routine
831 	 */
832 	if (sg_mbox == NULL)
833 		sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
834 			modgetsymvalue("sbbc_mbox_request_response", 0);
835 
836 	if (sg_mbox == NULL) {
837 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
838 		return;
839 	}
840 
841 	/*
842 	 * construct the message telling the SC our nodename
843 	 */
844 	(void) strcpy(nni.nodename, utsname.nodename);
845 	nni.namelen = (int32_t)strlen(nni.nodename);
846 
847 	req.msg_type.type = INFO_MBOX;
848 	req.msg_type.sub_type = INFO_MBOX_NODENAME;
849 	req.msg_status = 0;
850 	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
851 	req.msg_bytes = 0;
852 	req.msg_buf = (caddr_t)&nni;
853 	req.msg_data[0] = 0;
854 	req.msg_data[1] = 0;
855 
856 	/*
857 	 * initialize the response back from the SC
858 	 */
859 	resp.msg_type.type = INFO_MBOX;
860 	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
861 	resp.msg_status = 0;
862 	resp.msg_len = 0;
863 	resp.msg_bytes = 0;
864 	resp.msg_buf = (caddr_t)0;
865 	resp.msg_data[0] = 0;
866 	resp.msg_data[1] = 0;
867 
868 	/*
869 	 * ship it and check for success
870 	 */
871 	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
872 
873 	if (rv != 0) {
874 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
875 	} else if (resp.msg_status != 0) {
876 		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
877 			resp.msg_status);
878 	} else {
879 		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
880 
881 		/*
882 		 * It is necessary to exchange capability the bitmap
883 		 * with SC before sending any ecc error information and
884 		 * indictment. We are calling the plat_ecc_capability_send()
885 		 * here just after sending the nodename successfully.
886 		 */
887 		rv = plat_ecc_capability_send();
888 		if (rv == 0) {
889 			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
890 			    "successful\n");
891 		}
892 	}
893 }
894 
895 /*
896  * flag to allow users switch between using OBP's
897  * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
898  * (for main memory errors only).
899  */
900 int sg_use_prom_get_unum = 0;
901 
902 /*
903  * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
904  * to call into the unum cache system.  This is the E$ equivalent of
905  * sg_use_prom_get_unum.
906  */
907 int sg_use_prom_ecache_unum = 0;
908 
909 /* used for logging ECC errors to the SC */
910 #define	SG_MEMORY_ECC	1
911 #define	SG_ECACHE_ECC	2
912 #define	SG_UNKNOWN_ECC	(-1)
913 
914 /*
915  * plat_get_mem_unum() generates a string identifying either the
916  * memory or E$ DIMM(s) during error logging. Depending on whether
917  * the error is E$ or memory related, the appropriate support
918  * routine is called to assist in the string generation.
919  *
920  * - For main memory errors we can use the mc-us3 drivers p2getunum()
921  *   (or prom_get_unum() for debugging purposes).
922  *
923  * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
924  *   prom_serengeti_get_ecacheunum() for debugging purposes).
925  */
926 
927 static int
928 sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
929     int *lenp)
930 {
931 	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
932 	    buf, buflen, lenp)) != 0)
933 		return (EIO);
934 	else if (*lenp <= 1)
935 		return (EINVAL);
936 	else
937 		return (0);
938 }
939 
940 /*ARGSUSED*/
941 int
942 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
943     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
944 {
945 	/*
946 	 * unum_func will either point to the memory drivers p2get_mem_unum()
947 	 * or to prom_get_unum() for memory errors.
948 	 */
949 	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
950 	    int buflen, int *lenp) = p2get_mem_unum;
951 
952 	/*
953 	 * check if it's a Memory or an Ecache error.
954 	 */
955 	if (flt_in_memory) {
956 		/*
957 		 * It's a main memory error.
958 		 *
959 		 * For debugging we allow the user to switch between
960 		 * using OBP's get_unum and the memory driver's get_unum
961 		 * so we create a pointer to the functions and switch
962 		 * depending on the sg_use_prom_get_unum flag.
963 		 */
964 		if (sg_use_prom_get_unum) {
965 			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
966 			return (sg_prom_get_unum(synd_code,
967 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
968 		} else if (unum_func != NULL) {
969 			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
970 			    buf, buflen, lenp));
971 		} else {
972 			return (ENOTSUP);
973 		}
974 	} else if (flt_status & ECC_ECACHE) {
975 		/*
976 		 * It's an E$ error.
977 		 */
978 		if (sg_use_prom_ecache_unum) {
979 			/*
980 			 * We call to OBP to handle this.
981 			 */
982 			DCMNERR(CE_NOTE,
983 			    "Using prom_serengeti_get_ecacheunum from OBP");
984 			if (prom_serengeti_get_ecacheunum(flt_bus_id,
985 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
986 				return (EIO);
987 			}
988 		} else {
989 			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
990 			    buf, buflen, lenp));
991 		}
992 	} else {
993 		return (ENOTSUP);
994 	}
995 
996 	return (0);
997 }
998 
999 /*
1000  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1001  * driver giving each platform the opportunity to add platform
1002  * specific label information to the unum for ECC error logging purposes.
1003  */
1004 void
1005 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1006 {
1007 	char	new_unum[UNUM_NAMLEN] = "";
1008 	int	node = SG_PORTID_TO_NODEID(mcid);
1009 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
1010 	int	position = SG_PORTID_TO_CPU_POSN(mcid);
1011 
1012 	/*
1013 	 * The mc-us3 driver deals with logical banks but for unum
1014 	 * purposes we need to use physical banks so that the correct
1015 	 * dimm can be physically located. Logical banks 0 and 2
1016 	 * make up physical bank 0. Logical banks 1 and 3 make up
1017 	 * physical bank 1. Here we do the necessary conversion.
1018 	 */
1019 	bank = (bank % 2);
1020 
1021 	if (dimm == -1) {
1022 		SG_SET_FRU_NAME_NODE(new_unum, node);
1023 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1024 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1025 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1026 
1027 	} else {
1028 		SG_SET_FRU_NAME_NODE(new_unum, node);
1029 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1030 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1031 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1032 		SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1033 
1034 		strcat(new_unum, " ");
1035 		strcat(new_unum, unum);
1036 	}
1037 
1038 	strcpy(unum, new_unum);
1039 }
1040 
1041 int
1042 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1043 {
1044 	int	node = SG_PORTID_TO_NODEID(cpuid);
1045 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1046 
1047 	if (snprintf(buf, buflen, "/N%d/%s%d", node,
1048 	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1049 		return (ENOSPC);
1050 	} else {
1051 		*lenp = strlen(buf);
1052 		return (0);
1053 	}
1054 }
1055 
1056 static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
1057 static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
1058 
1059 /*
1060  * We log all ECC errors to the SC so we send a mailbox
1061  * message to the SC passing it the relevant data.
1062  * ECC mailbox messages are sent via a taskq mechanism to
1063  * prevent impaired system performance during ECC floods.
1064  * Indictments have already passed through a taskq, so they
1065  * are not queued here.
1066  */
1067 int
1068 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1069 {
1070 	sbbc_ecc_mbox_t	*msgp;
1071 	uint16_t	msg_subtype;
1072 	int		sleep_flag, log_error;
1073 	size_t		msg_size;
1074 
1075 	if (sg_ecc_taskq_func == NULL) {
1076 		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1077 		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1078 		if (sg_ecc_taskq_func == NULL) {
1079 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1080 			    "sbbc_mbox_queue_ecc_event not found");
1081 			return (ENODEV);
1082 		}
1083 	}
1084 	if (sg_ecc_mbox_func == NULL) {
1085 		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1086 		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
1087 		if (sg_ecc_mbox_func == NULL) {
1088 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1089 			    "sbbc_mbox_ecc_output not found");
1090 			return (ENODEV);
1091 		}
1092 	}
1093 
1094 	/*
1095 	 * Initialize the request and response structures
1096 	 */
1097 	switch (msg_type) {
1098 	case PLAT_ECC_ERROR_MESSAGE:
1099 		msg_subtype = INFO_MBOX_ERROR_ECC;
1100 		msg_size = sizeof (plat_ecc_error_data_t);
1101 		sleep_flag = KM_NOSLEEP;
1102 		log_error = 1;
1103 		break;
1104 	case PLAT_ECC_ERROR2_MESSAGE:
1105 		msg_subtype = INFO_MBOX_ECC;
1106 		msg_size = sizeof (plat_ecc_error2_data_t);
1107 		sleep_flag = KM_NOSLEEP;
1108 		log_error = 1;
1109 		break;
1110 	case PLAT_ECC_INDICTMENT_MESSAGE:
1111 		msg_subtype = INFO_MBOX_ERROR_INDICT;
1112 		msg_size = sizeof (plat_ecc_indictment_data_t);
1113 		sleep_flag = KM_SLEEP;
1114 		log_error = 0;
1115 		break;
1116 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1117 		msg_subtype = INFO_MBOX_ECC;
1118 		msg_size = sizeof (plat_ecc_indictment2_data_t);
1119 		sleep_flag = KM_SLEEP;
1120 		log_error = 0;
1121 		break;
1122 	case PLAT_ECC_CAPABILITY_MESSAGE:
1123 		msg_subtype = INFO_MBOX_ECC_CAP;
1124 		msg_size = sizeof (plat_capability_data_t) +
1125 		    strlen(utsname.release) + strlen(utsname.version) + 2;
1126 		sleep_flag = KM_SLEEP;
1127 		log_error = 0;
1128 		break;
1129 	case PLAT_ECC_DIMM_SID_MESSAGE:
1130 		msg_subtype = INFO_MBOX_ECC;
1131 		msg_size = sizeof (plat_dimm_sid_request_data_t);
1132 		sleep_flag = KM_SLEEP;
1133 		log_error = 0;
1134 		break;
1135 	default:
1136 		return (EINVAL);
1137 	}
1138 
1139 	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1140 		sleep_flag);
1141 	if (msgp == NULL) {
1142 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1143 				"unable to allocate sbbc_ecc_mbox");
1144 		return (ENOMEM);
1145 	}
1146 
1147 	msgp->ecc_log_error = log_error;
1148 
1149 	msgp->ecc_req.msg_type.type = INFO_MBOX;
1150 	msgp->ecc_req.msg_type.sub_type = msg_subtype;
1151 	msgp->ecc_req.msg_status = 0;
1152 	msgp->ecc_req.msg_len = (int)msg_size;
1153 	msgp->ecc_req.msg_bytes = 0;
1154 	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1155 	msgp->ecc_req.msg_data[0] = 0;
1156 	msgp->ecc_req.msg_data[1] = 0;
1157 
1158 	if (msgp->ecc_req.msg_buf == NULL) {
1159 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1160 				"unable to allocate request msg_buf");
1161 		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1162 		return (ENOMEM);
1163 	}
1164 
1165 	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1166 
1167 	/*
1168 	 * initialize the response back from the SC
1169 	 */
1170 	msgp->ecc_resp.msg_type.type = INFO_MBOX;
1171 	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1172 	msgp->ecc_resp.msg_status = 0;
1173 	msgp->ecc_resp.msg_len = 0;
1174 	msgp->ecc_resp.msg_bytes = 0;
1175 	msgp->ecc_resp.msg_buf = NULL;
1176 	msgp->ecc_resp.msg_data[0] = 0;
1177 	msgp->ecc_resp.msg_data[1] = 0;
1178 
1179 	switch (msg_type) {
1180 	case PLAT_ECC_ERROR_MESSAGE:
1181 	case PLAT_ECC_ERROR2_MESSAGE:
1182 		/*
1183 		 * For Error Messages, we go through a taskq.
1184 		 * Queue up message for processing
1185 		 */
1186 		(*sg_ecc_taskq_func)(msgp);
1187 		return (0);
1188 
1189 	case PLAT_ECC_CAPABILITY_MESSAGE:
1190 		/*
1191 		 * For indictment and capability messages, we've already gone
1192 		 * through the taskq, so we can call the mailbox routine
1193 		 * directly.  Find the symbol for the routine that sends
1194 		 * the mailbox msg
1195 		 */
1196 		msgp->ecc_resp.msg_len = (int)msg_size;
1197 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1198 		    sleep_flag);
1199 		/* FALLTHRU */
1200 
1201 	case PLAT_ECC_INDICTMENT_MESSAGE:
1202 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1203 		return ((*sg_ecc_mbox_func)(msgp));
1204 
1205 	case PLAT_ECC_DIMM_SID_MESSAGE:
1206 		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1207 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1208 		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1209 
1210 		return ((*sg_ecc_mbox_func)(msgp));
1211 
1212 	default:
1213 		ASSERT(0);
1214 		return (EINVAL);
1215 	}
1216 }
1217 
1218 /*
1219  * m is redundant on serengeti as the multiplyer is always 4
1220  */
1221 /*ARGSUSED*/
1222 int
1223 plat_make_fru_cpuid(int sb, int m, int proc)
1224 {
1225 	return (MAKE_CPUID(sb, proc));
1226 }
1227 
1228 /*
1229  * board number for a given proc
1230  */
1231 int
1232 plat_make_fru_boardnum(int proc)
1233 {
1234 	return (SG_PORTID_TO_BOARD_NUM(proc));
1235 }
1236 
1237 static
1238 void
1239 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1240 {
1241 	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1242 	sig_state_t current_sgn;
1243 	int i;
1244 
1245 	if (iosram_write_ptr == NULL) {
1246 		/*
1247 		 * If the IOSRAM write pointer isn't set, we won't be able
1248 		 * to write signatures to ANYTHING, so we may as well just
1249 		 * write out an error message (if desired) and exit this
1250 		 * routine now...
1251 		 */
1252 		DCMNERR(CE_WARN,
1253 		    "cpu_sgn_update: iosram_write() not found;"
1254 		    " cannot write signature 0x%x for CPU(s) or domain\n",
1255 		    signature);
1256 		return;
1257 	}
1258 
1259 
1260 	/*
1261 	 * Differentiate a panic reboot from a non-panic reboot in the
1262 	 * setting of the substate of the signature.
1263 	 *
1264 	 * If the new substate is REBOOT and we're rebooting due to a panic,
1265 	 * then set the new substate to a special value indicating a panic
1266 	 * reboot, SIGSUBST_PANIC_REBOOT.
1267 	 *
1268 	 * A panic reboot is detected by a current (previous) domain signature
1269 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1270 	 * The domain signature state SIGST_EXIT is used as the panic flow
1271 	 * progresses.
1272 	 *
1273 	 * At the end of the panic flow, the reboot occurs but we should now
1274 	 * one that was involuntary, something that may be quite useful to know
1275 	 * at OBP level.
1276 	 */
1277 	if (sub_state == SIGSUBST_REBOOT) {
1278 		if (iosram_read_ptr == NULL) {
1279 			DCMNERR(CE_WARN,
1280 			    "cpu_sgn_update: iosram_read() not found;"
1281 			    " could not check current domain signature\n");
1282 		} else {
1283 			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1284 				SG_SGNBLK_DOMAINSIG_OFFSET,
1285 				(char *)&current_sgn, sizeof (current_sgn));
1286 			if (current_sgn.state_t.state == SIGST_EXIT)
1287 				signature = CPU_SIG_BLD(sig, state,
1288 					SIGSUBST_PANIC_REBOOT);
1289 		}
1290 	}
1291 
1292 	/*
1293 	 * cpuid == -1 indicates that the operation applies to all cpus.
1294 	 */
1295 	if (cpuid >= 0) {
1296 		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1297 			SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1298 			sizeof (signature));
1299 	} else {
1300 		for (i = 0; i < NCPU; i++) {
1301 			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1302 				(CPU_EXISTS|CPU_QUIESCED))) {
1303 				continue;
1304 			}
1305 			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1306 				SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1307 				sizeof (signature));
1308 		}
1309 	}
1310 
1311 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1312 		return;
1313 	}
1314 
1315 	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1316 		SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1317 		sizeof (signature));
1318 }
1319 
1320 void
1321 startup_platform(void)
1322 {
1323 }
1324 
1325 /*
1326  * A routine to convert a number (represented as a string) to
1327  * the integer value it represents.
1328  */
1329 
1330 static int
1331 isdigit(int ch)
1332 {
1333 	return (ch >= '0' && ch <= '9');
1334 }
1335 
1336 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
1337 
1338 static int
1339 strtoi(char *p, char **pos)
1340 {
1341 	int n;
1342 	int c, neg = 0;
1343 
1344 	if (!isdigit(c = *p)) {
1345 		while (isspace(c))
1346 			c = *++p;
1347 		switch (c) {
1348 			case '-':
1349 				neg++;
1350 				/* FALLTHROUGH */
1351 			case '+':
1352 			c = *++p;
1353 		}
1354 		if (!isdigit(c)) {
1355 			if (pos != NULL)
1356 				*pos = p;
1357 			return (0);
1358 		}
1359 	}
1360 	for (n = '0' - c; isdigit(c = *++p); ) {
1361 		n *= 10; /* two steps to avoid unnecessary overflow */
1362 		n += '0' - c; /* accum neg to avoid surprises at MAX */
1363 	}
1364 	if (pos != NULL)
1365 		*pos = p;
1366 	return (neg ? n : -n);
1367 }
1368 
1369 /*
1370  * Get the three parts of the Serengeti PROM version.
1371  * Used for feature readiness tests.
1372  *
1373  * Return 0 if version extracted successfully, -1 otherwise.
1374  */
1375 
1376 int
1377 sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1378 {
1379 	int plen;
1380 	char vers[512];
1381 	static pnode_t node;
1382 	static char version[] = "version";
1383 	char *verp, *ep;
1384 
1385 	node = prom_finddevice("/openprom");
1386 	if (node == OBP_BADNODE)
1387 		return (-1);
1388 
1389 	plen = prom_getproplen(node, version);
1390 	if (plen <= 0 || plen >= sizeof (vers))
1391 		return (-1);
1392 	(void) prom_getprop(node, version, vers);
1393 	vers[plen] = '\0';
1394 
1395 	/* Make sure it's an OBP flashprom */
1396 	if (vers[0] != 'O' && vers[1] != 'B' && vers[2] != 'P') {
1397 		cmn_err(CE_WARN, "sg_get_prom_version: "
1398 		    "unknown <version> string in </openprom>\n");
1399 		return (-1);
1400 	}
1401 	verp = &vers[4];
1402 
1403 	*sysp = strtoi(verp, &ep);
1404 	if (ep == verp || *ep != '.')
1405 		return (-1);
1406 	verp = ep + 1;
1407 
1408 	*intfp = strtoi(verp, &ep);
1409 	if (ep == verp || *ep != '.')
1410 		return (-1);
1411 	verp = ep + 1;
1412 
1413 	*bldp = strtoi(verp, &ep);
1414 	if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1415 		return (-1);
1416 	return (0);
1417 }
1418 
1419 /*
1420  * Return 0 if system board Dynamic Reconfiguration
1421  * is supported by the firmware, -1 otherwise.
1422  */
1423 int
1424 sg_prom_sb_dr_check(void)
1425 {
1426 	static int prom_res = 1;
1427 
1428 	if (prom_res == 1) {
1429 		int sys, intf, bld;
1430 		int rv;
1431 
1432 		rv = sg_get_prom_version(&sys, &intf, &bld);
1433 		if (rv == 0 && sys == 5 &&
1434 		    (intf >= 12 || (intf == 11 && bld >= 200))) {
1435 			prom_res = 0;
1436 		} else {
1437 			prom_res = -1;
1438 		}
1439 	}
1440 	return (prom_res);
1441 }
1442 
1443 /*
1444  * Return 0 if cPCI Dynamic Reconfiguration
1445  * is supported by the firmware, -1 otherwise.
1446  */
1447 int
1448 sg_prom_cpci_dr_check(void)
1449 {
1450 	/*
1451 	 * The version check is currently the same as for
1452 	 * system boards. Since the two DR sub-systems are
1453 	 * independent, this could change.
1454 	 */
1455 	return (sg_prom_sb_dr_check());
1456 }
1457 
1458 /*
1459  * Our implementation of this KDI op updates the CPU signature in the system
1460  * controller.  Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1461  * The Forth words we execute will, among other things, transform our OBP_SIG
1462  * into DBG_SIG.  They won't function properly if we try to use DBG_SIG.
1463  */
1464 static void
1465 sg_system_claim(void)
1466 {
1467 	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1468 }
1469 
1470 static void
1471 sg_system_release(void)
1472 {
1473 	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1474 }
1475 
1476 static void
1477 sg_console_claim(void)
1478 {
1479 	prom_serengeti_set_console_input(SGCN_OBP_STR);
1480 }
1481 
1482 static void
1483 sg_console_release(void)
1484 {
1485 	prom_serengeti_set_console_input(SGCN_CLNT_STR);
1486 }
1487 
1488 void
1489 plat_kdi_init(kdi_t *kdi)
1490 {
1491 	kdi->pkdi_system_claim = sg_system_claim;
1492 	kdi->pkdi_system_release = sg_system_release;
1493 	kdi->pkdi_console_claim = sg_console_claim;
1494 	kdi->pkdi_console_release = sg_console_release;
1495 }
1496