xref: /titanic_50/usr/src/uts/sun4u/lw8/os/lw8_platmod.c (revision 9acbbeaf2a1ffe5c14b244867d427714fab43c5c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/time.h>
30 #include <sys/cpuvar.h>
31 #include <sys/dditypes.h>
32 #include <sys/ddipropdefs.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/sunddi.h>
35 #include <sys/esunddi.h>
36 #include <sys/sunndi.h>
37 #include <sys/platform_module.h>
38 #include <sys/errno.h>
39 #include <sys/conf.h>
40 #include <sys/modctl.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/prom_plat.h>
44 #include <sys/cmn_err.h>
45 #include <sys/sysmacros.h>
46 #include <sys/mem_cage.h>
47 #include <sys/kobj.h>
48 #include <sys/utsname.h>
49 #include <sys/cpu_sgnblk_defs.h>
50 #include <sys/atomic.h>
51 #include <sys/kdi_impl.h>
52 
53 #include <sys/sgsbbc.h>
54 #include <sys/sgsbbc_iosram.h>
55 #include <sys/sgsbbc_iosram_priv.h>
56 #include <sys/sgsbbc_mailbox.h>
57 #include <sys/sgsgn.h>
58 #include <sys/serengeti.h>
59 #include <sys/sgfrutypes.h>
60 #include <sys/machsystm.h>
61 #include <sys/sbd_ioctl.h>
62 #include <sys/sbd.h>
63 #include <sys/sbdp_mem.h>
64 #include <sys/sgcn.h>
65 
66 #include <sys/memnode.h>
67 #include <vm/vm_dep.h>
68 #include <vm/page.h>
69 
70 #include <sys/cheetahregs.h>
71 #include <sys/plat_ecc_unum.h>
72 #include <sys/plat_ecc_dimm.h>
73 
74 #include <sys/lgrp.h>
75 
76 static int sg_debug = 0;
77 
78 #ifdef DEBUG
79 #define	DCMNERR if (sg_debug) cmn_err
80 #else
81 #define	DCMNERR
82 #endif
83 
84 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
85 
86 /* local functions */
87 static void cpu_sgn_update(ushort_t sgn, uchar_t state,
88     uchar_t sub_state, int cpuid);
89 
90 
91 /*
92  * Local data.
93  *
94  * iosram_write_ptr is a pointer to iosram_write().  Because of
95  * kernel dynamic linking, we can't get to the function by name,
96  * but we can look up its address, and store it in this variable
97  * instead.
98  *
99  * We include the extern for iosram_write() here not because we call
100  * it, but to force compilation errors if its prototype doesn't
101  * match the prototype of iosram_write_ptr.
102  *
103  * The same issues apply to iosram_read() and iosram_read_ptr.
104  */
105 /*CSTYLED*/
106 extern int   iosram_write     (int, uint32_t, caddr_t, uint32_t);
107 static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
108 /*CSTYLED*/
109 extern int   iosram_read     (int, uint32_t, caddr_t, uint32_t);
110 static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
111 
112 
113 /*
114  * Variable to indicate if the date should be obtained from the SC or not.
115  */
116 int todsg_use_sc = FALSE;	/* set to FALSE at the beginning */
117 
118 /*
119  * Preallocation of spare tsb's for DR
120  *
121  * We don't allocate spares for Wildcat since TSBs should come
122  * out of memory local to the node.
123  */
124 #define	IOMMU_PER_SCHIZO	2
125 int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
126 	IOMMU_PER_SCHIZO);
127 
128 /*
129  * sg_max_ncpus is the maximum number of CPUs supported on lw8.
130  * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
131  * memory the logs take up until we have a dynamic log memory allocation
132  * solution.
133  */
134 int sg_max_ncpus = (12 * 2);    /* (max # of processors * # of cores/proc) */
135 
136 /*
137  * variables to control mailbox message timeouts.
138  * These can be patched via /etc/system or mdb.
139  */
140 int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
141 int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
142 
143 /* cached 'chosen' node_id */
144 pnode_t chosen_nodeid = (pnode_t)0;
145 
146 /*
147  * Table that maps memory slices to a specific memnode.
148  */
149 int slice_to_memnode[SG_MAX_SLICE];
150 
151 /*
152  * We define and use LW8_MAX_CPU_BDS here instead of SG_MAX_CPU_BDS
153  * since an LW8 machine will never have a CPU/Mem board #5 (SB5).
154  * An LW8 machine can only have a maximum of three CPU/Mem boards, but
155  * the board numbers assigned are 0, 2, and 4.  LW8_MAX_CPU_BDS is
156  * defined to be 5 since the entries in the domain_dimm_sids array
157  * are keyed by board number.  Not perfect but some wasted space
158  * is avoided.
159  */
160 #define	LW8_MAX_CPU_BDS		5
161 
162 plat_dimm_sid_board_t	domain_dimm_sids[LW8_MAX_CPU_BDS];
163 
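/*
 * Number of spare TSBs to preallocate for DR, capped at MAX_UPA.
 */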
164 int
165 set_platform_tsb_spares()
166 {
167 	return (MIN(serengeti_tsb_spares, MAX_UPA));
168 }
169 
170 #pragma weak mmu_init_large_pages
171 
172 void
173 set_platform_defaults(void)
174 {
175 	extern int watchdog_enable;
176 	extern uint64_t xc_tick_limit_scale;
177 	extern void mmu_init_large_pages(size_t);
178 
179 #ifdef DEBUG
180 	char *todsg_name = "todsg";
181 	ce_verbose_memory = 2;
182 	ce_verbose_other = 2;
183 #endif /* DEBUG */
184 
185 	watchdog_enable = TRUE;
186 	watchdog_available = TRUE;
187 
188 	cpu_sgn_func = cpu_sgn_update;
189 
190 #ifdef DEBUG
191 	/* tod_module_name should be set to "todsg" from OBP property */
192 	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
193 		prom_printf("Using todsg driver\n");
194 	else {
195 		prom_printf("Force using todsg driver\n");
196 		tod_module_name = todsg_name;
197 	}
198 #endif /* DEBUG */
199 
200 	/* lw8 does not support forthdebug */
201 	forthdebug_supported = 0;
202 
203 
204 	/*
205 	 * Some DR operations require the system to be sync paused.
206 	 * Sync pause on Serengeti could potentially take up to 4
207 	 * seconds to complete depending on the load on the SC.  To
208 	 * avoid send_mondo panics during such operations, we need to
209 	 * increase xc_tick_limit to a larger value on Serengeti by
210 	 * setting xc_tick_limit_scale to 5.
211 	 */
212 	xc_tick_limit_scale = 5;
213 
214 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
215 	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
216 		if (&mmu_init_large_pages)
217 			mmu_init_large_pages(mmu_ism_pagesize);
218 	}
219 }
220 
221 void
222 load_platform_modules(void)
223 {
224 	if (modload("misc", "pcihp") < 0) {
225 		cmn_err(CE_NOTE, "pcihp driver failed to load");
226 	}
227 }
228 
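/*
 * CPU power-on/off is delegated to the DR (sbdp) code.  The entry
 * points are looked up at run time with modgetsymvalue() so that this
 * platform module has no hard dependency on that module; if the symbol
 * is not present, the operation is reported as unsupported.
 */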
229 /*ARGSUSED*/
230 int
231 plat_cpu_poweron(struct cpu *cp)
232 {
233 	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
234 
235 	serengeti_cpu_poweron =
236 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
237 
238 	if (serengeti_cpu_poweron == NULL)
239 		return (ENOTSUP);
240 	else
241 		return ((serengeti_cpu_poweron)(cp));
242 }
243 
244 /*ARGSUSED*/
245 int
246 plat_cpu_poweroff(struct cpu *cp)
247 {
248 	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
249 
250 	serengeti_cpu_poweroff =
251 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
252 
253 	if (serengeti_cpu_poweroff == NULL)
254 		return (ENOTSUP);
255 	else
256 		return ((serengeti_cpu_poweroff)(cp));
257 }
258 
259 #ifdef DEBUG
260 pgcnt_t serengeti_cage_size_limit;
261 #endif
262 
263 /* Preferred minimum cage size (expressed in pages)... for DR */
264 pgcnt_t serengeti_minimum_cage_size = 0;
265 
266 void
267 set_platform_cage_params(void)
268 {
269 	extern pgcnt_t total_pages;
270 	extern struct memlist *phys_avail;
271 	int ret;
272 
273 	if (kernel_cage_enable) {
274 		pgcnt_t preferred_cage_size;
275 
276 		preferred_cage_size =
277 		    MAX(serengeti_minimum_cage_size, total_pages / 256);
278 #ifdef DEBUG
279 		if (serengeti_cage_size_limit)
280 			preferred_cage_size = serengeti_cage_size_limit;
281 #endif
282 		kcage_range_lock();
283 		/*
284 		 * POST copies OBP into the lowest slice.  This requires the
285 		 * cage to grow upwards.
286 		 */
287 		ret = kcage_range_init(phys_avail, 0);
288 		if (ret == 0)
289 			kcage_init(preferred_cage_size);
290 		kcage_range_unlock();
291 	}
292 
293 	/* Only note when the cage is off since it should always be on. */
294 	if (!kcage_on)
295 		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
296 }
297 
298 #define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
299 	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
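/*
 * ALIGN() rounds x up to the next multiple of a (a power of two), or
 * returns x unchanged when a is 0.  For example, with the 16GB slice
 * size used below (1ul << 34), ALIGN(0x440000000, 1ul << 34) is
 * 0x800000000, while an already slice-aligned address maps to itself.
 */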
300 
301 void
302 update_mem_bounds(int brd, uint64_t base, uint64_t sz)
303 {
304 	uint64_t	end;
305 	int		mnode;
306 
307 	end = base + sz - 1;
308 
309 	/*
310 	 * First see if this board already has a memnode associated
311 	 * with it.  If not, see if this slice has a memnode.  This
312 	 * covers the cases where a single slice covers multiple
313 	 * boards (cross-board interleaving) and where a single
314 	 * board has multiple slices (1+GB DIMMs).
315 	 */
316 	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
317 		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
318 			mnode = mem_node_alloc();
319 		plat_assign_lgrphand_to_mem_node(brd, mnode);
320 	}
321 
322 	/*
323 	 * Align base at 16GB boundary
324 	 */
325 	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
326 
327 	while (base < end) {
328 		slice_to_memnode[PA_2_SLICE(base)] = mnode;
329 		base += (1ul << PA_SLICE_SHIFT);
330 	}
331 }
332 
333 /*
334  * Dynamically detect memory slices in the system by decoding
335  * the cpu memory decoder registers at boot time.
336  */
337 void
338 plat_fill_mc(pnode_t nodeid)
339 {
340 	uint64_t	mc_addr, mask;
341 	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
342 	uint64_t	base, size;
343 	uint32_t	regs[4];
344 	int		len;
345 	int		local_mc;
346 	int		portid;
347 	int		boardid;
348 	int		i;
349 
350 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
351 	    (portid == -1))
352 		return;
353 
354 	/*
355 	 * Decode the board number from the MC portid
356 	 */
357 	boardid = SG_PORTID_TO_BOARD_NUM(portid);
358 
359 	/*
360 	 * The "reg" property returns 4 32-bit values. The first two are
361 	 * combined to form a 64-bit address.  The second two are for a
362 	 * 64-bit size, but we don't actually need to look at that value.
363 	 */
364 	len = prom_getproplen(nodeid, "reg");
365 	if (len != (sizeof (uint32_t) * 4)) {
366 		prom_printf("Warning: malformed 'reg' property\n");
367 		return;
368 	}
369 	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
370 		return;
371 	mc_addr = ((uint64_t)regs[0]) << 32;
372 	mc_addr |= (uint64_t)regs[1];
373 
374 	/*
375 	 * Figure out whether the memory controller we are examining
376 	 * belongs to this CPU or a different one.
377 	 */
378 	if (portid == cpunodes[CPU->cpu_id].portid)
379 		local_mc = 1;
380 	else
381 		local_mc = 0;
382 
383 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
384 		mask = SG_REG_2_OFFSET(i);
385 
386 		/*
387 		 * If the memory controller is local to this CPU, we use
388 		 * the special ASI to read the decode registers.
389 		 * Otherwise, we load the values from a magic address in
390 		 * I/O space.
391 		 */
392 		if (local_mc)
393 			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
394 		else
395 			mc_decode[i] = lddphysio((mc_addr | mask));
396 
397 		if (mc_decode[i] >> MC_VALID_SHIFT) {
398 			/*
399 			 * The memory decode register is a bitmask field,
400 			 * so we can decode that into both a base and
401 			 * a span.
402 			 */
403 			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
404 			size = MC_UK2SPAN(mc_decode[i]);
405 			update_mem_bounds(boardid, base, size);
406 		}
407 	}
408 }
409 
410 /*
411  * This routine is run midway through the boot process.  By the time we get
412  * here, we know about all the active CPU boards in the system, and we have
413  * extracted information about each board's memory from the memory
414  * controllers.  We have also figured out which ranges of memory will be
415  * assigned to which memnodes, so we walk the slice table to build the table
416  * of memnodes.
417  */
418 /* ARGSUSED */
419 void
420 plat_build_mem_nodes(u_longlong_t *list, size_t  nelems)
421 {
422 	int	slice;
423 	pfn_t	basepfn;
424 	pgcnt_t	npgs;
425 
426 	mem_node_pfn_shift = PFN_SLICE_SHIFT;
427 	mem_node_physalign = (1ull << PA_SLICE_SHIFT);
428 
429 	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
430 		if (slice_to_memnode[slice] == -1)
431 			continue;
432 		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
433 		npgs = 1ull << PFN_SLICE_SHIFT;
434 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
435 	}
436 }
437 
438 int
439 plat_pfn_to_mem_node(pfn_t pfn)
440 {
441 	int node;
442 
443 	node = slice_to_memnode[PFN_2_SLICE(pfn)];
444 
445 	return (node);
446 }
447 
448 /*
449  * Serengeti support for lgroups.
450  *
451  * On Serengeti, an lgroup platform handle == board number.
452  *
453  * Mappings between lgroup handles and memnodes are managed
454  * in addition to mappings between memory slices and memnodes
455  * to support cross-board interleaving as well as multiple
456  * slices per board (e.g. >1GB DIMMs). The initial mapping
457  * of memnodes to lgroup handles is determined at boot time.
458  * A DR addition of memory adds a new mapping. A DR copy-rename
459  * swaps mappings.
460  */
461 
462 /*
463  * Macro for extracting the board number from the CPU id
464  */
465 #define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)
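/*
 * For example, CPUID_TO_BOARD(8) through CPUID_TO_BOARD(11) all
 * evaluate to 2, i.e. those CPU ids belong to board 2.
 */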
466 
467 /*
468  * Return the platform handle for the lgroup containing the given CPU
469  *
470  * For Serengeti, lgroup platform handle == board number
471  */
472 lgrp_handle_t
473 plat_lgrp_cpu_to_hand(processorid_t id)
474 {
475 	return (CPUID_TO_BOARD(id));
476 }
477 
478 /*
479  * Platform specific lgroup initialization
480  */
481 void
482 plat_lgrp_init(void)
483 {
484 	int i;
485 	extern uint32_t lgrp_expand_proc_thresh;
486 	extern uint32_t lgrp_expand_proc_diff;
487 
488 	/*
489 	 * Initialize lookup tables to invalid values so we catch
490 	 * any illegal use of them.
491 	 */
492 	for (i = 0; i < SG_MAX_SLICE; i++) {
493 		slice_to_memnode[i] = -1;
494 	}
495 
496 	/*
497 	 * Set tuneables for Serengeti architecture
498 	 *
499 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
500 	 * this process is currently running on before considering
501 	 * expanding threads to another lgroup.
502 	 *
503 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
504 	 * must be loaded before expanding to it.
505 	 *
506 	 * Bandwidth is maximized on Serengeti by spreading load across
507 	 * the machine. The impact to inter-thread communication isn't
508 	 * too costly since remote latencies are relatively low.  These
509 	 * values equate to one CPU's load and so attempt to spread the
510 	 * load out across as many lgroups as possible one CPU at a time.
511 	 */
512 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
513 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
514 }
515 
516 /*
517  * Platform notification of lgroup (re)configuration changes
518  */
519 /*ARGSUSED*/
520 void
521 plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
522 {
523 	update_membounds_t	*umb;
524 	lgrp_config_mem_rename_t lmr;
525 	lgrp_handle_t		shand, thand;
526 	int			snode, tnode;
527 
528 	switch (evt) {
529 
530 	case LGRP_CONFIG_MEM_ADD:
531 		umb = (update_membounds_t *)arg;
532 		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
533 
534 		break;
535 
536 	case LGRP_CONFIG_MEM_DEL:
537 		/* We don't have to do anything */
538 
539 		break;
540 
541 	case LGRP_CONFIG_MEM_RENAME:
542 		/*
543 		 * During a DR copy-rename operation, all of the memory
544 		 * on one board is moved to another board -- but the
545 		 * addresses/pfns and memnodes don't change. This means
546 		 * the memory has changed locations without changing identity.
547 		 *
548 		 * Source is where we are copying from and target is where we
549 		 * are copying to.  After source memnode is copied to target
550 		 * memnode, the physical addresses of the target memnode are
551 		 * renamed to match what the source memnode had.  Then target
552 		 * memnode can be removed and source memnode can take its
553 		 * place.
554 		 *
555 		 * To do this, swap the lgroup handle to memnode mappings for
556 		 * the boards, so target lgroup will have source memnode and
557 		 * source lgroup will have empty target memnode which is where
558 		 * its memory will go (if any is added to it later).
559 		 *
560 		 * Then source memnode needs to be removed from its lgroup
561 		 * and added to the target lgroup where the memory was living
562 		 * but under a different name/memnode.  The memory was in the
563 		 * target memnode and now lives in the source memnode with
564 		 * different physical addresses even though it is the same
565 		 * memory.
566 		 */
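		/*
		 * The source and target lgroup handles are packed into
		 * arg: source in the low 16 bits, target in the next
		 * 16 bits.
		 */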
567 		shand = arg & 0xffff;
568 		thand = (arg & 0xffff0000) >> 16;
569 		snode = plat_lgrphand_to_mem_node(shand);
570 		tnode = plat_lgrphand_to_mem_node(thand);
571 
572 		plat_assign_lgrphand_to_mem_node(thand, snode);
573 		plat_assign_lgrphand_to_mem_node(shand, tnode);
574 
575 		/*
576 		 * Remove source memnode of copy rename from its lgroup
577 		 * and add it to its new target lgroup
578 		 */
579 		lmr.lmem_rename_from = shand;
580 		lmr.lmem_rename_to = thand;
581 
582 		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
583 		    (uintptr_t)&lmr);
584 
585 		break;
586 
587 	default:
588 		break;
589 	}
590 }
591 
592 /*
593  * Return latency between "from" and "to" lgroups
594  *
595  * This latency number can only be used for relative comparison
596  * between lgroups on the running system, cannot be used across platforms,
597  * and may not reflect the actual latency.  It is platform and implementation
598  * specific, so platform gets to decide its value.  It would be nice if the
599  * number was at least proportional to make comparisons more meaningful though.
600  * NOTE: The numbers below are supposed to be load latencies for uncached
601  * memory divided by 10.
602  */
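/*
 * With the divide-by-10 convention noted above, the values 28 and 23
 * returned below correspond to roughly 280ns and 230ns load latencies,
 * respectively.
 */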
603 int
604 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
605 {
606 	/*
607 	 * Return min remote latency when there are more than two lgroups
608 	 * (root and child) and getting latency between two different lgroups
609 	 * or root is involved
610 	 */
611 	if (lgrp_optimizations() && (from != to ||
612 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
613 		return (28);
614 	else
615 		return (23);
616 }
617 
618 /* ARGSUSED */
619 void
620 plat_freelist_process(int mnode)
621 {
622 }
623 
624 /*
625  * Find dip for chosen IOSRAM
626  */
627 dev_info_t *
628 find_chosen_dip(void)
629 {
630 	dev_info_t	*dip;
631 	char		master_sbbc[MAXNAMELEN];
632 	int		nodeid;
633 	uint_t		tunnel;
634 
635 	/*
636 	 * find the /chosen SBBC node, prom interface will handle errors
637 	 */
638 	nodeid = prom_chosennode();
639 	/*
640 	 * get the 'iosram' property from the /chosen node
641 	 */
642 	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
643 		SBBC_ERR(CE_PANIC, "No iosram property found! \n");
644 	}
645 
646 	if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
647 	    sizeof (master_sbbc)) < 0) {
648 		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
649 		    tunnel);
650 	}
651 
652 	chosen_nodeid = nodeid;
653 
654 	/*
655 	 * load and attach the sgsbbc driver.
656 	 * This will also attach all the sgsbbc driver instances
657 	 */
658 	if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
659 		cmn_err(CE_WARN, "sgsbbc failed to load\n");
660 	}
661 	/* translate a path name to a dev_info_t */
662 	dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
663 	if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
664 		cmn_err(CE_PANIC,
665 		    "e_ddi_hold_devi_by_path(%x) failed for SBBC\n",
666 		    tunnel);
667 	}
668 
669 	/* make sure devi_ref is ZERO */
670 	ndi_rele_devi(dip);
671 	DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
672 
673 	return (dip);
674 }
675 
676 void
677 load_platform_drivers(void)
678 {
679 	int ret = 0;
680 
681 	/*
682 	 * Load the mc-us3 memory driver.
683 	 */
684 	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
685 		cmn_err(CE_WARN, "mc-us3 failed to load");
686 	else
687 		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
688 
689 	/*
690 	 * Initialize the chosen IOSRAM before its clients
691 	 * are loaded.
692 	 */
693 	(void) find_chosen_dip();
694 
695 	/*
696 	 * Load the environmentals driver (sgenv)
697 	 *
698 	 * We need this driver to handle events from the SC when state
699 	 * changes occur in the environmental data.
700 	 */
701 	if (i_ddi_attach_hw_nodes("sgenv") != DDI_SUCCESS)
702 		cmn_err(CE_WARN, "sgenv failed to load");
703 
704 	/*
705 	 * Ideally, we'd do this in set_platform_defaults(), but
706 	 * at that point it's too early to look up symbols.
707 	 */
708 	iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
709 	    modgetsymvalue("iosram_write", 0);
710 
711 	if (iosram_write_ptr == NULL) {
712 		DCMNERR(CE_WARN, "load_platform_drivers: iosram_write()"
713 		    " not found; signatures will not be updated\n");
714 	} else {
715 		/*
716 		 * The iosram read ptr is only needed if we can actually
717 		 * write CPU signatures, so only bother setting it if we
718 		 * set a valid write pointer, above.
719 		 */
720 		iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
721 		    modgetsymvalue("iosram_read", 0);
722 
723 		if (iosram_read_ptr == NULL)
724 			DCMNERR(CE_WARN, "load_platform_drivers: iosram_read()"
725 			    " not found\n");
726 	}
727 
728 	/*
729 	 * Set todsg_use_sc to TRUE so that we will be getting the date
730 	 * from the SC.
731 	 */
732 	todsg_use_sc = TRUE;
733 
734 	/*
735 	 * Now is a good time to activate hardware watchdog (if one exists).
736 	 */
737 	mutex_enter(&tod_lock);
738 	if (watchdog_enable)
739 		ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
740 	mutex_exit(&tod_lock);
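	/*
	 * tod_set_watchdog_timer() is expected to return a non-zero
	 * value (the armed timeout) only when the watchdog was actually
	 * enabled, hence the check below.
	 */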
741 	if (ret != 0)
742 		printf("Hardware watchdog enabled\n");
743 
744 	plat_ecc_init();
745 }
746 
747 /*
748  * No platform drivers on this platform
749  */
750 char *platform_module_list[] = {
751 	(char *)0
752 };
753 
754 /*ARGSUSED*/
755 void
756 plat_tod_fault(enum tod_fault_type tod_bad)
757 {
758 }
759 int
760 plat_max_boards()
761 {
762 	return (SG_MAX_BDS);
763 }
764 int
765 plat_max_io_units_per_board()
766 {
767 	return (SG_MAX_IO_PER_BD);
768 }
769 int
770 plat_max_cmp_units_per_board()
771 {
772 	return (SG_MAX_CMPS_PER_BD);
773 }
774 int
775 plat_max_cpu_units_per_board()
776 {
777 	return (SG_MAX_CPUS_PER_BD);
778 }
779 
780 int
781 plat_max_mc_units_per_board()
782 {
783 	return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
784 }
785 
786 int
787 plat_max_mem_units_per_board()
788 {
789 	return (SG_MAX_MEM_PER_BD);
790 }
791 
792 int
793 plat_max_cpumem_boards(void)
794 {
795 	return (LW8_MAX_CPU_BDS);
796 }
797 
798 int
799 set_platform_max_ncpus(void)
800 {
801 	return (sg_max_ncpus);
802 }
803 
804 void
805 plat_dmv_params(uint_t *hwint, uint_t *swint)
806 {
807 	*hwint = MAX_UPA;
808 	*swint = 0;
809 }
810 
811 static int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
812 
813 /*
814  * Our nodename has been set, pass it along to the SC.
815  */
816 void
817 plat_nodename_set(void)
818 {
819 	sbbc_msg_t	req;	/* request */
820 	sbbc_msg_t	resp;	/* response */
821 	int		rv;	/* return value from call to mbox */
822 	struct nodename_info {
823 		int32_t	namelen;
824 		char	nodename[_SYS_NMLN];
825 	} nni;
826 
827 	/*
828 	 * find the symbol for the mailbox routine
829 	 */
830 	if (sg_mbox == NULL)
831 		sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
832 			modgetsymvalue("sbbc_mbox_request_response", 0);
833 
834 	if (sg_mbox == NULL) {
835 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
836 		return;
837 	}
838 
839 	/*
840 	 * construct the message telling the SC our nodename
841 	 */
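	/*
	 * The payload is nni itself: a 32-bit length followed by the
	 * nodename bytes, which is why msg_len is set to namelen plus
	 * sizeof (namelen).
	 */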
842 	(void) strcpy(nni.nodename, utsname.nodename);
843 	nni.namelen = (int32_t)strlen(nni.nodename);
844 
845 	req.msg_type.type = INFO_MBOX;
846 	req.msg_type.sub_type = INFO_MBOX_NODENAME;
847 	req.msg_status = 0;
848 	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
849 	req.msg_bytes = 0;
850 	req.msg_buf = (caddr_t)&nni;
851 	req.msg_data[0] = 0;
852 	req.msg_data[1] = 0;
853 
854 	/*
855 	 * initialize the response back from the SC
856 	 */
857 	resp.msg_type.type = INFO_MBOX;
858 	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
859 	resp.msg_status = 0;
860 	resp.msg_len = 0;
861 	resp.msg_bytes = 0;
862 	resp.msg_buf = (caddr_t)0;
863 	resp.msg_data[0] = 0;
864 	resp.msg_data[1] = 0;
865 
866 	/*
867 	 * ship it and check for success
868 	 */
869 	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
870 
871 	if (rv != 0) {
872 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
873 	} else if (resp.msg_status != 0) {
874 		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
875 			resp.msg_status);
876 	} else {
877 		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
878 
879 		/*
880 		 * It is necessary to exchange the capability bitmap
881 		 * with the SC before sending any ECC error information
882 		 * or indictments.  We call plat_ecc_capability_send()
883 		 * here, just after sending the nodename successfully.
884 		 */
885 		rv = plat_ecc_capability_send();
886 		if (rv == 0) {
887 			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
888 			    " successful\n");
889 		}
890 	}
891 }
892 
893 /*
894  * flag to allow users to switch between using OBP's
895  * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
896  * (for main memory errors only).
897  */
898 int sg_use_prom_get_unum = 0;
899 
900 /*
901  * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
902  * to call into the unum cache system.  This is the E$ equivalent of
903  * sg_use_prom_get_unum.
904  */
905 int sg_use_prom_ecache_unum = 0;
906 
907 /* used for logging ECC errors to the SC */
908 #define	SG_MEMORY_ECC	1
909 #define	SG_ECACHE_ECC	2
910 #define	SG_UNKNOWN_ECC	(-1)
911 
912 /*
913  * plat_get_mem_unum() generates a string identifying either the
914  * memory or E$ DIMM(s) during error logging. Depending on whether
915  * the error is E$ or memory related, the appropriate support
916  * routine is called to assist in the string generation.
917  *
918  * - For main memory errors we can use the mc-us3 driver's p2get_mem_unum()
919  *   (or prom_get_unum() for debugging purposes).
920  *
921  * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
922  *   prom_serengeti_get_ecacheunum() for debugging purposes).
923  */
924 
925 static int
926 sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
927     int *lenp)
928 {
929 	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
930 	    buf, buflen, lenp)) != 0)
931 		return (EIO);
932 	else if (*lenp <= 1)
933 		return (EINVAL);
934 	else
935 		return (0);
936 }
937 
938 /*ARGSUSED*/
939 int
940 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
941     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
942 {
943 	/*
944 	 * unum_func will either point to the memory driver's p2get_mem_unum()
945 	 * or to prom_get_unum() for memory errors.
946 	 */
947 	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
948 	    int buflen, int *lenp) = p2get_mem_unum;
949 
950 	/*
951 	 * check if it's a Memory or an Ecache error.
952 	 */
953 	if (flt_in_memory) {
954 		/*
955 		 * It's a main memory error.
956 		 *
957 		 * For debugging we allow the user to switch between
958 		 * using OBP's get_unum and the memory driver's get_unum
959 		 * so we create a pointer to the functions and switch
960 		 * depending on the sg_use_prom_get_unum flag.
961 		 */
962 		if (sg_use_prom_get_unum) {
963 			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
964 			return (sg_prom_get_unum(synd_code,
965 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
966 		} else if (unum_func != NULL) {
967 			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
968 			    buf, buflen, lenp));
969 		} else {
970 			return (ENOTSUP);
971 		}
972 	} else if (flt_status & ECC_ECACHE) {
973 		/*
974 		 * It's an E$ error.
975 		 */
976 		if (sg_use_prom_ecache_unum) {
977 			/*
978 			 * We call to OBP to handle this.
979 			 */
980 			DCMNERR(CE_NOTE,
981 			    "Using prom_serengeti_get_ecacheunum from OBP");
982 			if (prom_serengeti_get_ecacheunum(flt_bus_id,
983 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
984 				return (EIO);
985 			}
986 		} else {
987 			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
988 			    buf, buflen, lenp));
989 		}
990 	} else {
991 		return (ENOTSUP);
992 	}
993 
994 	return (0);
995 }
996 
997 /*
998  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
999  * driver giving each platform the opportunity to add platform
1000  * specific label information to the unum for ECC error logging purposes.
1001  */
1002 void
1003 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1004 {
1005 	char	new_unum[UNUM_NAMLEN] = "";
1006 	int	node = SG_PORTID_TO_NODEID(mcid);
1007 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
1008 	int	position = SG_PORTID_TO_CPU_POSN(mcid);
1009 
1010 	/*
1011 	 * The mc-us3 driver deals with logical banks but for unum
1012 	 * purposes we need to use physical banks so that the correct
1013 	 * dimm can be physically located. Logical banks 0 and 2
1014 	 * make up physical bank 0. Logical banks 1 and 3 make up
1015 	 * physical bank 1. Here we do the necessary conversion.
1016 	 */
1017 	bank = (bank % 2);
1018 
1019 	if (dimm == -1) {
1020 		SG_SET_FRU_NAME_NODE(new_unum, node);
1021 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1022 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1023 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1024 
1025 	} else {
1026 		SG_SET_FRU_NAME_NODE(new_unum, node);
1027 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1028 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1029 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1030 		SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1031 
1032 		strcat(new_unum, " ");
1033 		strcat(new_unum, unum);
1034 	}
1035 
1036 	strcpy(unum, new_unum);
1037 }
1038 
1039 int
1040 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1041 {
1042 	int	node = SG_PORTID_TO_NODEID(cpuid);
1043 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1044 
1045 	if (snprintf(buf, buflen, "/N%d/%s%d", node,
1046 	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1047 		return (ENOSPC);
1048 	} else {
1049 		*lenp = strlen(buf);
1050 		return (0);
1051 	}
1052 }
1053 
1054 static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
1055 static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
1056 
1057 /*
1058  * We log all ECC errors to the SC so we send a mailbox
1059  * message to the SC passing it the relevant data.
1060  * ECC mailbox messages are sent via a taskq mechanism to
1061  * prevent impaired system performance during ECC floods.
1062  * Indictments have already passed through a taskq, so they
1063  * are not queued here.
1064  */
1065 int
1066 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1067 {
1068 	sbbc_ecc_mbox_t	*msgp;
1069 	uint16_t	msg_subtype;
1070 	int		sleep_flag, log_error;
1071 	size_t		msg_size;
1072 
1073 	if (sg_ecc_taskq_func == NULL) {
1074 		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1075 		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1076 		if (sg_ecc_taskq_func == NULL) {
1077 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1078 			    "sbbc_mbox_queue_ecc_event not found");
1079 			return (ENODEV);
1080 		}
1081 	}
1082 	if (sg_ecc_mbox_func == NULL) {
1083 		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1084 		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
1085 		if (sg_ecc_mbox_func == NULL) {
1086 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1087 			    "sbbc_mbox_ecc_output not found");
1088 			return (ENODEV);
1089 		}
1090 	}
1091 
1092 	/*
1093 	 * Initialize the request and response structures
1094 	 */
1095 	switch (msg_type) {
1096 	case PLAT_ECC_ERROR_MESSAGE:
1097 		msg_subtype = INFO_MBOX_ERROR_ECC;
1098 		msg_size = sizeof (plat_ecc_error_data_t);
1099 		sleep_flag = KM_NOSLEEP;
1100 		log_error = 1;
1101 		break;
1102 	case PLAT_ECC_ERROR2_MESSAGE:
1103 		msg_subtype = INFO_MBOX_ECC;
1104 		msg_size = sizeof (plat_ecc_error2_data_t);
1105 		sleep_flag = KM_NOSLEEP;
1106 		log_error = 1;
1107 		break;
1108 	case PLAT_ECC_INDICTMENT_MESSAGE:
1109 		msg_subtype = INFO_MBOX_ERROR_INDICT;
1110 		msg_size = sizeof (plat_ecc_indictment_data_t);
1111 		sleep_flag = KM_SLEEP;
1112 		log_error = 0;
1113 		break;
1114 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1115 		msg_subtype = INFO_MBOX_ECC;
1116 		msg_size = sizeof (plat_ecc_indictment2_data_t);
1117 		sleep_flag = KM_SLEEP;
1118 		log_error = 0;
1119 		break;
1120 	case PLAT_ECC_CAPABILITY_MESSAGE:
1121 		msg_subtype = INFO_MBOX_ECC_CAP;
1122 		msg_size = sizeof (plat_capability_data_t) +
1123 		    strlen(utsname.release) + strlen(utsname.version) + 2;
1124 		sleep_flag = KM_SLEEP;
1125 		log_error = 0;
1126 		break;
1127 	case PLAT_ECC_DIMM_SID_MESSAGE:
1128 		msg_subtype = INFO_MBOX_ECC;
1129 		msg_size = sizeof (plat_dimm_sid_request_data_t);
1130 		sleep_flag = KM_SLEEP;
1131 		log_error = 0;
1132 		break;
1133 	default:
1134 		return (EINVAL);
1135 	}
1136 
1137 	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1138 		sleep_flag);
1139 	if (msgp == NULL) {
1140 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1141 				"unable to allocate sbbc_ecc_mbox");
1142 		return (ENOMEM);
1143 	}
1144 
1145 	msgp->ecc_log_error = log_error;
1146 
1147 	msgp->ecc_req.msg_type.type = INFO_MBOX;
1148 	msgp->ecc_req.msg_type.sub_type = msg_subtype;
1149 	msgp->ecc_req.msg_status = 0;
1150 	msgp->ecc_req.msg_len = (int)msg_size;
1151 	msgp->ecc_req.msg_bytes = 0;
1152 	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1153 	msgp->ecc_req.msg_data[0] = 0;
1154 	msgp->ecc_req.msg_data[1] = 0;
1155 
1156 	if (msgp->ecc_req.msg_buf == NULL) {
1157 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1158 				"unable to allocate request msg_buf");
1159 		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1160 		return (ENOMEM);
1161 	}
1162 
1163 	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1164 
1165 	/*
1166 	 * initialize the response back from the SC
1167 	 */
1168 	msgp->ecc_resp.msg_type.type = INFO_MBOX;
1169 	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1170 	msgp->ecc_resp.msg_status = 0;
1171 	msgp->ecc_resp.msg_len = 0;
1172 	msgp->ecc_resp.msg_bytes = 0;
1173 	msgp->ecc_resp.msg_buf = NULL;
1174 	msgp->ecc_resp.msg_data[0] = 0;
1175 	msgp->ecc_resp.msg_data[1] = 0;
1176 
1177 	switch (msg_type) {
1178 	case PLAT_ECC_ERROR_MESSAGE:
1179 	case PLAT_ECC_ERROR2_MESSAGE:
1180 		/*
1181 		 * For Error Messages, we go through a taskq.
1182 		 * Queue up message for processing
1183 		 */
1184 		(*sg_ecc_taskq_func)(msgp);
1185 		return (0);
1186 
1187 	case PLAT_ECC_CAPABILITY_MESSAGE:
1188 		/*
1189 		 * For indictment and capability messages, we've already gone
1190 		 * through the taskq, so we can call the mailbox routine
1191 		 * directly.  Find the symbol for the routine that sends
1192 		 * the mailbox msg
1193 		 */
1194 		msgp->ecc_resp.msg_len = (int)msg_size;
1195 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1196 		    sleep_flag);
1197 		/* FALLTHRU */
1198 
1199 	case PLAT_ECC_INDICTMENT_MESSAGE:
1200 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1201 		return ((*sg_ecc_mbox_func)(msgp));
1202 
1203 	case PLAT_ECC_DIMM_SID_MESSAGE:
1204 		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1205 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1206 		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1207 
1208 		return ((*sg_ecc_mbox_func)(msgp));
1209 
1210 	default:
1211 		ASSERT(0);
1212 		return (EINVAL);
1213 	}
1214 }
1215 
1216 /*
1217  * m is redundant on Serengeti as the multiplier is always 4
1218  */
1219 /*ARGSUSED*/
1220 int
1221 plat_make_fru_cpuid(int sb, int m, int proc)
1222 {
1223 	return (MAKE_CPUID(sb, proc));
1224 }
1225 
1226 /*
1227  * board number for a given proc
1228  */
1229 int
1230 plat_make_fru_boardnum(int proc)
1231 {
1232 	return (SG_PORTID_TO_BOARD_NUM(proc));
1233 }
1234 
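/*
 * Write a CPU signature (and, except for offline/detached transitions,
 * the domain signature) into the signature block in IOSRAM so that the
 * SC can track CPU and domain state.  A cpuid of -1 updates the
 * signatures of all existing or quiesced CPUs.
 */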
1235 static
1236 void
1237 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1238 {
1239 	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1240 	sig_state_t current_sgn;
1241 	int i;
1242 
1243 	if (iosram_write_ptr == NULL) {
1244 		/*
1245 		 * If the IOSRAM write pointer isn't set, we won't be able
1246 		 * to write signatures to ANYTHING, so we may as well just
1247 		 * write out an error message (if desired) and exit this
1248 		 * routine now...
1249 		 */
1250 		DCMNERR(CE_WARN,
1251 		    "cpu_sgn_update: iosram_write() not found;"
1252 		    " cannot write signature 0x%x for CPU(s) or domain\n",
1253 		    signature);
1254 		return;
1255 	}
1256 
1257 
1258 	/*
1259 	 * Differentiate a panic reboot from a non-panic reboot in the
1260 	 * setting of the substate of the signature.
1261 	 *
1262 	 * If the new substate is REBOOT and we're rebooting due to a panic,
1263 	 * then set the new substate to a special value indicating a panic
1264 	 * reboot, SIGSUBST_PANIC_REBOOT.
1265 	 *
1266 	 * A panic reboot is detected by a current (previous) domain signature
1267 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1268 	 * The domain signature state SIGST_EXIT is used as the panic flow
1269 	 * progresses.
1270 	 *
1271 	 * At the end of the panic flow the reboot occurs, but we want to note
1272 	 * that it was involuntary, something that may be quite useful to
1273 	 * know at OBP level.
1274 	 */
1275 	if (sub_state == SIGSUBST_REBOOT) {
1276 		if (iosram_read_ptr == NULL) {
1277 			DCMNERR(CE_WARN,
1278 			    "cpu_sgn_update: iosram_read() not found;"
1279 			    " could not check current domain signature\n");
1280 		} else {
1281 			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1282 				SG_SGNBLK_DOMAINSIG_OFFSET,
1283 				(char *)&current_sgn, sizeof (current_sgn));
1284 			if (current_sgn.state_t.state == SIGST_EXIT)
1285 				signature = CPU_SIG_BLD(sig, state,
1286 					SIGSUBST_PANIC_REBOOT);
1287 		}
1288 	}
1289 
1290 	/*
1291 	 * cpuid == -1 indicates that the operation applies to all cpus.
1292 	 */
1293 	if (cpuid >= 0) {
1294 		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1295 			SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1296 			sizeof (signature));
1297 	} else {
1298 		for (i = 0; i < NCPU; i++) {
1299 			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1300 				(CPU_EXISTS|CPU_QUIESCED))) {
1301 				continue;
1302 			}
1303 			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1304 				SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1305 				sizeof (signature));
1306 		}
1307 	}
1308 
1309 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1310 		return;
1311 	}
1312 
1313 	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1314 		SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1315 		sizeof (signature));
1316 }
1317 
1318 void
1319 startup_platform(void)
1320 {
1321 }
1322 
1323 /*
1324  * A routine to convert a number (represented as a string) to
1325  * the integer value it represents.
1326  */
1327 
1328 static int
1329 isdigit(int ch)
1330 {
1331 	return (ch >= '0' && ch <= '9');
1332 }
1333 
1334 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
1335 
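/*
 * For example, strtoi("5.11", &ep) returns 5 and leaves ep pointing at
 * the '.'; digits are accumulated as a negative value to avoid
 * overflow at the most negative int.
 */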
1336 static int
1337 strtoi(char *p, char **pos)
1338 {
1339 	int n;
1340 	int c, neg = 0;
1341 
1342 	if (!isdigit(c = *p)) {
1343 		while (isspace(c))
1344 			c = *++p;
1345 		switch (c) {
1346 			case '-':
1347 				neg++;
1348 				/* FALLTHROUGH */
1349 			case '+':
1350 			c = *++p;
1351 		}
1352 		if (!isdigit(c)) {
1353 			if (pos != NULL)
1354 				*pos = p;
1355 			return (0);
1356 		}
1357 	}
1358 	for (n = '0' - c; isdigit(c = *++p); ) {
1359 		n *= 10; /* two steps to avoid unnecessary overflow */
1360 		n += '0' - c; /* accum neg to avoid surprises at MAX */
1361 	}
1362 	if (pos != NULL)
1363 		*pos = p;
1364 	return (neg ? n : -n);
1365 }
1366 
1367 /*
1368  * Get the three parts of the Serengeti PROM version.
1369  * Used for feature readiness tests.
1370  *
1371  * Return 0 if version extracted successfully, -1 otherwise.
1372  */
1373 
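/*
 * The <version> property is expected to look like "OBP x.y.z ...";
 * parsing starts just past the leading "OBP ", so a 5.11.200 flashprom
 * would yield *sysp == 5, *intfp == 11 and *bldp == 200.
 */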
1374 int
1375 sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1376 {
1377 	int plen;
1378 	char vers[512];
1379 	static pnode_t node;
1380 	static char version[] = "version";
1381 	char *verp, *ep;
1382 
1383 	node = prom_finddevice("/openprom");
1384 	if (node == OBP_BADNODE)
1385 		return (-1);
1386 
1387 	plen = prom_getproplen(node, version);
1388 	if (plen <= 0 || plen >= sizeof (vers))
1389 		return (-1);
1390 	(void) prom_getprop(node, version, vers);
1391 	vers[plen] = '\0';
1392 
1393 	/* Make sure it's an OBP flashprom */
1394 	if (vers[0] != 'O' || vers[1] != 'B' || vers[2] != 'P') {
1395 		cmn_err(CE_WARN, "sg_get_prom_version: "
1396 		    "unknown <version> string in </openprom>\n");
1397 		return (-1);
1398 	}
1399 	verp = &vers[4];
1400 
1401 	*sysp = strtoi(verp, &ep);
1402 	if (ep == verp || *ep != '.')
1403 		return (-1);
1404 	verp = ep + 1;
1405 
1406 	*intfp = strtoi(verp, &ep);
1407 	if (ep == verp || *ep != '.')
1408 		return (-1);
1409 	verp = ep + 1;
1410 
1411 	*bldp = strtoi(verp, &ep);
1412 	if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1413 		return (-1);
1414 	return (0);
1415 }
1416 
1417 /*
1418  * Return 0 if system board Dynamic Reconfiguration
1419  * is supported by the firmware, -1 otherwise.
1420  */
1421 int
1422 sg_prom_sb_dr_check(void)
1423 {
1424 	static int prom_res = 1;
1425 
1426 	if (prom_res == 1) {
1427 		int sys, intf, bld;
1428 		int rv;
1429 
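		/*
		 * System board DR requires 5.x firmware at 5.11.200 or
		 * later.  prom_res caches the result across calls:
		 * 1 = not yet checked, 0 = supported, -1 = not supported.
		 */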
1430 		rv = sg_get_prom_version(&sys, &intf, &bld);
1431 		if (rv == 0 && sys == 5 &&
1432 		    (intf >= 12 || (intf == 11 && bld >= 200))) {
1433 			prom_res = 0;
1434 		} else {
1435 			prom_res = -1;
1436 		}
1437 	}
1438 	return (prom_res);
1439 }
1440 
1441 /*
1442  * Return 0 if cPCI Dynamic Reconfiguration
1443  * is supported by the firmware, -1 otherwise.
1444  */
1445 int
1446 sg_prom_cpci_dr_check(void)
1447 {
1448 	/*
1449 	 * The version check is currently the same as for
1450 	 * system boards. Since the two DR sub-systems are
1451 	 * independent, this could change.
1452 	 */
1453 	return (sg_prom_sb_dr_check());
1454 }
1455 
1456 /*
1457  * Our implementation of this KDI op updates the CPU signature in the system
1458  * controller.  Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1459  * The Forth words we execute will, among other things, transform our OBP_SIG
1460  * into DBG_SIG.  They won't function properly if we try to use DBG_SIG.
1461  */
1462 static void
1463 sg_system_claim(void)
1464 {
1465 	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1466 }
1467 
1468 static void
1469 sg_system_release(void)
1470 {
1471 	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1472 }
1473 
1474 static void
1475 sg_console_claim(void)
1476 {
1477 	prom_serengeti_set_console_input(SGCN_OBP_STR);
1478 }
1479 
1480 static void
1481 sg_console_release(void)
1482 {
1483 	prom_serengeti_set_console_input(SGCN_CLNT_STR);
1484 }
1485 
1486 void
1487 plat_kdi_init(kdi_t *kdi)
1488 {
1489 	kdi->pkdi_system_claim = sg_system_claim;
1490 	kdi->pkdi_system_release = sg_system_release;
1491 	kdi->pkdi_console_claim = sg_console_claim;
1492 	kdi->pkdi_console_release = sg_console_release;
1493 }
1494