xref: /illumos-gate/usr/src/uts/sun4u/lw8/os/lw8_platmod.c (revision c39996a7c853f35e9cf2fc40b30e0d2eec0e9996)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/time.h>
30 #include <sys/cpuvar.h>
31 #include <sys/dditypes.h>
32 #include <sys/ddipropdefs.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/sunddi.h>
35 #include <sys/esunddi.h>
36 #include <sys/sunndi.h>
37 #include <sys/platform_module.h>
38 #include <sys/errno.h>
39 #include <sys/conf.h>
40 #include <sys/modctl.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/prom_plat.h>
44 #include <sys/cmn_err.h>
45 #include <sys/sysmacros.h>
46 #include <sys/mem_cage.h>
47 #include <sys/kobj.h>
48 #include <sys/utsname.h>
49 #include <sys/cpu_sgnblk_defs.h>
50 #include <sys/atomic.h>
51 #include <sys/kdi_impl.h>
52 
53 #include <sys/sgsbbc.h>
54 #include <sys/sgsbbc_iosram.h>
55 #include <sys/sgsbbc_iosram_priv.h>
56 #include <sys/sgsbbc_mailbox.h>
57 #include <sys/sgsgn.h>
58 #include <sys/serengeti.h>
59 #include <sys/sgfrutypes.h>
60 #include <sys/machsystm.h>
61 #include <sys/sbd_ioctl.h>
62 #include <sys/sbd.h>
63 #include <sys/sbdp_mem.h>
64 #include <sys/sgcn.h>
65 
66 #include <sys/memnode.h>
67 #include <vm/vm_dep.h>
68 #include <vm/page.h>
69 
70 #include <sys/cheetahregs.h>
71 #include <sys/plat_ecc_unum.h>
72 #include <sys/plat_ecc_dimm.h>
73 
74 #include <sys/lgrp.h>
75 
76 static int sg_debug = 0;
77 
78 #ifdef DEBUG
79 #define	DCMNERR if (sg_debug) cmn_err
80 #else
81 #define	DCMNERR
82 #endif
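/*
 * On DEBUG kernels DCMNERR() logs through cmn_err() only when sg_debug
 * is non-zero; on non-DEBUG kernels the macro expands to nothing, so its
 * argument list is reduced to a harmless comma expression.
 */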
83 
84 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
85 
86 /* local functions */
87 static void cpu_sgn_update(ushort_t sgn, uchar_t state,
88     uchar_t sub_state, int cpuid);
89 
90 
91 /*
92  * Local data.
93  *
94  * iosram_write_ptr is a pointer to iosram_write().  Because of
95  * kernel dynamic linking, we can't get to the function by name,
96  * but we can look up its address, and store it in this variable
97  * instead.
98  *
99  * We include the extern for iosram_write() here not because we call
100  * it, but to force compilation errors if its prototype doesn't
101  * match the prototype of iosram_write_ptr.
102  *
103  * The same issues apply to iosram_read() and iosram_read_ptr.
104  */
105 /*CSTYLED*/
106 extern int   iosram_write     (int, uint32_t, caddr_t, uint32_t);
107 static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
108 /*CSTYLED*/
109 extern int   iosram_read     (int, uint32_t, caddr_t, uint32_t);
110 static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
111 
112 
113 /*
114  * Variable to indicate if the date should be obtained from the SC or not.
115  */
116 int todsg_use_sc = FALSE;	/* set to FALSE at the beginning */
117 
118 /*
119  * Preallocation of spare tsb's for DR
120  *
121  * We don't allocate spares for Wildcat since TSBs should come
122  * out of memory local to the node.
123  */
124 #define	IOMMU_PER_SCHIZO	2
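/*
 * One spare TSB is reserved per IOMMU: each I/O board carries
 * SG_SCHIZO_PER_IO_BD Schizo bridges with IOMMU_PER_SCHIZO (two)
 * IOMMUs apiece, hence the product below.
 */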
125 int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
126 	IOMMU_PER_SCHIZO);
127 
128 /*
129  * sg_max_ncpus is the maximum number of CPUs supported on Serengeti
130  * and Wildcat at GA.  We assume that the maximum number of SSM nodes
131  * supported at GA is 4.  sg_max_ncpus is set to be smaller than NCPU
132  * to reduce the amount of memory the logs take up until we have a
133  * dynamic log memory allocation solution.
134  */
135 int sg_max_ncpus = (24 * 4);	/* (CPUs per node * max number of nodes) */
136 
137 /*
138  * variables to control mailbox message timeouts.
139  * These can be patched via /etc/system or mdb.
140  */
141 int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
142 int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
143 
144 /* cached 'chosen' node_id */
145 pnode_t chosen_nodeid = (pnode_t)0;
146 
147 /*
148  * Table that maps memory slices to a specific memnode.
149  */
150 int slice_to_memnode[SG_MAX_SLICE];
151 
152 /*
153  * We define and use LW8_MAX_CPU_BDS here instead of SG_MAX_CPU_BDS
154  * since an LW8 machine will never have a CPU/Mem board #5 (SB5).
155  * An LW8 machine can have at most three CPU/Mem boards, but the
156  * board numbers assigned are 0, 2, and 4.  LW8_MAX_CPU_BDS is
157  * defined to be 5 since the entries in the domain_dimm_sids array
158  * are keyed by board number.  Not perfect, but it avoids some of
159  * the wasted space.
160  */
161 #define	LW8_MAX_CPU_BDS		5
162 
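/*
 * Note that only indices 0, 2 and 4 of domain_dimm_sids are ever used
 * on LW8; entries 1 and 3 are simply unused padding.
 */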
163 plat_dimm_sid_board_t	domain_dimm_sids[LW8_MAX_CPU_BDS];
164 
165 int
166 set_platform_tsb_spares()
167 {
168 	return (MIN(serengeti_tsb_spares, MAX_UPA));
169 }
170 
171 #pragma weak mmu_init_large_pages
172 
173 void
174 set_platform_defaults(void)
175 {
176 	extern int watchdog_enable;
177 	extern uint64_t xc_tick_limit_scale;
178 	extern void mmu_init_large_pages(size_t);
179 
180 #ifdef DEBUG
181 	char *todsg_name = "todsg";
182 	ce_verbose_memory = 2;
183 	ce_verbose_other = 2;
184 #endif /* DEBUG */
185 
186 	watchdog_enable = TRUE;
187 	watchdog_available = TRUE;
188 
189 	cpu_sgn_func = cpu_sgn_update;
190 
191 #ifdef DEBUG
192 	/* tod_module_name should be set to "todsg" from OBP property */
193 	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
194 		prom_printf("Using todsg driver\n");
195 	else {
196 		prom_printf("Force using todsg driver\n");
197 		tod_module_name = todsg_name;
198 	}
199 #endif /* DEBUG */
200 
201 	/* lw8 does not support forthdebug */
202 	forthdebug_supported = 0;
203 
204 
205 	/*
206 	 * Some DR operations require the system to be sync paused.
207 	 * Sync pause on Serengeti could potentially take up to 4
208 	 * seconds to complete depending on the load on the SC.  To
209 	 * avoid send_mondo panics during such operations, we need to
210 	 * increase xc_tick_limit to a larger value on Serengeti by
211 	 * setting xc_tick_limit_scale to 5.
212 	 */
213 	xc_tick_limit_scale = 5;
214 
215 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
216 	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
217 		if (&mmu_init_large_pages)
218 			mmu_init_large_pages(mmu_ism_pagesize);
219 	}
220 }
221 
222 void
223 load_platform_modules(void)
224 {
225 	if (modload("misc", "pcihp") < 0) {
226 		cmn_err(CE_NOTE, "pcihp driver failed to load");
227 	}
228 }
229 
230 /*ARGSUSED*/
231 int
232 plat_cpu_poweron(struct cpu *cp)
233 {
234 	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
235 
236 	serengeti_cpu_poweron =
237 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
238 
239 	if (serengeti_cpu_poweron == NULL)
240 		return (ENOTSUP);
241 	else
242 		return ((serengeti_cpu_poweron)(cp));
243 }
244 
245 /*ARGSUSED*/
246 int
247 plat_cpu_poweroff(struct cpu *cp)
248 {
249 	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
250 
251 	serengeti_cpu_poweroff =
252 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
253 
254 	if (serengeti_cpu_poweroff == NULL)
255 		return (ENOTSUP);
256 	else
257 		return ((serengeti_cpu_poweroff)(cp));
258 }
259 
260 #ifdef DEBUG
261 pgcnt_t serengeti_cage_size_limit;
262 #endif
263 
264 /* Preferred minimum cage size (expressed in pages)... for DR */
265 pgcnt_t serengeti_minimum_cage_size = 0;
266 
267 void
268 set_platform_cage_params(void)
269 {
270 	extern pgcnt_t total_pages;
271 	extern struct memlist *phys_avail;
272 	int ret;
273 
274 	if (kernel_cage_enable) {
275 		pgcnt_t preferred_cage_size;
276 
277 		preferred_cage_size =
278 		    MAX(serengeti_minimum_cage_size, total_pages / 256);
279 #ifdef DEBUG
280 		if (serengeti_cage_size_limit)
281 			preferred_cage_size = serengeti_cage_size_limit;
282 #endif
283 		kcage_range_lock();
284 		/*
285 		 * POST copies OBP into the lowest slice.  This requires the
286 		 * cage to grow upwards.
287 		 */
288 		ret = kcage_range_init(phys_avail, 0);
289 		if (ret == 0)
290 			kcage_init(preferred_cage_size);
291 		kcage_range_unlock();
292 	}
293 
294 	/* Only note when the cage is off since it should always be on. */
295 	if (!kcage_on)
296 		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
297 }
298 
299 #define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
300 	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
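/*
 * ALIGN() rounds x up to the next multiple of a (a is assumed to be a
 * power of two), e.g. ALIGN(0x1001, 0x1000) == 0x2000 and
 * ALIGN(0x1000, 0x1000) == 0x1000; ALIGN(x, 0) is defined as x itself.
 */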
301 
302 void
303 update_mem_bounds(int brd, uint64_t base, uint64_t sz)
304 {
305 	uint64_t	end;
306 	int		mnode;
307 
308 	end = base + sz - 1;
309 
310 	/*
311 	 * First see if this board already has a memnode associated
312 	 * with it.  If not, see if this slice has a memnode.  This
313 	 * covers the cases where a single slice covers multiple
314 	 * boards (cross-board interleaving) and where a single
315 	 * board has multiple slices (1+GB DIMMs).
316 	 */
317 	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
318 		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
319 			mnode = mem_node_alloc();
320 		plat_assign_lgrphand_to_mem_node(brd, mnode);
321 	}
322 
323 	/*
324 	 * Align base at 16GB boundary
325 	 */
326 	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
327 
328 	while (base < end) {
329 		slice_to_memnode[PA_2_SLICE(base)] = mnode;
330 		base += (1ul << PA_SLICE_SHIFT);
331 	}
332 }
333 
334 /*
335  * Dynamically detect memory slices in the system by decoding
336  * the cpu memory decoder registers at boot time.
337  */
338 void
339 plat_fill_mc(pnode_t nodeid)
340 {
341 	uint64_t	mc_addr, mask;
342 	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
343 	uint64_t	base, size;
344 	uint32_t	regs[4];
345 	int		len;
346 	int		local_mc;
347 	int		portid;
348 	int		boardid;
349 	int		i;
350 
351 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
352 	    (portid == -1))
353 		return;
354 
355 	/*
356 	 * Decode the board number from the MC portid
357 	 */
358 	boardid = SG_PORTID_TO_BOARD_NUM(portid);
359 
360 	/*
361 	 * The "reg" property returns 4 32-bit values. The first two are
362 	 * combined to form a 64-bit address.  The second two are for a
363 	 * 64-bit size, but we don't actually need to look at that value.
364 	 */
365 	len = prom_getproplen(nodeid, "reg");
366 	if (len != (sizeof (uint32_t) * 4)) {
367 		prom_printf("Warning: malformed 'reg' property\n");
368 		return;
369 	}
370 	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
371 		return;
372 	mc_addr = ((uint64_t)regs[0]) << 32;
373 	mc_addr |= (uint64_t)regs[1];
374 
375 	/*
376 	 * Figure out whether the memory controller we are examining
377 	 * belongs to this CPU or a different one.
378 	 */
379 	if (portid == cpunodes[CPU->cpu_id].portid)
380 		local_mc = 1;
381 	else
382 		local_mc = 0;
383 
384 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
385 		mask = SG_REG_2_OFFSET(i);
386 
387 		/*
388 		 * If the memory controller is local to this CPU, we use
389 		 * the special ASI to read the decode registers.
390 		 * Otherwise, we load the values from a magic address in
391 		 * I/O space.
392 		 */
393 		if (local_mc)
394 			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
395 		else
396 			mc_decode[i] = lddphysio((mc_addr | mask));
397 
398 		if (mc_decode[i] >> MC_VALID_SHIFT) {
399 			/*
400 			 * The memory decode register is a bitmask field,
401 			 * so we can decode that into both a base and
402 			 * a span.
403 			 */
404 			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
405 			size = MC_UK2SPAN(mc_decode[i]);
406 			update_mem_bounds(boardid, base, size);
407 		}
408 	}
409 }
410 
411 /*
412  * This routine is run midway through the boot process.  By the time we get
413  * here, we know about all the active CPU boards in the system, and we have
414  * extracted information about each board's memory from the memory
415  * controllers.  We have also figured out which ranges of memory will be
416  * assigned to which memnodes, so we walk the slice table to build the table
417  * of memnodes.
418  */
419 /* ARGSUSED */
420 void
421 plat_build_mem_nodes(u_longlong_t *list, size_t  nelems)
422 {
423 	int	slice;
424 	pfn_t	basepfn;
425 	pgcnt_t	npgs;
426 
427 	mem_node_pfn_shift = PFN_SLICE_SHIFT;
428 	mem_node_physalign = (1ull << PA_SLICE_SHIFT);
429 
430 	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
431 		if (slice_to_memnode[slice] == -1)
432 			continue;
433 		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
434 		npgs = 1ull << PFN_SLICE_SHIFT;
435 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
436 	}
437 }
438 
439 int
440 plat_pfn_to_mem_node(pfn_t pfn)
441 {
442 	int node;
443 
444 	node = slice_to_memnode[PFN_2_SLICE(pfn)];
445 
446 	return (node);
447 }
448 
449 /*
450  * Serengeti support for lgroups.
451  *
452  * On Serengeti, an lgroup platform handle == board number.
453  *
454  * Mappings between lgroup handles and memnodes are managed
455  * in addition to mappings between memory slices and memnodes
456  * to support cross-board interleaving as well as multiple
457  * slices per board (e.g. >1GB DIMMs). The initial mapping
458  * of memnodes to lgroup handles is determined at boot time.
459  * A DR addition of memory adds a new mapping. A DR copy-rename
460  * swaps mappings.
461  */
462 
463 /*
464  * Macro for extracting the board number from the CPU id
465  */
466 #define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)
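/*
 * Bits 4:2 of the CPU id carry the board number; for example (purely
 * illustrative), CPU id 18 (0b10010) maps to board 4.
 */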
467 
468 /*
469  * Return the platform handle for the lgroup containing the given CPU
470  *
471  * For Serengeti, lgroup platform handle == board number
472  */
473 lgrp_handle_t
474 plat_lgrp_cpu_to_hand(processorid_t id)
475 {
476 	return (CPUID_TO_BOARD(id));
477 }
478 
479 /*
480  * Platform specific lgroup initialization
481  */
482 void
483 plat_lgrp_init(void)
484 {
485 	int i;
486 	extern uint32_t lgrp_expand_proc_thresh;
487 	extern uint32_t lgrp_expand_proc_diff;
488 
489 	/*
490 	 * Initialize lookup tables to invalid values so we catch
491 	 * any illegal use of them.
492 	 */
493 	for (i = 0; i < SG_MAX_SLICE; i++) {
494 		slice_to_memnode[i] = -1;
495 	}
496 
497 	/*
498 	 * Set tuneables for Serengeti architecture
499 	 *
500 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
501 	 * this process is currently running on before considering
502 	 * expanding threads to another lgroup.
503 	 *
504 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
505 	 * must be loaded before expanding to it.
506 	 *
507 	 * Bandwidth is maximized on Serengeti by spreading load across
508 	 * the machine. The impact to inter-thread communication isn't
509 	 * too costly since remote latencies are relatively low.  These
510 	 * values equate to one CPU's load and so attempt to spread the
511 	 * load out across as many lgroups as possible one CPU at a time.
512 	 */
513 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
514 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
515 }
516 
517 /*
518  * Platform notification of lgroup (re)configuration changes
519  */
520 /*ARGSUSED*/
521 void
522 plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
523 {
524 	update_membounds_t	*umb;
525 	lgrp_config_mem_rename_t lmr;
526 	lgrp_handle_t		shand, thand;
527 	int			snode, tnode;
528 
529 	switch (evt) {
530 
531 	case LGRP_CONFIG_MEM_ADD:
532 		umb = (update_membounds_t *)arg;
533 		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
534 
535 		break;
536 
537 	case LGRP_CONFIG_MEM_DEL:
538 		/* We don't have to do anything */
539 
540 		break;
541 
542 	case LGRP_CONFIG_MEM_RENAME:
543 		/*
544 		 * During a DR copy-rename operation, all of the memory
545 		 * on one board is moved to another board -- but the
546 		 * addresses/pfns and memnodes don't change. This means
547 		 * the memory has changed locations without changing identity.
548 		 *
549 		 * Source is where we are copying from and target is where we
550 		 * are copying to.  After source memnode is copied to target
551 		 * memnode, the physical addresses of the target memnode are
552 		 * renamed to match what the source memnode had.  Then target
553 		 * memnode can be removed and source memnode can take its
554 		 * place.
555 		 *
556 		 * To do this, swap the lgroup handle to memnode mappings for
557 		 * the boards, so target lgroup will have source memnode and
558 		 * source lgroup will have empty target memnode which is where
559 		 * its memory will go (if any is added to it later).
560 		 *
561 		 * Then source memnode needs to be removed from its lgroup
562 		 * and added to the target lgroup where the memory was living
563 		 * but under a different name/memnode.  The memory was in the
564 		 * target memnode and now lives in the source memnode with
565 		 * different physical addresses even though it is the same
566 		 * memory.
567 		 */
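		/*
		 * The source and target lgroup handles arrive packed into
		 * the low and high 16 bits of arg, respectively.
		 */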
568 		shand = arg & 0xffff;
569 		thand = (arg & 0xffff0000) >> 16;
570 		snode = plat_lgrphand_to_mem_node(shand);
571 		tnode = plat_lgrphand_to_mem_node(thand);
572 
573 		plat_assign_lgrphand_to_mem_node(thand, snode);
574 		plat_assign_lgrphand_to_mem_node(shand, tnode);
575 
576 		/*
577 		 * Remove source memnode of copy rename from its lgroup
578 		 * and add it to its new target lgroup
579 		 */
580 		lmr.lmem_rename_from = shand;
581 		lmr.lmem_rename_to = thand;
582 
583 		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
584 		    (uintptr_t)&lmr);
585 
586 		break;
587 
588 	default:
589 		break;
590 	}
591 }
592 
593 /*
594  * Return latency between "from" and "to" lgroups
595  *
596  * This latency number can only be used for relative comparison
597  * between lgroups on the running system, cannot be used across platforms,
598  * and may not reflect the actual latency.  It is platform and implementation
599  * specific, so platform gets to decide its value.  It would be nice if the
600  * number was at least proportional to make comparisons more meaningful though.
601  * NOTE: The numbers below are supposed to be load latencies for uncached
602  * memory divided by 10.
603  */
604 int
605 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
606 {
607 	/*
608 	 * Return min remote latency when there are more than two lgroups
609 	 * (root and child) and getting latency between two different lgroups
610 	 * or root is involved
611 	 */
612 	if (lgrp_optimizations() && (from != to ||
613 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
614 		return (28);
615 	else
616 		return (23);
617 }
618 
619 /* ARGSUSED */
620 void
621 plat_freelist_process(int mnode)
622 {
623 }
624 
625 /*
626  * Find dip for chosen IOSRAM
627  */
628 dev_info_t *
629 find_chosen_dip(void)
630 {
631 	dev_info_t	*dip;
632 	char		master_sbbc[MAXNAMELEN];
633 	int		nodeid;
634 	uint_t		tunnel;
635 
636 	/*
637 	 * find the /chosen SBBC node, prom interface will handle errors
638 	 */
639 	nodeid = prom_chosennode();
640 	/*
641 	 * get the 'iosram' property from the /chosen node
642 	 */
643 	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
644 		SBBC_ERR(CE_PANIC, "No iosram property found! \n");
645 	}
646 
647 	if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
648 	    sizeof (master_sbbc)) < 0) {
649 		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
650 		    tunnel);
651 	}
652 
653 	chosen_nodeid = nodeid;
654 
655 	/*
656 	 * load and attach the sgsbbc driver.
657 	 * This will also attach all the sgsbbc driver instances
658 	 */
659 	if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
660 		cmn_err(CE_WARN, "sgsbbc failed to load\n");
661 	}
662 	/* translate a path name to a dev_info_t */
663 	dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
664 	if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
665 		cmn_err(CE_PANIC,
666 		    "e_ddi_hold_devi_by_path(%x) failed for SBBC\n",
667 		    tunnel);
668 	}
669 
670 	/* make sure devi_ref is ZERO */
671 	ndi_rele_devi(dip);
672 	DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
673 
674 	return (dip);
675 }
676 
677 void
678 load_platform_drivers(void)
679 {
680 	int ret = 0;	/* initialized so the check below is safe if the
			 * watchdog is disabled */
681 
682 	/*
683 	 * Load the mc-us3 memory driver.
684 	 */
685 	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
686 		cmn_err(CE_WARN, "mc-us3 failed to load");
687 	else
688 		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
689 
690 	/*
691 	 * Initialize the chosen IOSRAM before its clients
692 	 * are loaded.
693 	 */
694 	(void) find_chosen_dip();
695 
696 	/*
697 	 * Load the environmentals driver (sgenv)
698 	 *
699 	 * We need this driver to handle events from the SC when state
700 	 * changes occur in the environmental data.
701 	 */
702 	if (i_ddi_attach_hw_nodes("sgenv") != DDI_SUCCESS)
703 		cmn_err(CE_WARN, "sgenv failed to load");
704 
705 	/*
706 	 * Ideally, we'd do this in set_platform_defaults(), but
707 	 * at that point it's too early to look up symbols.
708 	 */
709 	iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
710 	    modgetsymvalue("iosram_write", 0);
711 
712 	if (iosram_write_ptr == NULL) {
713 		DCMNERR(CE_WARN, "load_platform_drivers: iosram_write()"
714 		    " not found; signatures will not be updated\n");
715 	} else {
716 		/*
717 		 * The iosram read ptr is only needed if we can actually
718 		 * write CPU signatures, so only bother setting it if we
719 		 * set a valid write pointer, above.
720 		 */
721 		iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
722 		    modgetsymvalue("iosram_read", 0);
723 
724 		if (iosram_read_ptr == NULL)
725 			DCMNERR(CE_WARN, "load_platform_drivers: iosram_read()"
726 			    " not found\n");
727 	}
728 
729 	/*
730 	 * Set todsg_use_sc to TRUE so that we will be getting the date
731 	 * from the SC.
732 	 */
733 	todsg_use_sc = TRUE;
734 
735 	/*
736 	 * Now is a good time to activate hardware watchdog (if one exists).
737 	 */
738 	mutex_enter(&tod_lock);
739 	if (watchdog_enable)
740 		ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
741 	mutex_exit(&tod_lock);
742 	if (ret != 0)
743 		printf("Hardware watchdog enabled\n");
744 
745 	plat_ecc_init();
746 }
747 
748 /*
749  * No platform drivers on this platform
750  */
751 char *platform_module_list[] = {
752 	(char *)0
753 };
754 
755 /*ARGSUSED*/
756 void
757 plat_tod_fault(enum tod_fault_type tod_bad)
758 {
759 }
760 int
761 plat_max_boards()
762 {
763 	return (SG_MAX_BDS);
764 }
765 int
766 plat_max_io_units_per_board()
767 {
768 	return (SG_MAX_IO_PER_BD);
769 }
770 int
771 plat_max_cmp_units_per_board()
772 {
773 	return (SG_MAX_CMPS_PER_BD);
774 }
775 int
776 plat_max_cpu_units_per_board()
777 {
778 	return (SG_MAX_CPUS_PER_BD);
779 }
780 
781 int
782 plat_max_mc_units_per_board()
783 {
784 	return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
785 }
786 
787 int
788 plat_max_mem_units_per_board()
789 {
790 	return (SG_MAX_MEM_PER_BD);
791 }
792 
793 int
794 plat_max_cpumem_boards(void)
795 {
796 	return (LW8_MAX_CPU_BDS);
797 }
798 
799 int
800 set_platform_max_ncpus(void)
801 {
802 	return (sg_max_ncpus);
803 }
804 
805 void
806 plat_dmv_params(uint_t *hwint, uint_t *swint)
807 {
808 	*hwint = MAX_UPA;
809 	*swint = 0;
810 }
811 
812 static int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
813 
814 /*
815  * Our nodename has been set, pass it along to the SC.
816  */
817 void
818 plat_nodename_set(void)
819 {
820 	sbbc_msg_t	req;	/* request */
821 	sbbc_msg_t	resp;	/* response */
822 	int		rv;	/* return value from call to mbox */
823 	struct nodename_info {
824 		int32_t	namelen;
825 		char	nodename[_SYS_NMLN];
826 	} nni;
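	/*
	 * Note that only the length word plus namelen bytes of the name
	 * are shipped to the SC (see msg_len below), not the entire
	 * _SYS_NMLN buffer.
	 */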
827 
828 	/*
829 	 * find the symbol for the mailbox routine
830 	 */
831 	if (sg_mbox == NULL)
832 		sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
833 			modgetsymvalue("sbbc_mbox_request_response", 0);
834 
835 	if (sg_mbox == NULL) {
836 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
837 		return;
838 	}
839 
840 	/*
841 	 * construct the message telling the SC our nodename
842 	 */
843 	(void) strcpy(nni.nodename, utsname.nodename);
844 	nni.namelen = (int32_t)strlen(nni.nodename);
845 
846 	req.msg_type.type = INFO_MBOX;
847 	req.msg_type.sub_type = INFO_MBOX_NODENAME;
848 	req.msg_status = 0;
849 	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
850 	req.msg_bytes = 0;
851 	req.msg_buf = (caddr_t)&nni;
852 	req.msg_data[0] = 0;
853 	req.msg_data[1] = 0;
854 
855 	/*
856 	 * initialize the response back from the SC
857 	 */
858 	resp.msg_type.type = INFO_MBOX;
859 	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
860 	resp.msg_status = 0;
861 	resp.msg_len = 0;
862 	resp.msg_bytes = 0;
863 	resp.msg_buf = (caddr_t)0;
864 	resp.msg_data[0] = 0;
865 	resp.msg_data[1] = 0;
866 
867 	/*
868 	 * ship it and check for success
869 	 */
870 	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
871 
872 	if (rv != 0) {
873 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
874 	} else if (resp.msg_status != 0) {
875 		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
876 			resp.msg_status);
877 	} else {
878 		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
879 
880 		/*
881 		 * It is necessary to exchange the capability bitmap
882 		 * with the SC before sending any ECC error information
883 		 * or indictments. We call plat_ecc_capability_send()
884 		 * here, just after sending the nodename successfully.
885 		 */
886 		rv = plat_ecc_capability_send();
887 		if (rv == 0) {
888 			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
889 			    " successful\n");
890 		}
891 	}
892 }
893 
894 /*
895  * flag to allow users to switch between using OBP's
896  * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
897  * (for main memory errors only).
898  */
899 int sg_use_prom_get_unum = 0;
900 
901 /*
902  * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
903  * to call into the unum cache system.  This is the E$ equivalent of
904  * sg_use_prom_get_unum.
905  */
906 int sg_use_prom_ecache_unum = 0;
907 
908 /* used for logging ECC errors to the SC */
909 #define	SG_MEMORY_ECC	1
910 #define	SG_ECACHE_ECC	2
911 #define	SG_UNKNOWN_ECC	(-1)
912 
913 /*
914  * plat_get_mem_unum() generates a string identifying either the
915  * memory or E$ DIMM(s) during error logging. Depending on whether
916  * the error is E$ or memory related, the appropriate support
917  * routine is called to assist in the string generation.
918  *
919  * - For main memory errors we can use the mc-us3 driver's p2get_mem_unum()
920  *   (or prom_get_unum() for debugging purposes).
921  *
922  * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
923  *   prom_serengeti_get_ecacheunum() for debugging purposes).
924  */
925 
926 static int
927 sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
928     int *lenp)
929 {
930 	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
931 	    buf, buflen, lenp)) != 0)
932 		return (EIO);
933 	else if (*lenp <= 1)
934 		return (EINVAL);
935 	else
936 		return (0);
937 }
938 
939 /*ARGSUSED*/
940 int
941 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
942     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
943 {
944 	/*
945  * unum_func points to the memory driver's p2get_mem_unum(); when
946  * sg_use_prom_get_unum is set, prom_get_unum() is used instead.
947 	 */
948 	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
949 	    int buflen, int *lenp) = p2get_mem_unum;
950 
951 	/*
952 	 * check if it's a Memory or an Ecache error.
953 	 */
954 	if (flt_in_memory) {
955 		/*
956 		 * It's a main memory error.
957 		 *
958 		 * For debugging we allow the user to switch between
959 		 * using OBP's get_unum and the memory driver's get_unum
960 		 * so we create a pointer to the functions and switch
961 		 * depending on the sg_use_prom_get_unum flag.
962 		 */
963 		if (sg_use_prom_get_unum) {
964 			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
965 			return (sg_prom_get_unum(synd_code,
966 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
967 		} else if (unum_func != NULL) {
968 			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
969 			    buf, buflen, lenp));
970 		} else {
971 			return (ENOTSUP);
972 		}
973 	} else if (flt_status & ECC_ECACHE) {
974 		/*
975 		 * It's an E$ error.
976 		 */
977 		if (sg_use_prom_ecache_unum) {
978 			/*
979 			 * We call to OBP to handle this.
980 			 */
981 			DCMNERR(CE_NOTE,
982 			    "Using prom_serengeti_get_ecacheunum from OBP");
983 			if (prom_serengeti_get_ecacheunum(flt_bus_id,
984 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
985 				return (EIO);
986 			}
987 		} else {
988 			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
989 			    buf, buflen, lenp));
990 		}
991 	} else {
992 		return (ENOTSUP);
993 	}
994 
995 	return (0);
996 }
997 
998 /*
999  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
1000  * driver giving each platform the opportunity to add platform
1001  * specific label information to the unum for ECC error logging purposes.
1002  */
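/*
 * Purely as an illustration of the layout built here: the new unum is a
 * FRU path of the form /N<node>/SB<board>/P<position>/B<bank> with an
 * optional /D<dimm>, followed by the original mc-us3 unum, e.g.
 * something like "/N0/SB4/P0/B0/D3 J13500".
 */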
1003 void
1004 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
1005 {
1006 	char	new_unum[UNUM_NAMLEN] = "";
1007 	int	node = SG_PORTID_TO_NODEID(mcid);
1008 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
1009 	int	position = SG_PORTID_TO_CPU_POSN(mcid);
1010 
1011 	/*
1012 	 * The mc-us3 driver deals with logical banks but for unum
1013 	 * purposes we need to use physical banks so that the correct
1014 	 * dimm can be physically located. Logical banks 0 and 2
1015 	 * make up physical bank 0. Logical banks 1 and 3 make up
1016 	 * physical bank 1. Here we do the necessary conversion.
1017 	 */
1018 	bank = (bank % 2);
1019 
1020 	if (dimm == -1) {
1021 		SG_SET_FRU_NAME_NODE(new_unum, node);
1022 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1023 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1024 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1025 
1026 	} else {
1027 		SG_SET_FRU_NAME_NODE(new_unum, node);
1028 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1029 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1030 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1031 		SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1032 
1033 		strcat(new_unum, " ");
1034 		strcat(new_unum, unum);
1035 	}
1036 
1037 	strcpy(unum, new_unum);
1038 }
1039 
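/*
 * Build the FRU name for a CPU, e.g. (illustratively) "/N0/SB4" for a
 * CPU on board 4 of node 0; SG_HPU_TYPE_CPU_BOARD_ID supplies the
 * board-type prefix.
 */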
1040 int
1041 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1042 {
1043 	int	node = SG_PORTID_TO_NODEID(cpuid);
1044 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1045 
1046 	if (snprintf(buf, buflen, "/N%d/%s%d", node,
1047 	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1048 		return (ENOSPC);
1049 	} else {
1050 		*lenp = strlen(buf);
1051 		return (0);
1052 	}
1053 }
1054 
1055 static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
1056 static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
1057 
1058 /*
1059  * We log all ECC errors to the SC so we send a mailbox
1060  * message to the SC passing it the relevant data.
1061  * ECC mailbox messages are sent via a taskq mechanism to
1062  * prevent impaired system performance during ECC floods.
1063  * Indictments have already passed through a taskq, so they
1064  * are not queued here.
1065  */
1066 int
1067 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1068 {
1069 	sbbc_ecc_mbox_t	*msgp;
1070 	uint16_t	msg_subtype;
1071 	int		sleep_flag, log_error;
1072 	size_t		msg_size;
1073 
1074 	if (sg_ecc_taskq_func == NULL) {
1075 		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1076 		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1077 		if (sg_ecc_taskq_func == NULL) {
1078 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1079 			    "sbbc_mbox_queue_ecc_event not found");
1080 			return (ENODEV);
1081 		}
1082 	}
1083 	if (sg_ecc_mbox_func == NULL) {
1084 		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1085 		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
1086 		if (sg_ecc_mbox_func == NULL) {
1087 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1088 			    "sbbc_mbox_ecc_output not found");
1089 			return (ENODEV);
1090 		}
1091 	}
1092 
1093 	/*
1094 	 * Initialize the request and response structures
1095 	 */
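	/*
	 * Error messages are allocated KM_NOSLEEP and later handed to the
	 * taskq so that an ECC flood cannot block the caller; indictment,
	 * capability and DIMM-SID messages come from contexts that can
	 * block, so they use KM_SLEEP and go to the mailbox directly.
	 */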
1096 	switch (msg_type) {
1097 	case PLAT_ECC_ERROR_MESSAGE:
1098 		msg_subtype = INFO_MBOX_ERROR_ECC;
1099 		msg_size = sizeof (plat_ecc_error_data_t);
1100 		sleep_flag = KM_NOSLEEP;
1101 		log_error = 1;
1102 		break;
1103 	case PLAT_ECC_ERROR2_MESSAGE:
1104 		msg_subtype = INFO_MBOX_ECC;
1105 		msg_size = sizeof (plat_ecc_error2_data_t);
1106 		sleep_flag = KM_NOSLEEP;
1107 		log_error = 1;
1108 		break;
1109 	case PLAT_ECC_INDICTMENT_MESSAGE:
1110 		msg_subtype = INFO_MBOX_ERROR_INDICT;
1111 		msg_size = sizeof (plat_ecc_indictment_data_t);
1112 		sleep_flag = KM_SLEEP;
1113 		log_error = 0;
1114 		break;
1115 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1116 		msg_subtype = INFO_MBOX_ECC;
1117 		msg_size = sizeof (plat_ecc_indictment2_data_t);
1118 		sleep_flag = KM_SLEEP;
1119 		log_error = 0;
1120 		break;
1121 	case PLAT_ECC_CAPABILITY_MESSAGE:
1122 		msg_subtype = INFO_MBOX_ECC_CAP;
1123 		msg_size = sizeof (plat_capability_data_t) +
1124 		    strlen(utsname.release) + strlen(utsname.version) + 2;
1125 		sleep_flag = KM_SLEEP;
1126 		log_error = 0;
1127 		break;
1128 	case PLAT_ECC_DIMM_SID_MESSAGE:
1129 		msg_subtype = INFO_MBOX_ECC;
1130 		msg_size = sizeof (plat_dimm_sid_request_data_t);
1131 		sleep_flag = KM_SLEEP;
1132 		log_error = 0;
1133 		break;
1134 	default:
1135 		return (EINVAL);
1136 	}
1137 
1138 	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1139 		sleep_flag);
1140 	if (msgp == NULL) {
1141 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1142 				"unable to allocate sbbc_ecc_mbox");
1143 		return (ENOMEM);
1144 	}
1145 
1146 	msgp->ecc_log_error = log_error;
1147 
1148 	msgp->ecc_req.msg_type.type = INFO_MBOX;
1149 	msgp->ecc_req.msg_type.sub_type = msg_subtype;
1150 	msgp->ecc_req.msg_status = 0;
1151 	msgp->ecc_req.msg_len = (int)msg_size;
1152 	msgp->ecc_req.msg_bytes = 0;
1153 	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1154 	msgp->ecc_req.msg_data[0] = 0;
1155 	msgp->ecc_req.msg_data[1] = 0;
1156 
1157 	if (msgp->ecc_req.msg_buf == NULL) {
1158 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1159 				"unable to allocate request msg_buf");
1160 		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1161 		return (ENOMEM);
1162 	}
1163 
1164 	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1165 
1166 	/*
1167 	 * initialize the response back from the SC
1168 	 */
1169 	msgp->ecc_resp.msg_type.type = INFO_MBOX;
1170 	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1171 	msgp->ecc_resp.msg_status = 0;
1172 	msgp->ecc_resp.msg_len = 0;
1173 	msgp->ecc_resp.msg_bytes = 0;
1174 	msgp->ecc_resp.msg_buf = NULL;
1175 	msgp->ecc_resp.msg_data[0] = 0;
1176 	msgp->ecc_resp.msg_data[1] = 0;
1177 
1178 	switch (msg_type) {
1179 	case PLAT_ECC_ERROR_MESSAGE:
1180 	case PLAT_ECC_ERROR2_MESSAGE:
1181 		/*
1182 		 * For Error Messages, we go through a taskq.
1183 		 * Queue up message for processing
1184 		 */
1185 		(*sg_ecc_taskq_func)(msgp);
1186 		return (0);
1187 
1188 	case PLAT_ECC_CAPABILITY_MESSAGE:
1189 		/*
1190 		 * For indictment and capability messages, we've already gone
1191 		 * For indictment and capability messages, we've already gone
1192 		 * through the taskq, so we can call the mailbox routine
1193 		 * (resolved into sg_ecc_mbox_func at the top of this
1194 		 * function) directly.
1195 		msgp->ecc_resp.msg_len = (int)msg_size;
1196 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1197 		    sleep_flag);
1198 		/* FALLTHRU */
1199 
1200 	case PLAT_ECC_INDICTMENT_MESSAGE:
1201 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1202 		return ((*sg_ecc_mbox_func)(msgp));
1203 
1204 	case PLAT_ECC_DIMM_SID_MESSAGE:
1205 		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1206 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1207 		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1208 
1209 		return ((*sg_ecc_mbox_func)(msgp));
1210 
1211 	default:
1212 		ASSERT(0);
1213 		return (EINVAL);
1214 	}
1215 }
1216 
1217 /*
1218  * m is redundant on serengeti as the multiplier is always 4
1219  */
1220 /*ARGSUSED*/
1221 int
1222 plat_make_fru_cpuid(int sb, int m, int proc)
1223 {
1224 	return (MAKE_CPUID(sb, proc));
1225 }
1226 
1227 /*
1228  * board number for a given proc
1229  */
1230 int
1231 plat_make_fru_boardnum(int proc)
1232 {
1233 	return (SG_PORTID_TO_BOARD_NUM(proc));
1234 }
1235 
1236 static
1237 void
1238 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1239 {
1240 	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1241 	sig_state_t current_sgn;
1242 	int i;
1243 
1244 	if (iosram_write_ptr == NULL) {
1245 		/*
1246 		 * If the IOSRAM write pointer isn't set, we won't be able
1247 		 * to write signatures to ANYTHING, so we may as well just
1248 		 * write out an error message (if desired) and exit this
1249 		 * routine now...
1250 		 */
1251 		DCMNERR(CE_WARN,
1252 		    "cpu_sgn_update: iosram_write() not found;"
1253 		    " cannot write signature 0x%x for CPU(s) or domain\n",
1254 		    signature);
1255 		return;
1256 	}
1257 
1258 
1259 	/*
1260 	 * Differentiate a panic reboot from a non-panic reboot in the
1261 	 * setting of the substate of the signature.
1262 	 *
1263 	 * If the new substate is REBOOT and we're rebooting due to a panic,
1264 	 * then set the new substate to a special value indicating a panic
1265 	 * reboot, SIGSUBST_PANIC_REBOOT.
1266 	 *
1267 	 * A panic reboot is detected by a current (previous) domain signature
1268 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1269 	 * The domain signature state SIGST_EXIT is used as the panic flow
1270 	 * progresses.
1271 	 *
1272 	 * At the end of the panic flow the reboot occurs, but we now know it
1273 	 * was an involuntary one, something that may be quite useful to know
1274 	 * at OBP level.
1275 	 */
1276 	if (sub_state == SIGSUBST_REBOOT) {
1277 		if (iosram_read_ptr == NULL) {
1278 			DCMNERR(CE_WARN,
1279 			    "cpu_sgn_update: iosram_read() not found;"
1280 			    " could not check current domain signature\n");
1281 		} else {
1282 			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1283 				SG_SGNBLK_DOMAINSIG_OFFSET,
1284 				(char *)&current_sgn, sizeof (current_sgn));
1285 			if (current_sgn.state_t.state == SIGST_EXIT)
1286 				signature = CPU_SIG_BLD(sig, state,
1287 					SIGSUBST_PANIC_REBOOT);
1288 		}
1289 	}
1290 
1291 	/*
1292 	 * cpuid == -1 indicates that the operation applies to all cpus.
1293 	 */
1294 	if (cpuid >= 0) {
1295 		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1296 			SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1297 			sizeof (signature));
1298 	} else {
1299 		for (i = 0; i < NCPU; i++) {
1300 			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1301 				(CPU_EXISTS|CPU_QUIESCED))) {
1302 				continue;
1303 			}
1304 			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1305 				SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1306 				sizeof (signature));
1307 		}
1308 	}
1309 
1310 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1311 		return;
1312 	}
1313 
1314 	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1315 		SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1316 		sizeof (signature));
1317 }
1318 
1319 void
1320 startup_platform(void)
1321 {
1322 }
1323 
1324 /*
1325  * A routine to convert a number (represented as a string) to
1326  * the integer value it represents.
1327  */
1328 
1329 static int
1330 isdigit(int ch)
1331 {
1332 	return (ch >= '0' && ch <= '9');
1333 }
1334 
1335 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
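/*
 * For example, strtoi("5.13", &ep) returns 5 and leaves ep pointing at
 * the '.'; leading whitespace and an optional sign are accepted, and a
 * string with no digits returns 0.
 */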
1336 
1337 static int
1338 strtoi(char *p, char **pos)
1339 {
1340 	int n;
1341 	int c, neg = 0;
1342 
1343 	if (!isdigit(c = *p)) {
1344 		while (isspace(c))
1345 			c = *++p;
1346 		switch (c) {
1347 			case '-':
1348 				neg++;
1349 				/* FALLTHROUGH */
1350 			case '+':
1351 			c = *++p;
1352 		}
1353 		if (!isdigit(c)) {
1354 			if (pos != NULL)
1355 				*pos = p;
1356 			return (0);
1357 		}
1358 	}
1359 	for (n = '0' - c; isdigit(c = *++p); ) {
1360 		n *= 10; /* two steps to avoid unnecessary overflow */
1361 		n += '0' - c; /* accum neg to avoid surprises at MAX */
1362 	}
1363 	if (pos != NULL)
1364 		*pos = p;
1365 	return (neg ? n : -n);
1366 }
1367 
1368 /*
1369  * Get the three parts of the Serengeti PROM version.
1370  * Used for feature readiness tests.
1371  *
1372  * Return 0 if version extracted successfully, -1 otherwise.
1373  */
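/*
 * The /openprom "version" property is expected to look something like
 * "OBP 5.13.14 ..." (illustrative); the three dot-separated numbers
 * after the "OBP " prefix are returned as *sysp, *intfp and *bldp.
 */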
1374 
1375 int
1376 sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1377 {
1378 	int plen;
1379 	char vers[512];
1380 	static pnode_t node;
1381 	static char version[] = "version";
1382 	char *verp, *ep;
1383 
1384 	node = prom_finddevice("/openprom");
1385 	if (node == OBP_BADNODE)
1386 		return (-1);
1387 
1388 	plen = prom_getproplen(node, version);
1389 	if (plen <= 0 || plen >= sizeof (vers))
1390 		return (-1);
1391 	(void) prom_getprop(node, version, vers);
1392 	vers[plen] = '\0';
1393 
1394 	/* Make sure it's an OBP flashprom */
1395 	if (vers[0] != 'O' || vers[1] != 'B' || vers[2] != 'P') {
1396 		cmn_err(CE_WARN, "sg_get_prom_version: "
1397 		    "unknown <version> string in </openprom>\n");
1398 		return (-1);
1399 	}
1400 	verp = &vers[4];
1401 
1402 	*sysp = strtoi(verp, &ep);
1403 	if (ep == verp || *ep != '.')
1404 		return (-1);
1405 	verp = ep + 1;
1406 
1407 	*intfp = strtoi(verp, &ep);
1408 	if (ep == verp || *ep != '.')
1409 		return (-1);
1410 	verp = ep + 1;
1411 
1412 	*bldp = strtoi(verp, &ep);
1413 	if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1414 		return (-1);
1415 	return (0);
1416 }
1417 
1418 /*
1419  * Return 0 if system board Dynamic Reconfiguration
1420  * is supported by the firmware, -1 otherwise.
1421  */
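/*
 * Per the version test below, system board DR needs a 5.x firmware at
 * interface level 12 or higher, or 5.11 with build 200 or later.
 */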
1422 int
1423 sg_prom_sb_dr_check(void)
1424 {
1425 	static int prom_res = 1;
1426 
1427 	if (prom_res == 1) {
1428 		int sys, intf, bld;
1429 		int rv;
1430 
1431 		rv = sg_get_prom_version(&sys, &intf, &bld);
1432 		if (rv == 0 && sys == 5 &&
1433 		    (intf >= 12 || (intf == 11 && bld >= 200))) {
1434 			prom_res = 0;
1435 		} else {
1436 			prom_res = -1;
1437 		}
1438 	}
1439 	return (prom_res);
1440 }
1441 
1442 /*
1443  * Return 0 if cPCI Dynamic Reconfiguration
1444  * is supported by the firmware, -1 otherwise.
1445  */
1446 int
1447 sg_prom_cpci_dr_check(void)
1448 {
1449 	/*
1450 	 * The version check is currently the same as for
1451 	 * system boards. Since the two DR sub-systems are
1452 	 * independent, this could change.
1453 	 */
1454 	return (sg_prom_sb_dr_check());
1455 }
1456 
1457 /*
1458  * Our implementation of this KDI op updates the CPU signature in the system
1459  * controller.  Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1460  * The Forth words we execute will, among other things, transform our OBP_SIG
1461  * into DBG_SIG.  They won't function properly if we try to use DBG_SIG.
1462  */
1463 static void
1464 sg_system_claim(void)
1465 {
1466 	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1467 }
1468 
1469 static void
1470 sg_system_release(void)
1471 {
1472 	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1473 }
1474 
1475 static void
1476 sg_console_claim(void)
1477 {
1478 	prom_serengeti_set_console_input(SGCN_OBP_STR);
1479 }
1480 
1481 static void
1482 sg_console_release(void)
1483 {
1484 	prom_serengeti_set_console_input(SGCN_CLNT_STR);
1485 }
1486 
1487 void
1488 plat_kdi_init(kdi_t *kdi)
1489 {
1490 	kdi->pkdi_system_claim = sg_system_claim;
1491 	kdi->pkdi_system_release = sg_system_release;
1492 	kdi->pkdi_console_claim = sg_console_claim;
1493 	kdi->pkdi_console_release = sg_console_release;
1494 }
1495