xref: /titanic_41/usr/src/uts/sun4u/serengeti/io/sbdp_cpu.c (revision fe1c642d06e14b412cd83ae2179303186ab08972)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * CPU management for serengeti DR
29  *
30  * There are three states a CPU can be in:
31  *
32  *	disconnected:		In reset
33  *	connect,unconfigured:	Idling in OBP's idle loop
34  *	configured:		Running Solaris
35  *
36  * State transitions:
37  *
38  *                connect              configure
39  *              ------------>         ------------>
40  * disconnected              connected             configured
41  *                          unconfigured
42  *              <-----------         <-------------
43  *                disconnect           unconfigure
44  *
45  * Firmware involvements
46  *
47  *              start_cpu(SC)
48  *      prom_serengeti_wakeupcpu(OBP)
49  *              ------------>         ------------------------->
50  * disconnected              connected                         configured
51  *                          unconfigured
52  *              <-----------          <-------------------------
53  *      prom_serengeti_cpu_off(OBP)  prom_serengeti_cpu_off(OBP)
54  *               stop_cpu(SC)        prom_serengeti_wakeupcpu(OBP)
55  *
56  * SIR (Software Initiated Reset) is used to unconfigure a CPU.
57  * After the CPU has completed flushing the caches, it issues an
58  * sir instruction to put itself through POST.  POST detects that
59  * it is an SIR, and re-enters OBP as a slave.  When the operation
60  * completes successfully, the CPU will be idling in OBP.
61  */
62 
63 #include <sys/obpdefs.h>
64 #include <sys/types.h>
65 #include <sys/cmn_err.h>
66 #include <sys/cpuvar.h>
67 #include <sys/membar.h>
68 #include <sys/x_call.h>
69 #include <sys/machsystm.h>
70 #include <sys/cpu_sgnblk_defs.h>
71 #include <sys/pte.h>
72 #include <vm/hat_sfmmu.h>
73 #include <sys/promif.h>
74 #include <sys/note.h>
75 #include <sys/vmsystm.h>
76 #include <vm/seg_kmem.h>
77 
78 #include <sys/sbd_ioctl.h>
79 #include <sys/sbd.h>
80 #include <sys/sbdp_priv.h>
81 #include <sys/sbdp_mem.h>
82 #include <sys/sbdp_error.h>
83 #include <sys/sgsbbc_iosram.h>
84 #include <sys/prom_plat.h>
85 #include <sys/cheetahregs.h>
86 
87 uint64_t	*sbdp_valp;
88 extern uint64_t	va_to_pa(void *);
89 static int	sbdp_cpu_ntries = 50000;
90 static int	sbdp_cpu_delay = 100;
91 void		sbdp_get_cpu_sram_addr(uint64_t, uint64_t);
92 static int	cpusram_map(caddr_t *, pgcnt_t *);
93 static void	cpusram_unmap(caddr_t *, pgcnt_t);
94 extern int	prom_serengeti_wakeupcpu(pnode_t);
95 extern int	prom_serengeti_cpu_off(pnode_t);
96 extern sbdp_wnode_t *sbdp_get_wnodep(int);
97 extern caddr_t	sbdp_shutdown_va;
98 static int	sbdp_prom_get_cpu(void *arg, int changed);
99 
100 
/*
 * Disconnect (put into reset) the CPU corresponding to dip.
 *
 * Sequence: mark the CPU as in-reset in the per-board state, ask OBP
 * to mark it as being in POST (prom_serengeti_cpu_off), then ask the
 * SC to stop it (sbdp_stop_cpu).  Returns 0 on success; on failure
 * hp->h_err is set to ESGT_STOPCPU and -1 is returned.
 */
int
sbdp_disconnect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	int		bd, wnode;
	sbdp_wnode_t	*wnodep;
	sbdp_bd_t	*bdp = NULL;	/* stays NULL until bd_mutex is held */
	int		rv = 0;
	processorid_t	cpu = cpuid;
	processorid_t	portid;
	static fn_t	f = "sbdp_disconnect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);

	/*
	 * Get board number and node number
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Grab the lock to prevent status threads from accessing
	 * registers on the CPU when it is being put into reset.
	 */
	wnodep = sbdp_get_wnodep(wnode);
	bdp = &wnodep->bds[bd];
	ASSERT(bdp);
	mutex_enter(&bdp->bd_mutex);

	/*
	 * Mark the CPU in reset.  This should be done before calling
	 * the SC because we won't know at which stage it failed if
	 * the SC call returns failure.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 1);

	/*
	 * Ask OBP to mark the CPU as in POST
	 */
	if (SBDP_INJECT_ERROR(f, 1) || prom_serengeti_cpu_off(nodeid) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to put the CPU into reset. If the first
	 * core is not present, the stop CPU interface needs
	 * to be called with the portid rather than the cpuid.
	 */
	portid = SG_CPUID_TO_PORTID(cpuid);
	if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
		cpu = portid;
	}

	if (SBDP_INJECT_ERROR(f, 2) || sbdp_stop_cpu(cpu) != 0) {

		rv = -1;
		goto out;
	}

out:
	/* bdp is non-NULL iff bd_mutex was taken above */
	if (bdp != NULL) {
		mutex_exit(&bdp->bd_mutex);
	}

	if (rv != 0) {
		sbdp_set_err(hp->h_err, ESGT_STOPCPU, NULL);
	}

	return (rv);
}
181 
/*
 * Connect (bring out of reset) the CPU corresponding to dip.
 *
 * If the CPU is currently marked in-reset, ask the SC to start it
 * (sbdp_start_cpu) and then ask OBP to wake it up
 * (prom_serengeti_wakeupcpu).  Once every present CPU on the board is
 * out of reset, the cached board info is refreshed.  Returns 0 on
 * success; on failure hp->h_err is set to ESGT_WAKEUPCPU and -1 is
 * returned.
 */
int
sbdp_connect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	sbd_error_t	*sep;
	int		i;
	int		bd, wnode;
	int		rv = 0;
	static fn_t	f = "sbdp_connect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	sep = hp->h_err;

	nodeid = ddi_get_nodeid(dip);

	/*
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to bring the CPU out of reset.
	 * At this point, the sb_dev_present bit is not set for the CPU.
	 * From sbd point of view the CPU is not present yet.  No
	 * status threads will try to read registers off the CPU.
	 * Since we are already holding sb_mutex, it is not necessary
	 * to grab the board mutex when checking and setting the
	 * cpus_in_reset bit.
	 */
	if (sbdp_is_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid))) {

		sbdp_wnode_t	*wnodep;
		sbdp_bd_t	*bdp = NULL;
		processorid_t	cpu = cpuid;
		processorid_t	portid;

		wnodep = sbdp_get_wnodep(wnode);
		bdp = &wnodep->bds[bd];
		ASSERT(bdp);

		/*
		 * If the first core is not present, the start CPU
		 * interface needs to be called with the portid rather
		 * than the cpuid.
		 */
		portid = SG_CPUID_TO_PORTID(cpuid);
		if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
			cpu = portid;
		}

		if (SBDP_INJECT_ERROR(f, 1) || sbdp_start_cpu(cpu) != 0) {

			rv = -1;
			goto out;
		}

		if (SBDP_INJECT_ERROR(f, 2) ||
		    prom_serengeti_wakeupcpu(nodeid) != 0) {

			rv = -1;
			goto out;
		}
	}

	/*
	 * Mark the CPU out of reset.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 0);

	/*
	 * Refresh the bd info
	 * we need to wait until all cpus are out of reset
	 */
	for (i = 0; i < SG_MAX_CPUS_PER_BD; i++)
		if (sbdp_is_cpu_present(wnode, bd, i) &&
		    sbdp_is_cpu_in_reset(wnode, bd, i) == 1) {
			break;
		}

	if (i == SG_MAX_CPUS_PER_BD) {
		/*
		 * All cpus are out of reset so it is safe to
		 * update the bd info
		 */
		sbdp_add_new_bd_info(wnode, bd);
	}

out:
	if (rv != 0)
		sbdp_set_err(sep, ESGT_WAKEUPCPU, NULL);

	return (rv);
}
282 
/*
 * Power on (restart) a CPU that is idling in OBP.  Called with
 * cpu_lock held.
 *
 * Wakes the CPU via OBP (safe guard in case it trapped into POST),
 * clears CPU_POWEROFF and cold-starts it with restart_other_cpu(),
 * then polls up to sbdp_cpu_ntries times (sbdp_cpu_delay us apart)
 * for the CPU to reach its idle thread.  Returns 0 on success, EBUSY
 * if OBP cannot wake the CPU.
 */
int
sbdp_cpu_poweron(struct cpu *cp)
{
	int		cpuid;
	int		ntries;
	pnode_t		nodeid;
	extern void	restart_other_cpu(int);
	static fn_t	f = "sbdp_cpu_poweron";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	ntries = sbdp_cpu_ntries;
	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	/*
	 * This is a safe guard in case the CPU has taken a trap
	 * and idling in POST.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		return (EBUSY);
	}

	cp->cpu_flags &= ~CPU_POWEROFF;

	/*
	 * NOTE: restart_other_cpu pauses cpus during the
	 *	slave cpu start.  This helps to quiesce the
	 *	bus traffic a bit which makes the tick sync
	 *	routine in the prom more robust.
	 */
	SBDP_DBG_CPU("%s: COLD START for cpu (%d)\n", f, cpuid);

	restart_other_cpu(cpuid);

	SBDP_DBG_CPU("after restarting other cpus\n");

	/*
	 * Wait for the cpu to reach its idle thread before
	 * we zap him with a request to blow away the mappings
	 * he (might) have for the sbdp_shutdown_asm code
	 * he may have executed on unconfigure.
	 */
	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
		DELAY(sbdp_cpu_delay);
		ntries--;
	}

	SBDP_DBG_CPU("%s: waited %d out of %d loops for cpu %d\n",
	    f, sbdp_cpu_ntries - ntries, sbdp_cpu_ntries, cpuid);

	return (0);
}
342 
343 
/* Physical base address and size of the CPU SRAM region */
#define	SBDP_CPU_SRAM_ADDR	0x7fff0900000ull
#define	SBDP_CPU_SRAM_SIZE	0x20000ull

/* TOC key identifying the CPU SRAM region used for the shutdown code */
static const char cpyren_key[] = "COPYREN";

/*
 * Physical address and size of the cpyren_key region, filled in by
 * sbdp_get_cpu_sram_addr() (x-called from sbdp_cpu_poweroff()) and
 * consumed by sbdp_cpu_stop_self().
 */
static uint64_t bbsram_pa;
static uint_t bbsram_size;

/* Argument block handed to the sbdp_get_cpu_sram_addr() cross call */
typedef struct {
	caddr_t		vaddr;		/* mapped VA of the CPU SRAM TOC */
	pgcnt_t		npages;		/* pages mapped at vaddr */
	uint64_t	*pa;		/* out: PA of the keyed region */
	uint_t		*size;		/* out: size of the keyed region */
} sbdp_cpu_sram_map_t;
358 
/*
 * Power off a CPU: take it out of the Solaris domain and leave it
 * idling in OBP.  Called with cpu_lock held.
 *
 * The CPU SRAM region keyed by cpyren_key is located by cross calling
 * the target CPU (sbdp_get_cpu_sram_addr), the target is quiesced,
 * OBP is told the CPU is going away, and the CPU is x-trapped into
 * sbdp_cpu_shutdown_self(), which ends in an SIR that sends it
 * through POST back into OBP.  Returns 0 on success, EBUSY on
 * failure.
 */
int
sbdp_cpu_poweroff(struct cpu *cp)
{
	processorid_t	cpuid;
	static void	sbdp_cpu_shutdown_self(void);
	pnode_t		nodeid;
	sbdp_cpu_sram_map_t	map;
	static fn_t	f = "sbdp_cpu_poweroff";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	/* clear the shutdown progress word (see sbdp_cpu_shutdown_self) */
	*sbdp_valp = 0ull;
	/*
	 * Do the cpu sram mapping now.  This avoids problems with
	 * mutexes and high PILS
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {
		return (EBUSY);
	}

	map.pa = &bbsram_pa;
	map.size = &bbsram_size;

	/*
	 * Do a cross call to the cpu so it obtains the base address
	 */
	xc_one(cpuid, sbdp_get_cpu_sram_addr, (uint64_t)&map,
	    (uint64_t)NULL);

	cpusram_unmap(&map.vaddr, map.npages);

	if (SBDP_INJECT_ERROR(f, 1) || bbsram_size == 0) {
		cmn_err(CE_WARN, "cpu%d: Key \"%s\" missing from CPU SRAM TOC",
		    cpuid, cpyren_key);
		return (EBUSY);
	}

	/* the shutdown code is mapped with an 8K TTE; PA must be aligned */
	if ((bbsram_pa & MMU_PAGEOFFSET) != 0) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" not page aligned, "
		    "offset = 0x%lx", cpuid, cpyren_key,
		    (bbsram_pa - (uint64_t)SBDP_CPU_SRAM_ADDR));
		return (EBUSY);
	}

	if (bbsram_size < MMU_PAGESIZE) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" too small, "
		    "size = 0x%x", cpuid, cpyren_key, bbsram_size);
		return (EBUSY);
	}

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPU's remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */

	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */

	mp_cpu_quiesce(cp);

	/* tell the prom the cpu is going away */
	if (SBDP_INJECT_ERROR(f, 2) || prom_serengeti_cpu_off(nodeid) != 0)
		return (EBUSY);

	/*
	 * An sir instruction is issued at the end of the shutdown
	 * routine to make the CPU go through POST and re-enter OBP.
	 */
	xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)sbdp_cpu_shutdown_self, 0);

	*sbdp_valp = 3ull;

	start_cpus();

	/*
	 * Wait until we reach the OBP idle loop or time out.
	 * prom_serengeti_wakeupcpu waits for up to 60 seconds for the
	 * CPU to reach OBP idle loop.
	 */
	if (SBDP_INJECT_ERROR(f, 3) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		/*
		 * If it fails here, we still consider the unconfigure
		 * operation as successful.
		 */
		cmn_err(CE_WARN, "cpu%d: CPU failed to enter OBP idle loop.\n",
		    cpuid);
	}

	ASSERT(!(CPU_IN_SET(cpu_ready_set, cpuid)));

	/* reset cached SRAM info for the next poweroff */
	bbsram_pa = 0;
	bbsram_size = 0;

	return (0);
}
481 
/*
 * Return the processor id for the cpu node behind dip, taken from its
 * "cpuid" property (or "portid" if "cpuid" is absent) and masked with
 * SG_CPU_ID_MASK.  Returns -1 on any failure; hp->h_err is set for a
 * missing/oversized "device_type" or a non-cpu node, but NOT when the
 * id properties themselves are missing.
 */
processorid_t
sbdp_get_cpuid(sbdp_handle_t *hp, dev_info_t *dip)
{
	int		cpuid;
	char		type[OBP_MAXPROPNAME];
	pnode_t		nodeid;
	sbd_error_t	*sep;
	static fn_t	f = "sbdp_get_cpuid";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);
	if (sbdp_is_node_bad(nodeid))
		return (-1);

	sep = hp->h_err;

	/* only read "device_type" if it fits in our buffer */
	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
	else {
		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
		return (-1);
	}

	if (strcmp(type, "cpu") != 0) {
		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
		return (-1);
	}

	/*
	 * Check to see if property "cpuid" exists first.
	 * If not, check for "portid".
	 */
	if (prom_getprop(nodeid, "cpuid", (caddr_t)&cpuid) == -1)
		if (prom_getprop(nodeid, "portid", (caddr_t)&cpuid) == -1) {

			return (-1);
	}

	return ((processorid_t)cpuid & SG_CPU_ID_MASK);
}
523 
524 int
525 sbdp_cpu_get_impl(sbdp_handle_t *hp, dev_info_t *dip)
526 {
527 	int		impl;
528 	char		type[OBP_MAXPROPNAME];
529 	pnode_t		nodeid;
530 	sbd_error_t	*sep;
531 	static fn_t	f = "sbdp_cpu_get_impl";
532 
533 	SBDP_DBG_FUNC("%s\n", f);
534 
535 	nodeid = ddi_get_nodeid(dip);
536 	if (sbdp_is_node_bad(nodeid))
537 		return (-1);
538 
539 	sep = hp->h_err;
540 
541 	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
542 		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
543 	else {
544 		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
545 		return (-1);
546 	}
547 
548 	if (strcmp(type, "cpu") != 0) {
549 		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
550 		return (-1);
551 	}
552 
553 	/*
554 	 * Get the implementation# property.
555 	 */
556 	if (prom_getprop(nodeid, "implementation#", (caddr_t)&impl) == -1)
557 		return (-1);
558 
559 	return (impl);
560 }
561 
/*
 * Callback argument for sbdp_prom_get_cpu(), used through
 * prom_tree_access() by sbdp_find_nearby_cpu_by_portid().
 */
struct sbdp_prom_get_node_args {
	pnode_t node;		/* current node */
	processorid_t portid;	/* portid we are looking for */
	pnode_t result_node;	/* node found with the above portid */
};
567 
/*
 * Find a sibling of nodeid (a child of its parent, other than nodeid
 * itself) carrying the given portid.  The search runs as a
 * prom_tree_access() callback (sbdp_prom_get_cpu), which sets
 * result_node to the match, or OBP_NONODE if no sibling matched.
 */
pnode_t
sbdp_find_nearby_cpu_by_portid(pnode_t nodeid, processorid_t portid)
{
	struct sbdp_prom_get_node_args arg;
	static fn_t	f = "sbdp_find_nearby_cpu_by_portid";

	SBDP_DBG_FUNC("%s\n", f);

	arg.node = nodeid;
	arg.portid = portid;
	/* result_node is always set by the callback */
	(void) prom_tree_access(sbdp_prom_get_cpu, &arg, NULL);

	return (arg.result_node);
}
582 
583 /*ARGSUSED*/
584 static int
585 sbdp_prom_get_cpu(void *arg, int changed)
586 {
587 	int	portid;
588 	pnode_t	parent, cur_node;
589 	struct sbdp_prom_get_node_args *argp = arg;
590 	static fn_t	f = "sbdp_prom_get_cpu";
591 
592 	SBDP_DBG_FUNC("%s\n", f);
593 
594 	parent = prom_parentnode(argp->node);
595 
596 	for (cur_node = prom_childnode(parent); cur_node != OBP_NONODE;
597 	    cur_node = prom_nextnode(cur_node)) {
598 
599 		if (prom_getprop(cur_node, OBP_PORTID, (caddr_t)&portid) < 0)
600 			continue;
601 
602 		if ((portid == argp->portid) && (cur_node != argp->node))
603 			break;
604 	}
605 
606 	argp->result_node = cur_node;
607 
608 	return (0);
609 }
610 
611 
612 /*
613  * A detaching CPU is xcalled with an xtrap to sbdp_cpu_stop_self() after
614  * it has been offlined. The function of this routine is to get the cpu
615  * spinning in a safe place. The requirement is that the system will not
616  * reference anything on the detaching board (memory and i/o is detached
617  * elsewhere) and that the CPU not reference anything on any other board
618  * in the system.  This isolation is required during and after the writes
619  * to the domain masks to remove the board from the domain.
620  *
621  * To accomplish this isolation the following is done:
622  *	0) Map the CPUSRAM to obtain the correct address in SRAM
623  *      1) Create a locked mapping to a location in CPU SRAM where
624  *      the cpu will execute.
625  *      2) Copy the target function (sbdp_shutdown_asm) in which
626  *      the cpu will execute into CPU SRAM.
627  *      3) Jump into function with CPU SRAM.
628  *      Function will:
629  *      3.1) Flush its Ecache (displacement).
630  *      3.2) Flush its Dcache with HW mechanism.
631  *      3.3) Flush its Icache with HW mechanism.
632  *      3.4) Flush all valid and _unlocked_ D-TLB entries.
633  *      3.5) Flush all valid and _unlocked_ I-TLB entries.
634  *      4) Jump into a tight loop.
635  */
636 
/*
 * Final isolation step, executed on the detaching CPU (see the block
 * comment above for the full sequence).  'pa' is the physical address
 * of the progress word (*sbdp_valp); progress codes are stored there
 * with stdphys() so they remain visible without VM mappings.  Copies
 * sbdp_shutdown_asm into a locked CPU SRAM mapping and jumps to it.
 * Never returns.
 */
static void
sbdp_cpu_stop_self(uint64_t pa)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	tte_t		tte;
	volatile uint_t	*src, *dst;
	uint_t		funclen;
	sbdp_shutdown_t	sht;
	uint_t		bbsram_pfn;
	uint64_t	bbsram_addr;
	void		(*bbsram_func)(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm_end(void);

	/* the shutdown code must fit in the single locked 8K page */
	funclen = (uint_t)sbdp_shutdown_asm_end - (uint_t)sbdp_shutdown_asm;
	ASSERT(funclen <= MMU_PAGESIZE);
	/* validated by sbdp_cpu_poweroff() before we get here */
	ASSERT(bbsram_pa != 0);
	ASSERT((bbsram_pa & MMU_PAGEOFFSET) == 0);
	ASSERT(bbsram_size >= MMU_PAGESIZE);

	stdphys(pa, 3);		/* progress marker */
	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

	bbsram_addr = (uint64_t)sbdp_shutdown_va;
	sht.estack = bbsram_addr + MMU_PAGESIZE;
	sht.flushaddr = ecache_flushaddr;

	/*
	 * Build a valid, locked, privileged 8K TTE for the SRAM page
	 * and load it into both TLBs so the copied code can run after
	 * all unlocked entries are flushed.
	 */
	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
	    TTE_PFN_INTHI(bbsram_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
	sfmmu_dtlb_ld_kva(sbdp_shutdown_va, &tte); /* load dtlb */
	sfmmu_itlb_ld_kva(sbdp_shutdown_va, &tte); /* load itlb */

	/* copy sbdp_shutdown_asm into the SRAM mapping, word by word */
	for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
	    src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
	*dst = *src;

	bbsram_func = (void (*)())bbsram_addr;
	sht.size = (uint32_t)cpunodes[cpuid].ecache_size << 1;
	sht.linesize = (uint32_t)cpunodes[cpuid].ecache_linesize;
	sht.physaddr = pa;

	/*
	 * Signal to sbdp_cpu_poweroff() that we're just
	 * about done.
	 */
	cp->cpu_m.in_prom = 1;

	stdphys(pa, 4);		/* progress marker */
	(*bbsram_func)(&sht);
}
690 
/*
 * Cross-call target (see sbdp_cpu_poweroff()); runs on the CPU being
 * detached.  arg1 is a sbdp_cpu_sram_map_t pointer whose vaddr maps
 * the CPU SRAM TOC.  Searches the TOC for the cpyren_key entry and
 * stores the region's physical address and size through map->pa and
 * map->size; stores 0/0 if the key is absent.
 */
/* ARGSUSED */
void
sbdp_get_cpu_sram_addr(uint64_t arg1, uint64_t arg2)
{
	uint64_t	*pap;
	uint_t		*sizep;
	struct iosram_toc *tocp;
	uint_t		offset;
	uint_t		size;
	sbdp_cpu_sram_map_t *map;
	int		i;
	fn_t		f = "sbdp_get_cpu_sram_addr";

	SBDP_DBG_FUNC("%s\n", f);

	map = (sbdp_cpu_sram_map_t *)arg1;
	tocp = (struct iosram_toc *)map->vaddr;
	pap = map->pa;
	sizep = map->size;

	/* linear scan of the TOC for the key we want */
	for (i = 0; i < tocp->iosram_tagno; i++) {
		if (strcmp(tocp->iosram_keys[i].key, cpyren_key) == 0)
			break;
	}
	if (i == tocp->iosram_tagno) {
		/* key not found: report "no region" to the caller */
		*pap = 0;
		*sizep = 0;
		return;
	}
	offset = tocp->iosram_keys[i].offset;
	size = tocp->iosram_keys[i].size;

	/*
	 * The address we want is the beginning of cpusram + offset
	 */
	*pap = SBDP_CPU_SRAM_ADDR + offset;

	*sizep = size;
}
730 
/*
 * Map the CPU SRAM (SBDP_CPU_SRAM_ADDR, SBDP_CPU_SRAM_SIZE) into the
 * kernel heap with a locked, read-only mapping.  On success *vaddrp
 * points at the SRAM (including any intra-page offset) and *npp holds
 * the page count for cpusram_unmap().  Returns DDI_SUCCESS,
 * DDI_FAILURE if the target pfn is unexpectedly system memory, or
 * DDI_ME_NORESOURCES if VA allocation fails.
 */
static int
cpusram_map(caddr_t *vaddrp, pgcnt_t *npp)
{
	uint_t		pgoffset;
	pgcnt_t		npages;
	pfn_t		pfn;
	uint64_t	base;
	caddr_t		kaddr;
	uint_t		mapping_attr;

	base = (uint64_t)SBDP_CPU_SRAM_ADDR & (~MMU_PAGEOFFSET);
	pfn = mmu_btop(base);

	/*
	 * Do a quick sanity check to make sure we are in I/O space.
	 */
	if (pf_is_memory(pfn))
		return (DDI_FAILURE);

	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
	npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);

	kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
	if (kaddr == NULL)
		return (DDI_ME_NORESOURCES);

	mapping_attr = PROT_READ;
	/*
	 * Now map in the pages we've allocated...
	 */
	hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,
	    HAT_LOAD_LOCK);

	*vaddrp = kaddr + pgoffset;
	*npp = npages;

	return (DDI_SUCCESS);
}
769 
770 static void
771 cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)
772 {
773 	uint_t  pgoffset;
774 	caddr_t base;
775 	caddr_t addr = *vaddrp;
776 
777 
778 	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
779 	base = addr - pgoffset;
780 	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
781 	vmem_free(heap_arena, base, ptob(npages));
782 
783 	*vaddrp = 0;
784 }
785 
786 
/*
 * Entry point for the x-trap issued by sbdp_cpu_poweroff(); runs on
 * the detaching CPU.  Flushes register windows, raises PIL, marks the
 * CPU offline/quiesced/powered-off, records the detached signature
 * and hands off to sbdp_cpu_stop_self(), which never returns.
 * Progress codes (8, 6, 7) are written with stdphys() to the physical
 * address of *sbdp_valp along the way.
 */
static void
sbdp_cpu_shutdown_self(void)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	extern void	flush_windows(void);
	uint64_t	pa = va_to_pa((void *)sbdp_valp);

	stdphys(pa, 8);
	flush_windows();

	/* block all interrupts below level 8 from here on */
	(void) spl8();

	stdphys(pa, 6);

	ASSERT(cp->cpu_intr_actv == 0);
	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
	    cp->cpu_thread == cp->cpu_startup_thread);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

	stdphys(pa, 7);
	sbdp_cpu_stop_self(pa);

	/* not reached unless sbdp_cpu_stop_self() somehow returns */
	cmn_err(CE_PANIC, "sbdp_cpu_shutdown_self: CPU %d FAILED TO SHUTDOWN",
	    cpuid);
}
816 
/* Argument/result block for the sbdp_find_non_panther_cpus() walk */
typedef struct {
	int	node;			/* node id to match */
	int	board;			/* board number to match */
	int 	non_panther_cpus;	/* out: count of non-Panther cpus */
} sbdp_node_walk_t;
822 
/*
 * ddi_walk_devs() callback used by sbdp_board_non_panther_cpus().
 * For each cpu node on the requested node/board, increments
 * args->non_panther_cpus unless the implementation is Panther.
 * Unrecognized implementations are (conservatively) counted as
 * non-Panther, with an ASSERT to catch them in debug kernels.
 */
static int
sbdp_find_non_panther_cpus(dev_info_t *dip, void *node_args)
{
	int	impl, cpuid, portid;
	int	buflen;
	char	buf[OBP_MAXPROPNAME];
	sbdp_node_walk_t *args = (sbdp_node_walk_t *)node_args;

	/* skip nodes without a device_type property */
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, OBP_DEVICETYPE, (caddr_t)buf,
	    &buflen) != DDI_PROP_SUCCESS) {
		return (DDI_WALK_CONTINUE);
	}

	if (strcmp(buf, "cpu") != 0) {
		return (DDI_WALK_CONTINUE);
	}

	if ((impl = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "implementation#", -1)) == -1) {
		return (DDI_WALK_CONTINUE);
	}

	if ((cpuid = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "cpuid", -1)) == -1) {
		return (DDI_WALK_CONTINUE);
	}

	portid = SG_CPUID_TO_PORTID(cpuid);

	/* filter out nodes not on this board */
	if (SG_PORTID_TO_BOARD_NUM(portid) != args->board ||
	    SG_PORTID_TO_NODEID(portid) != args->node) {
		return (DDI_WALK_PRUNECHILD);
	}

	switch (impl) {
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
	case JAGUAR_IMPL:
		args->non_panther_cpus++;
		break;
	case PANTHER_IMPL:
		break;
	default:
		/* unknown implementation: count it, but flag in debug */
		ASSERT(0);
		args->non_panther_cpus++;
		break;
	}

	SBDP_DBG_CPU("cpuid=0x%x, portid=0x%x, impl=0x%x, device_type=%s",
	    cpuid, portid, impl, buf);

	return (DDI_WALK_CONTINUE);
}
878 
879 int
880 sbdp_board_non_panther_cpus(int node, int board)
881 {
882 	sbdp_node_walk_t arg = {0};
883 
884 	arg.node = node;
885 	arg.board = board;
886 
887 	/*
888 	 * Root node doesn't have to be held.
889 	 */
890 	ddi_walk_devs(ddi_root_node(), sbdp_find_non_panther_cpus,
891 	    (void *)&arg);
892 
893 	return (arg.non_panther_cpus);
894 }
895