xref: /titanic_52/usr/src/uts/sun4u/serengeti/io/sbdp_cpu.c (revision e0731422366620894c16c1ee6515551c5f00733d)
103831d35Sstevel /*
203831d35Sstevel  * CDDL HEADER START
303831d35Sstevel  *
403831d35Sstevel  * The contents of this file are subject to the terms of the
503831d35Sstevel  * Common Development and Distribution License (the "License").
603831d35Sstevel  * You may not use this file except in compliance with the License.
703831d35Sstevel  *
803831d35Sstevel  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
903831d35Sstevel  * or http://www.opensolaris.org/os/licensing.
1003831d35Sstevel  * See the License for the specific language governing permissions
1103831d35Sstevel  * and limitations under the License.
1203831d35Sstevel  *
1303831d35Sstevel  * When distributing Covered Code, include this CDDL HEADER in each
1403831d35Sstevel  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1503831d35Sstevel  * If applicable, add the following below this CDDL HEADER, with the
1603831d35Sstevel  * fields enclosed by brackets "[]" replaced with your own identifying
1703831d35Sstevel  * information: Portions Copyright [yyyy] [name of copyright owner]
1803831d35Sstevel  *
1903831d35Sstevel  * CDDL HEADER END
2003831d35Sstevel  */
2103831d35Sstevel 
2203831d35Sstevel /*
2307d06da5SSurya Prakki  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
2403831d35Sstevel  * Use is subject to license terms.
2503831d35Sstevel  */
2603831d35Sstevel 
2703831d35Sstevel /*
2803831d35Sstevel  * CPU management for serengeti DR
2903831d35Sstevel  *
3003831d35Sstevel  * There are three states a CPU can be in:
3103831d35Sstevel  *
3203831d35Sstevel  *	disconnected:		In reset
3303831d35Sstevel  *	connect,unconfigured:	Idling in OBP's idle loop
3403831d35Sstevel  *	configured:		Running Solaris
3503831d35Sstevel  *
3603831d35Sstevel  * State transitions:
3703831d35Sstevel  *
3803831d35Sstevel  *                connect              configure
3903831d35Sstevel  *              ------------>         ------------>
4003831d35Sstevel  * disconnected              connected             configured
4103831d35Sstevel  *                          unconfigured
4203831d35Sstevel  *              <-----------         <-------------
4303831d35Sstevel  *                disconnect           unconfigure
4403831d35Sstevel  *
4503831d35Sstevel  * Firmware involvements
4603831d35Sstevel  *
4703831d35Sstevel  *              start_cpu(SC)
4803831d35Sstevel  *      prom_serengeti_wakeupcpu(OBP)
4903831d35Sstevel  *              ------------>         ------------------------->
5003831d35Sstevel  * disconnected              connected                         configured
5103831d35Sstevel  *                          unconfigured
5203831d35Sstevel  *              <-----------          <-------------------------
5303831d35Sstevel  *      prom_serengeti_cpu_off(OBP)  prom_serengeti_cpu_off(OBP)
5403831d35Sstevel  *               stop_cpu(SC)        prom_serengeti_wakeupcpu(OBP)
5503831d35Sstevel  *
5603831d35Sstevel  * SIR (Software Initiated Reset) is used to unconfigure a CPU.
5703831d35Sstevel  * After the CPU has completed flushing the caches, it issues an
5803831d35Sstevel  * sir instruction to put itself through POST.  POST detects that
5903831d35Sstevel  * it is an SIR, and re-enters OBP as a slave.  When the operation
6003831d35Sstevel  * completes successfully, the CPU will be idling in OBP.
6103831d35Sstevel  */
6203831d35Sstevel 
6303831d35Sstevel #include <sys/obpdefs.h>
6403831d35Sstevel #include <sys/types.h>
6503831d35Sstevel #include <sys/cmn_err.h>
6603831d35Sstevel #include <sys/cpuvar.h>
6703831d35Sstevel #include <sys/membar.h>
6803831d35Sstevel #include <sys/x_call.h>
6903831d35Sstevel #include <sys/machsystm.h>
7003831d35Sstevel #include <sys/cpu_sgnblk_defs.h>
7103831d35Sstevel #include <sys/pte.h>
7203831d35Sstevel #include <vm/hat_sfmmu.h>
7303831d35Sstevel #include <sys/promif.h>
7403831d35Sstevel #include <sys/note.h>
7503831d35Sstevel #include <sys/vmsystm.h>
7603831d35Sstevel #include <vm/seg_kmem.h>
7703831d35Sstevel 
7803831d35Sstevel #include <sys/sbd_ioctl.h>
7903831d35Sstevel #include <sys/sbd.h>
8003831d35Sstevel #include <sys/sbdp_priv.h>
8103831d35Sstevel #include <sys/sbdp_mem.h>
8203831d35Sstevel #include <sys/sbdp_error.h>
8303831d35Sstevel #include <sys/sgsbbc_iosram.h>
8403831d35Sstevel #include <sys/prom_plat.h>
8503831d35Sstevel #include <sys/cheetahregs.h>
8603831d35Sstevel 
8703831d35Sstevel uint64_t	*sbdp_valp;
8803831d35Sstevel extern uint64_t	va_to_pa(void *);
8903831d35Sstevel static int	sbdp_cpu_ntries = 50000;
9003831d35Sstevel static int	sbdp_cpu_delay = 100;
9103831d35Sstevel void		sbdp_get_cpu_sram_addr(uint64_t, uint64_t);
9203831d35Sstevel static int	cpusram_map(caddr_t *, pgcnt_t *);
9303831d35Sstevel static void	cpusram_unmap(caddr_t *, pgcnt_t);
9403831d35Sstevel extern int	prom_serengeti_wakeupcpu(pnode_t);
9503831d35Sstevel extern int	prom_serengeti_cpu_off(pnode_t);
9603831d35Sstevel extern sbdp_wnode_t *sbdp_get_wnodep(int);
9703831d35Sstevel extern caddr_t	sbdp_shutdown_va;
9803831d35Sstevel static int	sbdp_prom_get_cpu(void *arg, int changed);
998682d1efSRichard Lowe static void	sbdp_cpu_shutdown_self(void);
10003831d35Sstevel 
/*
 * Disconnect (put into reset) the CPU described by dip/cpuid.
 * Ordering: mark the CPU in-reset first, then have OBP flag it as in
 * POST, then ask the SC to actually stop it.  On any failure the
 * handle's error is set to ESGT_STOPCPU and -1 is returned; 0 on
 * success.
 */
int
sbdp_disconnect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	int		bd, wnode;
	sbdp_wnode_t	*wnodep;
	sbdp_bd_t	*bdp = NULL;	/* non-NULL only once bd_mutex is held */
	int		rv = 0;
	processorid_t	cpu = cpuid;	/* id handed to the SC stop interface */
	processorid_t	portid;
	static fn_t	f = "sbdp_disconnect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);

	/*
	 * Get board number and node number
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Grab the lock to prevent status threads from accessing
	 * registers on the CPU when it is being put into reset.
	 */
	wnodep = sbdp_get_wnodep(wnode);
	bdp = &wnodep->bds[bd];
	ASSERT(bdp);
	mutex_enter(&bdp->bd_mutex);

	/*
	 * Mark the CPU in reset.  This should be done before calling
	 * the SC because we won't know at which stage it failed if
	 * the SC call returns failure.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 1);

	/*
	 * Ask OBP to mark the CPU as in POST
	 */
	if (SBDP_INJECT_ERROR(f, 1) || prom_serengeti_cpu_off(nodeid) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to put the CPU into reset. If the first
	 * core is not present, the stop CPU interface needs
	 * to be called with the portid rather than the cpuid.
	 */
	portid = SG_CPUID_TO_PORTID(cpuid);
	if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
		cpu = portid;
	}

	if (SBDP_INJECT_ERROR(f, 2) || sbdp_stop_cpu(cpu) != 0) {

		rv = -1;
		goto out;
	}

out:
	/* bdp was only assigned after the mutex was taken above */
	if (bdp != NULL) {
		mutex_exit(&bdp->bd_mutex);
	}

	if (rv != 0) {
		sbdp_set_err(hp->h_err, ESGT_STOPCPU, NULL);
	}

	return (rv);
}
18103831d35Sstevel 
/*
 * Connect a CPU that was previously disconnected.  If the CPU is
 * still marked in-reset, ask the SC to start it and OBP to wake it
 * into the idle loop, then clear the in-reset mark.  Once every
 * present CPU on the board is out of reset the board info is
 * refreshed.  Sets ESGT_WAKEUPCPU on failure and returns -1;
 * returns 0 on success.
 */
int
sbdp_connect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	sbd_error_t	*sep;
	int		i;
	int		bd, wnode;
	int		rv = 0;
	static fn_t	f = "sbdp_connect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	sep = hp->h_err;

	nodeid = ddi_get_nodeid(dip);

	/*
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to bring the CPU out of reset.
	 * At this point, the sb_dev_present bit is not set for the CPU.
	 * From sbd point of view the CPU is not present yet.  No
	 * status threads will try to read registers off the CPU.
	 * Since we are already holding sb_mutex, it is not necessary
	 * to grab the board mutex when checking and setting the
	 * cpus_in_reset bit.
	 */
	if (sbdp_is_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid))) {

		sbdp_wnode_t	*wnodep;
		sbdp_bd_t	*bdp = NULL;
		processorid_t	cpu = cpuid;	/* id for the SC start call */
		processorid_t	portid;

		wnodep = sbdp_get_wnodep(wnode);
		bdp = &wnodep->bds[bd];
		ASSERT(bdp);

		/*
		 * If the first core is not present, the start CPU
		 * interface needs to be called with the portid rather
		 * than the cpuid.
		 */
		portid = SG_CPUID_TO_PORTID(cpuid);
		if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
			cpu = portid;
		}

		/* SC first, then OBP wakeup (see firmware diagram above) */
		if (SBDP_INJECT_ERROR(f, 1) || sbdp_start_cpu(cpu) != 0) {

			rv = -1;
			goto out;
		}

		if (SBDP_INJECT_ERROR(f, 2) ||
		    prom_serengeti_wakeupcpu(nodeid) != 0) {

			rv = -1;
			goto out;
		}
	}

	/*
	 * Mark the CPU out of reset.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 0);

	/*
	 * Refresh the bd info
	 * we need to wait until all cpus are out of reset
	 */
	for (i = 0; i < SG_MAX_CPUS_PER_BD; i++)
		if (sbdp_is_cpu_present(wnode, bd, i) &&
		    sbdp_is_cpu_in_reset(wnode, bd, i) == 1) {
			break;
		}

	if (i == SG_MAX_CPUS_PER_BD) {
		/*
		 * All cpus are out of reset so it is safe to
		 * update the bd info
		 */
		sbdp_add_new_bd_info(wnode, bd);
	}

out:
	if (rv != 0)
		sbdp_set_err(sep, ESGT_WAKEUPCPU, NULL);

	return (rv);
}
28203831d35Sstevel 
28303831d35Sstevel int
28403831d35Sstevel sbdp_cpu_poweron(struct cpu *cp)
28503831d35Sstevel {
28603831d35Sstevel 	int		cpuid;
28703831d35Sstevel 	int		ntries;
28803831d35Sstevel 	pnode_t		nodeid;
28903831d35Sstevel 	extern void	restart_other_cpu(int);
29003831d35Sstevel 	static fn_t	f = "sbdp_cpu_poweron";
29103831d35Sstevel 
29203831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
29303831d35Sstevel 
29403831d35Sstevel 	ASSERT(MUTEX_HELD(&cpu_lock));
29503831d35Sstevel 
29603831d35Sstevel 	ntries = sbdp_cpu_ntries;
29703831d35Sstevel 	cpuid = cp->cpu_id;
29803831d35Sstevel 
29903831d35Sstevel 	nodeid = cpunodes[cpuid].nodeid;
30003831d35Sstevel 	ASSERT(nodeid != (pnode_t)0);
30103831d35Sstevel 
30203831d35Sstevel 	/*
30303831d35Sstevel 	 * This is a safe guard in case the CPU has taken a trap
30403831d35Sstevel 	 * and idling in POST.
30503831d35Sstevel 	 */
30603831d35Sstevel 	if (SBDP_INJECT_ERROR(f, 0) ||
30703831d35Sstevel 	    prom_serengeti_wakeupcpu(nodeid) != 0) {
30803831d35Sstevel 
30903831d35Sstevel 		return (EBUSY);
31003831d35Sstevel 	}
31103831d35Sstevel 
31203831d35Sstevel 	cp->cpu_flags &= ~CPU_POWEROFF;
31303831d35Sstevel 
31403831d35Sstevel 	/*
31503831d35Sstevel 	 * NOTE: restart_other_cpu pauses cpus during the
31603831d35Sstevel 	 *	slave cpu start.  This helps to quiesce the
31703831d35Sstevel 	 *	bus traffic a bit which makes the tick sync
31803831d35Sstevel 	 *	routine in the prom more robust.
31903831d35Sstevel 	 */
32003831d35Sstevel 	SBDP_DBG_CPU("%s: COLD START for cpu (%d)\n", f, cpuid);
32103831d35Sstevel 
32203831d35Sstevel 	restart_other_cpu(cpuid);
32303831d35Sstevel 
32403831d35Sstevel 	SBDP_DBG_CPU("after restarting other cpus\n");
32503831d35Sstevel 
32603831d35Sstevel 	/*
32703831d35Sstevel 	 * Wait for the cpu to reach its idle thread before
32803831d35Sstevel 	 * we zap him with a request to blow away the mappings
32903831d35Sstevel 	 * he (might) have for the sbdp_shutdown_asm code
33003831d35Sstevel 	 * he may have executed on unconfigure.
33103831d35Sstevel 	 */
33203831d35Sstevel 	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
33303831d35Sstevel 		DELAY(sbdp_cpu_delay);
33403831d35Sstevel 		ntries--;
33503831d35Sstevel 	}
33603831d35Sstevel 
33703831d35Sstevel 	SBDP_DBG_CPU("%s: waited %d out of %d loops for cpu %d\n",
33803831d35Sstevel 	    f, sbdp_cpu_ntries - ntries, sbdp_cpu_ntries, cpuid);
33903831d35Sstevel 
34003831d35Sstevel 	return (0);
34103831d35Sstevel }
34203831d35Sstevel 
34303831d35Sstevel 
34403831d35Sstevel #define	SBDP_CPU_SRAM_ADDR	0x7fff0900000ull
34503831d35Sstevel #define	SBDP_CPU_SRAM_SIZE	0x20000ull
34603831d35Sstevel 
34703831d35Sstevel static const char cpyren_key[] = "COPYREN";
34803831d35Sstevel 
34903831d35Sstevel static uint64_t bbsram_pa;
35003831d35Sstevel static uint_t bbsram_size;
35103831d35Sstevel 
/*
 * Descriptor passed (by pointer, cast to uint64_t) to the
 * sbdp_get_cpu_sram_addr() cross call.
 */
typedef struct {
	caddr_t		vaddr;		/* kernel VA of mapped CPU SRAM (TOC) */
	pgcnt_t		npages;		/* pages mapped at vaddr */
	uint64_t	*pa;		/* out: PA of the COPYREN region */
	uint_t		*size;		/* out: size of the COPYREN region */
} sbdp_cpu_sram_map_t;
35803831d35Sstevel 
/*
 * Power off (unconfigure) a CPU that has already been offlined.  The
 * COPYREN region of CPU SRAM is located via a cross call, all other
 * CPUs are paused, the target is quiesced and x-trapped into
 * sbdp_cpu_shutdown_self(), which flushes its caches from SRAM and
 * issues an SIR so the CPU re-enters OBP (see block comment at the
 * top of this file).  Must be called with cpu_lock held.
 * Returns 0 on success, EBUSY on failure.
 */
int
sbdp_cpu_poweroff(struct cpu *cp)
{
	processorid_t	cpuid;
	pnode_t		nodeid;
	sbdp_cpu_sram_map_t	map;
	static fn_t	f = "sbdp_cpu_poweroff";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 */
	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	/* NOTE(review): appears to be a handshake word for the shutdown
	 * sequence, cleared here and set to 3 below — confirm consumer. */
	*sbdp_valp = 0ull;
	/*
	 * Do the cpu sram mapping now.  This avoids problems with
	 * mutexes and high PILS
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {
		return (EBUSY);
	}

	/* results of the cross call land in these file-scope statics */
	map.pa = &bbsram_pa;
	map.size = &bbsram_size;

	/*
	 * Do a cross call to the cpu so it obtains the base address
	 */
	xc_one(cpuid, sbdp_get_cpu_sram_addr, (uint64_t)&map,
	    (uint64_t)NULL);

	cpusram_unmap(&map.vaddr, map.npages);

	/* bbsram_size == 0 means the COPYREN key was not in the TOC */
	if (SBDP_INJECT_ERROR(f, 1) || bbsram_size == 0) {
		cmn_err(CE_WARN, "cpu%d: Key \"%s\" missing from CPU SRAM TOC",
		    cpuid, cpyren_key);
		return (EBUSY);
	}

	if ((bbsram_pa & MMU_PAGEOFFSET) != 0) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" not page aligned, "
		    "offset = 0x%lx", cpuid, cpyren_key,
		    (bbsram_pa - (uint64_t)SBDP_CPU_SRAM_ADDR));
		return (EBUSY);
	}

	if (bbsram_size < MMU_PAGESIZE) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" too small, "
		    "size = 0x%x", cpuid, cpyren_key, bbsram_size);
		return (EBUSY);
	}

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPU's remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */

	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */

	mp_cpu_quiesce(cp);

	/* tell the prom the cpu is going away */
	if (SBDP_INJECT_ERROR(f, 2) || prom_serengeti_cpu_off(nodeid) != 0)
		return (EBUSY);

	/*
	 * An sir instruction is issued at the end of the shutdown
	 * routine to make the CPU go through POST and re-enter OBP.
	 */
	xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)sbdp_cpu_shutdown_self, 0);

	*sbdp_valp = 3ull;

	start_cpus();

	/*
	 * Wait until we reach the OBP idle loop or time out.
	 * prom_serengeti_wakeupcpu waits for up to 60 seconds for the
	 * CPU to reach OBP idle loop.
	 */
	if (SBDP_INJECT_ERROR(f, 3) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		/*
		 * If it fails here, we still consider the unconfigure
		 * operation as successful.
		 */
		cmn_err(CE_WARN, "cpu%d: CPU failed to enter OBP idle loop.\n",
		    cpuid);
	}

	ASSERT(!(CPU_IN_SET(cpu_ready_set, cpuid)));

	/* reset the cached SRAM location for the next poweroff */
	bbsram_pa = 0;
	bbsram_size = 0;

	return (0);
}
48003831d35Sstevel 
48103831d35Sstevel processorid_t
48203831d35Sstevel sbdp_get_cpuid(sbdp_handle_t *hp, dev_info_t *dip)
48303831d35Sstevel {
48403831d35Sstevel 	int		cpuid;
48503831d35Sstevel 	char		type[OBP_MAXPROPNAME];
48603831d35Sstevel 	pnode_t		nodeid;
48703831d35Sstevel 	sbd_error_t	*sep;
48803831d35Sstevel 	static fn_t	f = "sbdp_get_cpuid";
48903831d35Sstevel 
49003831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
49103831d35Sstevel 
49203831d35Sstevel 	nodeid = ddi_get_nodeid(dip);
49303831d35Sstevel 	if (sbdp_is_node_bad(nodeid))
49403831d35Sstevel 		return (-1);
49503831d35Sstevel 
49603831d35Sstevel 	sep = hp->h_err;
49703831d35Sstevel 
49803831d35Sstevel 	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
49903831d35Sstevel 		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
50003831d35Sstevel 	else {
50103831d35Sstevel 		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
50203831d35Sstevel 		return (-1);
50303831d35Sstevel 	}
50403831d35Sstevel 
50503831d35Sstevel 	if (strcmp(type, "cpu") != 0) {
50603831d35Sstevel 		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
50703831d35Sstevel 		return (-1);
50803831d35Sstevel 	}
50903831d35Sstevel 
51003831d35Sstevel 	/*
51103831d35Sstevel 	 * Check to see if property "cpuid" exists first.
51203831d35Sstevel 	 * If not, check for "portid".
51303831d35Sstevel 	 */
51403831d35Sstevel 	if (prom_getprop(nodeid, "cpuid", (caddr_t)&cpuid) == -1)
51503831d35Sstevel 		if (prom_getprop(nodeid, "portid", (caddr_t)&cpuid) == -1) {
51603831d35Sstevel 
51703831d35Sstevel 			return (-1);
51803831d35Sstevel 	}
51903831d35Sstevel 
52003831d35Sstevel 	return ((processorid_t)cpuid & SG_CPU_ID_MASK);
52103831d35Sstevel }
52203831d35Sstevel 
52303831d35Sstevel int
52403831d35Sstevel sbdp_cpu_get_impl(sbdp_handle_t *hp, dev_info_t *dip)
52503831d35Sstevel {
52603831d35Sstevel 	int		impl;
52703831d35Sstevel 	char		type[OBP_MAXPROPNAME];
52803831d35Sstevel 	pnode_t		nodeid;
52903831d35Sstevel 	sbd_error_t	*sep;
53003831d35Sstevel 	static fn_t	f = "sbdp_cpu_get_impl";
53103831d35Sstevel 
53203831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
53303831d35Sstevel 
53403831d35Sstevel 	nodeid = ddi_get_nodeid(dip);
53503831d35Sstevel 	if (sbdp_is_node_bad(nodeid))
53603831d35Sstevel 		return (-1);
53703831d35Sstevel 
53803831d35Sstevel 	sep = hp->h_err;
53903831d35Sstevel 
54003831d35Sstevel 	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
54103831d35Sstevel 		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
54203831d35Sstevel 	else {
54303831d35Sstevel 		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
54403831d35Sstevel 		return (-1);
54503831d35Sstevel 	}
54603831d35Sstevel 
54703831d35Sstevel 	if (strcmp(type, "cpu") != 0) {
54803831d35Sstevel 		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
54903831d35Sstevel 		return (-1);
55003831d35Sstevel 	}
55103831d35Sstevel 
55203831d35Sstevel 	/*
55303831d35Sstevel 	 * Get the implementation# property.
55403831d35Sstevel 	 */
55503831d35Sstevel 	if (prom_getprop(nodeid, "implementation#", (caddr_t)&impl) == -1)
55603831d35Sstevel 		return (-1);
55703831d35Sstevel 
55803831d35Sstevel 	return (impl);
55903831d35Sstevel }
56003831d35Sstevel 
/*
 * Argument block for sbdp_prom_get_cpu(), carried through
 * prom_tree_access() by sbdp_find_nearby_cpu_by_portid().
 */
struct sbdp_prom_get_node_args {
	pnode_t node;		/* current node */
	processorid_t portid;	/* portid we are looking for */
	pnode_t result_node;	/* node found with the above portid */
};
56603831d35Sstevel 
56703831d35Sstevel pnode_t
56803831d35Sstevel sbdp_find_nearby_cpu_by_portid(pnode_t nodeid, processorid_t portid)
56903831d35Sstevel {
57003831d35Sstevel 	struct sbdp_prom_get_node_args arg;
57103831d35Sstevel 	static fn_t	f = "sbdp_find_nearby_cpu_by_portid";
57203831d35Sstevel 
57303831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
57403831d35Sstevel 
57503831d35Sstevel 	arg.node = nodeid;
57603831d35Sstevel 	arg.portid = portid;
57703831d35Sstevel 	(void) prom_tree_access(sbdp_prom_get_cpu, &arg, NULL);
57803831d35Sstevel 
57903831d35Sstevel 	return (arg.result_node);
58003831d35Sstevel }
58103831d35Sstevel 
58203831d35Sstevel /*ARGSUSED*/
58303831d35Sstevel static int
58403831d35Sstevel sbdp_prom_get_cpu(void *arg, int changed)
58503831d35Sstevel {
58603831d35Sstevel 	int	portid;
58703831d35Sstevel 	pnode_t	parent, cur_node;
58803831d35Sstevel 	struct sbdp_prom_get_node_args *argp = arg;
58903831d35Sstevel 	static fn_t	f = "sbdp_prom_get_cpu";
59003831d35Sstevel 
59103831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
59203831d35Sstevel 
59303831d35Sstevel 	parent = prom_parentnode(argp->node);
59403831d35Sstevel 
59503831d35Sstevel 	for (cur_node = prom_childnode(parent); cur_node != OBP_NONODE;
59603831d35Sstevel 	    cur_node = prom_nextnode(cur_node)) {
59703831d35Sstevel 
59803831d35Sstevel 		if (prom_getprop(cur_node, OBP_PORTID, (caddr_t)&portid) < 0)
59903831d35Sstevel 			continue;
60003831d35Sstevel 
60103831d35Sstevel 		if ((portid == argp->portid) && (cur_node != argp->node))
60203831d35Sstevel 			break;
60303831d35Sstevel 	}
60403831d35Sstevel 
60503831d35Sstevel 	argp->result_node = cur_node;
60603831d35Sstevel 
60703831d35Sstevel 	return (0);
60803831d35Sstevel }
60903831d35Sstevel 
61003831d35Sstevel 
61103831d35Sstevel /*
61203831d35Sstevel  * A detaching CPU is xcalled with an xtrap to sbdp_cpu_stop_self() after
61303831d35Sstevel  * it has been offlined. The function of this routine is to get the cpu
61403831d35Sstevel  * spinning in a safe place. The requirement is that the system will not
61503831d35Sstevel  * reference anything on the detaching board (memory and i/o is detached
61603831d35Sstevel  * elsewhere) and that the CPU not reference anything on any other board
61703831d35Sstevel  * in the system.  This isolation is required during and after the writes
61803831d35Sstevel  * to the domain masks to remove the board from the domain.
61903831d35Sstevel  *
62003831d35Sstevel  * To accomplish this isolation the following is done:
62103831d35Sstevel  *	0) Map the CPUSRAM to obtain the correct address in SRAM
62203831d35Sstevel  *      1) Create a locked mapping to a location in CPU SRAM where
62303831d35Sstevel  *      the cpu will execute.
62403831d35Sstevel  *      2) Copy the target function (sbdp_shutdown_asm) in which
62503831d35Sstevel  *      the cpu will execute into CPU SRAM.
62603831d35Sstevel  *      3) Jump into function with CPU SRAM.
62703831d35Sstevel  *      Function will:
62803831d35Sstevel  *      3.1) Flush its Ecache (displacement).
62903831d35Sstevel  *      3.2) Flush its Dcache with HW mechanism.
63003831d35Sstevel  *      3.3) Flush its Icache with HW mechanism.
63103831d35Sstevel  *      3.4) Flush all valid and _unlocked_ D-TLB entries.
63203831d35Sstevel  *      3.5) Flush all valid and _unlocked_ I-TLB entries.
63303831d35Sstevel  *      4) Jump into a tight loop.
63403831d35Sstevel  */
63503831d35Sstevel 
static void
sbdp_cpu_stop_self(uint64_t pa)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	tte_t		tte;
	volatile uint_t	*src, *dst;
	size_t		funclen;
	sbdp_shutdown_t	sht;
	uint_t		bbsram_pfn;
	uint64_t	bbsram_addr;
	void		(*bbsram_func)(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm_end(void);

	/* The shutdown routine must fit in the single SRAM page we map */
	funclen = (uintptr_t)sbdp_shutdown_asm_end -
	    (uintptr_t)sbdp_shutdown_asm;
	ASSERT(funclen <= MMU_PAGESIZE);
	/* bbsram_pa/bbsram_size were filled in by sbdp_get_cpu_sram_addr() */
	ASSERT(bbsram_pa != 0);
	ASSERT((bbsram_pa & MMU_PAGEOFFSET) == 0);
	ASSERT(bbsram_size >= MMU_PAGESIZE);

	/* NOTE(review): progress marker written to pa — confirm consumer */
	stdphys(pa, 3);
	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

	bbsram_addr = (uint64_t)sbdp_shutdown_va;
	sht.estack = bbsram_addr + MMU_PAGESIZE;	/* top of SRAM page */
	sht.flushaddr = ecache_flushaddr;

	/*
	 * Build a locked, privileged 8K TTE for the SRAM page and load
	 * it into both TLBs so the copied code keeps its translation
	 * after the unlocked TLB entries are flushed (steps 3.4/3.5).
	 */
	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
	    TTE_PFN_INTHI(bbsram_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
	sfmmu_dtlb_ld_kva(sbdp_shutdown_va, &tte); /* load dtlb */
	sfmmu_itlb_ld_kva(sbdp_shutdown_va, &tte); /* load itlb */

	/* Copy sbdp_shutdown_asm, one word at a time, into CPU SRAM */
	for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
	    src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
	*dst = *src;

	bbsram_func = (void (*)())bbsram_addr;
	/* 2x E$ size — presumably for the displacement flush (step 3.1) */
	sht.size = (uint32_t)cpunodes[cpuid].ecache_size << 1;
	sht.linesize = (uint32_t)cpunodes[cpuid].ecache_linesize;
	sht.physaddr = pa;

	/*
	 * Signal to sbdp_cpu_poweroff() that we're just
	 * about done.
	 */
	cp->cpu_m.in_prom = 1;

	/* second progress marker, then jump into the SRAM copy (no return) */
	stdphys(pa, 4);
	(*bbsram_func)(&sht);
}
69003831d35Sstevel 
69103831d35Sstevel /* ARGSUSED */
69203831d35Sstevel void
69303831d35Sstevel sbdp_get_cpu_sram_addr(uint64_t arg1, uint64_t arg2)
69403831d35Sstevel {
69503831d35Sstevel 	uint64_t	*pap;
69603831d35Sstevel 	uint_t		*sizep;
69703831d35Sstevel 	struct iosram_toc *tocp;
69803831d35Sstevel 	uint_t		offset;
69903831d35Sstevel 	uint_t		size;
70003831d35Sstevel 	sbdp_cpu_sram_map_t *map;
70103831d35Sstevel 	int		i;
70203831d35Sstevel 	fn_t		f = "sbdp_get_cpu_sram_addr";
70303831d35Sstevel 
70403831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
70503831d35Sstevel 
70603831d35Sstevel 	map = (sbdp_cpu_sram_map_t *)arg1;
70703831d35Sstevel 	tocp = (struct iosram_toc *)map->vaddr;
70803831d35Sstevel 	pap = map->pa;
70903831d35Sstevel 	sizep = map->size;
71003831d35Sstevel 
71103831d35Sstevel 	for (i = 0; i < tocp->iosram_tagno; i++) {
71203831d35Sstevel 		if (strcmp(tocp->iosram_keys[i].key, cpyren_key) == 0)
71303831d35Sstevel 			break;
71403831d35Sstevel 	}
71503831d35Sstevel 	if (i == tocp->iosram_tagno) {
71603831d35Sstevel 		*pap = 0;
71703831d35Sstevel 		*sizep = 0;
71803831d35Sstevel 		return;
71903831d35Sstevel 	}
72003831d35Sstevel 	offset = tocp->iosram_keys[i].offset;
72103831d35Sstevel 	size = tocp->iosram_keys[i].size;
72203831d35Sstevel 
72303831d35Sstevel 	/*
72403831d35Sstevel 	 * The address we want is the begining of cpusram + offset
72503831d35Sstevel 	 */
72603831d35Sstevel 	*pap = SBDP_CPU_SRAM_ADDR + offset;
72703831d35Sstevel 
72803831d35Sstevel 	*sizep = size;
72903831d35Sstevel }
73003831d35Sstevel 
73103831d35Sstevel static int
73203831d35Sstevel cpusram_map(caddr_t *vaddrp, pgcnt_t *npp)
73303831d35Sstevel {
73403831d35Sstevel 	uint_t		pgoffset;
73503831d35Sstevel 	pgcnt_t		npages;
73603831d35Sstevel 	pfn_t		pfn;
73703831d35Sstevel 	uint64_t	base;
73803831d35Sstevel 	caddr_t		kaddr;
73903831d35Sstevel 	uint_t		mapping_attr;
74003831d35Sstevel 
74103831d35Sstevel 	base = (uint64_t)SBDP_CPU_SRAM_ADDR & (~MMU_PAGEOFFSET);
74203831d35Sstevel 	pfn = mmu_btop(base);
74303831d35Sstevel 
74403831d35Sstevel 	/*
74503831d35Sstevel 	 * Do a quick sanity check to make sure we are in I/O space.
74603831d35Sstevel 	 */
74703831d35Sstevel 	if (pf_is_memory(pfn))
74803831d35Sstevel 		return (DDI_FAILURE);
74903831d35Sstevel 
75003831d35Sstevel 	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
75103831d35Sstevel 	npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);
75203831d35Sstevel 
75303831d35Sstevel 	kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
75403831d35Sstevel 	if (kaddr == NULL)
75503831d35Sstevel 		return (DDI_ME_NORESOURCES);
75603831d35Sstevel 
75703831d35Sstevel 	mapping_attr = PROT_READ;
75803831d35Sstevel 	/*
75903831d35Sstevel 	 * Now map in the pages we've allocated...
76003831d35Sstevel 	 */
76103831d35Sstevel 	hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,
76203831d35Sstevel 	    HAT_LOAD_LOCK);
76303831d35Sstevel 
76403831d35Sstevel 	*vaddrp = kaddr + pgoffset;
76503831d35Sstevel 	*npp = npages;
76603831d35Sstevel 
76703831d35Sstevel 	return (DDI_SUCCESS);
76803831d35Sstevel }
76903831d35Sstevel 
77003831d35Sstevel static void
77103831d35Sstevel cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)
77203831d35Sstevel {
77303831d35Sstevel 	uint_t  pgoffset;
77403831d35Sstevel 	caddr_t base;
77503831d35Sstevel 	caddr_t addr = *vaddrp;
77603831d35Sstevel 
77703831d35Sstevel 
77803831d35Sstevel 	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
77903831d35Sstevel 	base = addr - pgoffset;
78003831d35Sstevel 	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
78103831d35Sstevel 	vmem_free(heap_arena, base, ptob(npages));
78203831d35Sstevel 
78303831d35Sstevel 	*vaddrp = 0;
78403831d35Sstevel }
78503831d35Sstevel 
78603831d35Sstevel 
/*
 * Shut down the CPU this code is executing on, in preparation for
 * power-off.  Never returns on success: the CPU parks itself via
 * sbdp_cpu_stop_self(); falling through to the end is a panic.
 *
 * Progress is reported by storing small state codes (8, 6, 7 below)
 * to the physical address of sbdp_valp; presumably these are polled
 * by the CPU coordinating the detach -- TODO confirm code meanings.
 */
static void
sbdp_cpu_shutdown_self(void)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	extern void	flush_windows(void);
	uint64_t	pa = va_to_pa((void *)sbdp_valp);

	/* NOTE(review): state 8 -- shutdown entered */
	stdphys(pa, 8);
	flush_windows();

	/* Raise the interrupt priority level to quiesce this CPU. */
	(void) spl8();

	/* NOTE(review): state 6 -- interrupts masked */
	stdphys(pa, 6);

	/* Nothing else may be running here: no active interrupts, and */
	/* only the idle or startup thread on this CPU. */
	ASSERT(cp->cpu_intr_actv == 0);
	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
	    cp->cpu_thread == cp->cpu_startup_thread);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

	/* NOTE(review): state 7 -- about to park; stop_self should not return */
	stdphys(pa, 7);
	sbdp_cpu_stop_self(pa);

	cmn_err(CE_PANIC, "sbdp_cpu_shutdown_self: CPU %d FAILED TO SHUTDOWN",
	    cpuid);
}
81603831d35Sstevel 
/*
 * Walk state for sbdp_find_non_panther_cpus(): identifies the
 * node/board being searched and accumulates the count of
 * non-Panther CPUs found there.
 */
typedef struct {
	int	node;			/* node id to match */
	int	board;			/* board number to match */
	int 	non_panther_cpus;	/* running count of non-Panther CPUs */
} sbdp_node_walk_t;
82203831d35Sstevel 
82303831d35Sstevel static int
82403831d35Sstevel sbdp_find_non_panther_cpus(dev_info_t *dip, void *node_args)
82503831d35Sstevel {
82603831d35Sstevel 	int	impl, cpuid, portid;
82703831d35Sstevel 	int	buflen;
82803831d35Sstevel 	char	buf[OBP_MAXPROPNAME];
82903831d35Sstevel 	sbdp_node_walk_t *args = (sbdp_node_walk_t *)node_args;
83003831d35Sstevel 
83103831d35Sstevel 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
83203831d35Sstevel 	    DDI_PROP_DONTPASS, OBP_DEVICETYPE, (caddr_t)buf,
83303831d35Sstevel 	    &buflen) != DDI_PROP_SUCCESS) {
83403831d35Sstevel 		return (DDI_WALK_CONTINUE);
83503831d35Sstevel 	}
83603831d35Sstevel 
83703831d35Sstevel 	if (strcmp(buf, "cpu") != 0) {
83803831d35Sstevel 		return (DDI_WALK_CONTINUE);
83903831d35Sstevel 	}
84003831d35Sstevel 
84103831d35Sstevel 	if ((impl = ddi_getprop(DDI_DEV_T_ANY, dip,
84203831d35Sstevel 	    DDI_PROP_DONTPASS, "implementation#", -1)) == -1) {
84303831d35Sstevel 		return (DDI_WALK_CONTINUE);
84403831d35Sstevel 	}
84503831d35Sstevel 
84603831d35Sstevel 	if ((cpuid = ddi_getprop(DDI_DEV_T_ANY, dip,
84703831d35Sstevel 	    DDI_PROP_DONTPASS, "cpuid", -1)) == -1) {
84803831d35Sstevel 		return (DDI_WALK_CONTINUE);
84903831d35Sstevel 	}
85003831d35Sstevel 
85103831d35Sstevel 	portid = SG_CPUID_TO_PORTID(cpuid);
85203831d35Sstevel 
85303831d35Sstevel 	/* filter out nodes not on this board */
85403831d35Sstevel 	if (SG_PORTID_TO_BOARD_NUM(portid) != args->board ||
85503831d35Sstevel 	    SG_PORTID_TO_NODEID(portid) != args->node) {
85603831d35Sstevel 		return (DDI_WALK_PRUNECHILD);
85703831d35Sstevel 	}
85803831d35Sstevel 
85903831d35Sstevel 	switch (impl) {
86003831d35Sstevel 	case CHEETAH_IMPL:
86103831d35Sstevel 	case CHEETAH_PLUS_IMPL:
86203831d35Sstevel 	case JAGUAR_IMPL:
86303831d35Sstevel 		args->non_panther_cpus++;
86403831d35Sstevel 		break;
86503831d35Sstevel 	case PANTHER_IMPL:
86603831d35Sstevel 		break;
86703831d35Sstevel 	default:
86803831d35Sstevel 		ASSERT(0);
86903831d35Sstevel 		args->non_panther_cpus++;
87003831d35Sstevel 		break;
87103831d35Sstevel 	}
87203831d35Sstevel 
87303831d35Sstevel 	SBDP_DBG_CPU("cpuid=0x%x, portid=0x%x, impl=0x%x, device_type=%s",
87403831d35Sstevel 	    cpuid, portid, impl, buf);
87503831d35Sstevel 
87603831d35Sstevel 	return (DDI_WALK_CONTINUE);
87703831d35Sstevel }
87803831d35Sstevel 
87903831d35Sstevel int
88003831d35Sstevel sbdp_board_non_panther_cpus(int node, int board)
88103831d35Sstevel {
88203831d35Sstevel 	sbdp_node_walk_t arg = {0};
88303831d35Sstevel 
88403831d35Sstevel 	arg.node = node;
88503831d35Sstevel 	arg.board = board;
88603831d35Sstevel 
88703831d35Sstevel 	/*
88803831d35Sstevel 	 * Root node doesn't have to be held.
88903831d35Sstevel 	 */
89003831d35Sstevel 	ddi_walk_devs(ddi_root_node(), sbdp_find_non_panther_cpus,
89103831d35Sstevel 	    (void *)&arg);
89203831d35Sstevel 
89303831d35Sstevel 	return (arg.non_panther_cpus);
89403831d35Sstevel }
895