/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/x_call.h>
#include <sys/cpu_module.h>
#include <sys/cpu_impl.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
extern struct cpu	*SIGBCPU;
#else
/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";
#endif /* _STARFIRE */

int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * cpuid and unit number should never be different
	 * than they were at discovery/connect time
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}
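
/*
 * Note: dr_cpu_unit_is_sane() always returns 1; callers in this file
 * invoke it only inside ASSERT(), e.g.:
 *
 *	ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
 *
 * so in non-DEBUG builds both the checks and the call compile away.
 */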

static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}
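
/*
 * Usage sketch (mirrors dr_attach_cpu() below): the errno returned by
 * cpu_configure()/cpu_unconfigure() is translated to an sbd error code
 * before being recorded against the failing unit, e.g.:
 *
 *	if ((rv = cpu_configure(cpuid)) != 0)
 *		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
 */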

static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
#ifndef _STARFIRE
		/*
		 * Do not report an error on Starfire since
		 * the dip will not be created until after
		 * the CPU has been configured.
		 */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
#endif /* !_STARFIRE */
		return;
	}

	/* read in the CPU speed */

	/*
	 * If the property is not found in the CPU node, it must live
	 * in the core or cmp node, so let the lookup walk up the tree.
	 */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */

	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
	case OLYMPUS_C_IMPL:
	case JUPITER_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it must live in the core or cmp node, so let
		 * the lookup walk up the tree.
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
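
/*
 * Worked example (illustrative values): a "clock-frequency" of
 * 1500000000 Hz yields (1500000000 + 500000) / 1000000 = 1500 MHz,
 * i.e. the +500000 rounds to the nearest MHz instead of truncating,
 * and an "ecache-size" of 8388608 bytes yields 8 MB.
 */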

void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
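
/*
 * Summary of the state derivation above:
 *
 *	attached		-> DR_STATE_CONFIGURED, SBD_COND_OK
 *	present, not attached	-> DR_STATE_CONNECTED,  SBD_COND_OK
 *	not present		-> DR_STATE_EMPTY,      SBD_COND_UNKNOWN
 *
 * A drmach failure while fetching the cpuid or implementation number
 * overrides the computed state with DR_STATE_FATAL.
 */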

int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n", f,
			    up->sbc_cpu_id);

			CPU_SGN_MAPOUT(up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}
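
/*
 * Note on lock order: dr_pre_attach_cpu() returns success holding the
 * board status lock, the root devinfo node and cpu_lock, acquired in
 * that order.  dr_attach_cpu() therefore runs with cpu_lock held, and
 * dr_post_attach_cpu() drops all three in the reverse order.
 */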

/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	}
}
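
/*
 * Error handling above: if the cpuid lookup or cpu_configure() fails
 * after drmach_configure() has already built the devinfo branch, the
 * branch is torn back down with DEVI_BRANCH_DESTROY; any secondary
 * error from the teardown is discarded with sbd_err_clear() so that
 * the original failure is the one reported against the unit.
 */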

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			} else {
				PR_CPU("%s: cpu %d powered ON\n", f,
				    up->sbc_cpu_id);
			}
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
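
/*
 * Unwind note: lastoffline above records the index of the last unit
 * this pass successfully offlined, so on failure the loop walks
 * devlist[lastoffline..0] through dr_cancel_cpu(), which re-onlines
 * any CPU that was active before the aborted release.
 */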

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}
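
/*
 * As in the attach path, dr_pre_detach_cpu() returns success with the
 * board status lock and cpu_lock still held; dr_detach_cpu() runs
 * under cpu_lock (see the ASSERT below) and the locks are dropped in
 * dr_post_detach_cpu().  Only the error path above releases them
 * directly.
 */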

/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}

/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

#ifdef _STARFIRE
	csp->cs_isbootproc = (SIGBCPU->cpu_id == cp->sbc_cpu_id) ? 1 : 0;
#endif /* _STARFIRE */

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	psp->ps_cpuid[0] = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {

		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}
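
/*
 * Worked example (illustrative values): on a two-core Jaguar CMP
 * where each core reports cs_ecache == 4 MB, the split ecache is
 * summed and the chip reports ps_ecache == 8 MB.  On the other
 * implementations the ecache is shared, so the value cached for
 * core 0 is reported unchanged.
 */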

int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {

		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */

		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}
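
/*
 * Usage sketch (as in dr_pre_release_cpu() above): the caller supplies
 * one stat slot per possible CPU unit and gets back the number of
 * entries actually filled in, one per CMP (a lone CPU is treated as a
 * single-core CMP):
 *
 *	ds = kmem_zalloc(sizeof (sbd_dev_stat_t) *
 *	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);
 *	cix = dr_cpu_status(hp, DR_DEVS_PRESENT(hp->h_bd), ds);
 */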

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this means simply bringing cpus that
 * were offlined back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online; go ahead and
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * CPUs were never brought in, so they are still
		 * effectively disconnected; nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);
	else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}
954