xref: /titanic_52/usr/src/uts/i86pc/io/dr/dr_cpu.c (revision 269e59f9a28bf47e0f463e64fc5af4a408b73b21)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2010, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 /*
32  * CPU support routines for DR
33  */
34 
35 #include <sys/note.h>
36 #include <sys/debug.h>
37 #include <sys/types.h>
38 #include <sys/errno.h>
39 #include <sys/dditypes.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/ndi_impldefs.h>
44 #include <sys/kmem.h>
45 #include <sys/processor.h>
46 #include <sys/cpuvar.h>
47 #include <sys/promif.h>
48 #include <sys/sysmacros.h>
49 #include <sys/archsystm.h>
50 #include <sys/machsystm.h>
51 #include <sys/cpu_module.h>
52 #include <sys/cmn_err.h>
53 
54 #include <sys/dr.h>
55 #include <sys/dr_util.h>
56 
57 /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
58 static char *dr_ie_fmt = "dr_cpu.c %d";
59 
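/*
 * dr_cpu_unit_is_sane
 *
 * Sanity check that the CPU unit belongs to the given board and is of
 * type SBD_COMP_CPU.  The checks are compiled in only on DEBUG kernels;
 * the function always returns 1 so it can be used inside ASSERT().
 */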
60 int
61 dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
62 {
63 #ifdef DEBUG
64 	ASSERT(cp->sbc_cm.sbdev_bp == bp);
65 	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
66 #else
67 	_NOTE(ARGUNUSED(bp))
68 	_NOTE(ARGUNUSED(cp))
69 #endif
70 
71 	return (1);
72 }
73 
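/*
 * dr_errno2ecode
 *
 * Map a system errno value (EBUSY, EINVAL, EALREADY, ENODEV, ENOMEM)
 * to the corresponding ESBD_* error code; anything unrecognized maps
 * to ESBD_INVAL.
 */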
74 static int
75 dr_errno2ecode(int error)
76 {
77 	int	rv;
78 
79 	switch (error) {
80 	case EBUSY:
81 		rv = ESBD_BUSY;
82 		break;
83 	case EINVAL:
84 		rv = ESBD_INVAL;
85 		break;
86 	case EALREADY:
87 		rv = ESBD_ALREADY;
88 		break;
89 	case ENODEV:
90 		rv = ESBD_NODEV;
91 		break;
92 	case ENOMEM:
93 		rv = ESBD_NOMEM;
94 		break;
95 	default:
96 		rv = ESBD_INVAL;
97 	}
98 
99 	return (rv);
100 }
101 
102 /*
103  * On x86, the "clock-frequency" and cache size device properties may be
104  * unavailable before the CPU starts. If they are unavailable, just set them to zero.
105  */
106 static void
107 dr_cpu_set_prop(dr_cpu_unit_t *cp)
108 {
109 	sbd_error_t	*err;
110 	dev_info_t	*dip;
111 	uint64_t	clock_freq;
112 	int		ecache_size = 0;
113 	char		*cache_str = NULL;
114 
115 	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
116 	if (err) {
117 		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
118 		return;
119 	}
120 
121 	if (dip == NULL) {
122 		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
123 		return;
124 	}
125 
126 	/* read in the CPU speed */
127 	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
128 	    DDI_PROP_DONTPASS, "clock-frequency", 0);
129 
130 	/*
131 	 * The ecache property string is not the same
132 	 * for all CPU implementations.
133 	 */
134 	switch (cp->sbc_cpu_impl) {
135 	case X86_CPU_IMPL_NEHALEM_EX:
136 		cache_str = "l3-cache-size";
137 		break;
138 	default:
139 		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
140 		    cp->sbc_cpu_impl);
141 		break;
142 	}
143 
144 	if (cache_str != NULL) {
145 		/*
146 		 * Read in the ecache size.
147 		 * If the property is not found in the CPU node,
148 		 * it is kept in the core or cmp node instead, so
149 		 * we keep looking up the tree.
150 		 */
151 
152 		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
153 		    cache_str, 0);
154 	}
155 
156 	/* convert the clock frequency to MHz and the cache size to MB */
157 	cp->sbc_speed = (clock_freq + 500000) / 1000000;
158 	cp->sbc_ecache = ecache_size / (1024 * 1024);
159 }
160 
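/*
 * dr_init_cpu_unit
 *
 * Initialize the state, condition, cpuid, implementation and cached
 * cpu_flags of a CPU unit based on whether the device is attached,
 * merely present, or empty, then transition it to the new state.
 */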
161 void
162 dr_init_cpu_unit(dr_cpu_unit_t *cp)
163 {
164 	sbd_error_t	*err;
165 	dr_state_t	new_state;
166 	int		cpuid;
167 	int		impl;
168 
169 	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
170 		new_state = DR_STATE_CONFIGURED;
171 		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
172 	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
173 		new_state = DR_STATE_CONNECTED;
174 		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
175 	} else {
176 		new_state = DR_STATE_EMPTY;
177 		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
178 	}
179 
180 	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
181 		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
182 		if (err) {
183 			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
184 			new_state = DR_STATE_FATAL;
185 			goto done;
186 		}
187 
188 		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
189 		if (err) {
190 			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
191 			new_state = DR_STATE_FATAL;
192 			goto done;
193 		}
194 	} else {
195 		cp->sbc_cpu_id = -1;
196 		cp->sbc_cpu_impl = -1;
197 		goto done;
198 	}
199 
200 	cp->sbc_cpu_id = cpuid;
201 	cp->sbc_cpu_impl = impl;
202 
203 	/* if true at init time, it must always be true */
204 	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));
205 
206 	mutex_enter(&cpu_lock);
207 	if ((cpuid >= 0) && cpu[cpuid])
208 		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
209 	else
210 		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
211 	mutex_exit(&cpu_lock);
212 
213 	dr_cpu_set_prop(cp);
214 
215 done:
216 	/* delay transition until fully initialized */
217 	dr_device_transition(&cp->sbc_cm, new_state);
218 }
219 
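/*
 * dr_pre_attach_cpu
 *
 * Announce each attachment point on the console, then take the locks
 * (status lock, root devinfo node, cpu_lock) that are held across the
 * attach and released in dr_post_attach_cpu.
 */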
220 int
221 dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
222 {
223 	int		i;
224 	static fn_t	f = "dr_pre_attach_cpu";
225 
226 	PR_CPU("%s...\n", f);
227 
228 	for (i = 0; i < devnum; i++) {
229 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
230 
231 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
232 
233 		/*
234 		 * Print a console message for each attachment
235 		 * point. For CMP devices, this means that only
236 		 * one message should be printed, no matter how
237 		 * many cores are actually present.
238 		 */
239 		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
240 			cmn_err(CE_CONT, "OS configure %s",
241 			    up->sbc_cm.sbdev_path);
242 		}
243 	}
244 
245 	/*
246 	 * Block out status threads while creating
247 	 * devinfo tree branches
248 	 */
249 	dr_lock_status(hp->h_bd);
250 	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
251 	mutex_enter(&cpu_lock);
252 
253 	return (0);
254 }
255 
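/*
 * dr_attach_cpu
 *
 * Configure the devinfo branch for the CPU and add the processor via
 * cpu_configure().  On failure, the devinfo branch is destroyed again.
 * Expects cpu_lock to be held by the caller.
 */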
256 /*ARGSUSED*/
257 void
258 dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
259 {
260 	sbd_error_t	*err;
261 	processorid_t	 cpuid;
262 	int		 rv;
263 
264 	ASSERT(MUTEX_HELD(&cpu_lock));
265 
266 	err = drmach_configure(cp->sbdev_id, 0);
267 	if (err) {
268 		DRERR_SET_C(&cp->sbdev_error, &err);
269 		return;
270 	}
271 
272 	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
273 	if (err) {
274 		DRERR_SET_C(&cp->sbdev_error, &err);
275 
276 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
277 		if (err)
278 			sbd_err_clear(&err);
279 	} else if ((rv = cpu_configure(cpuid)) != 0) {
280 		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
281 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
282 		if (err)
283 			sbd_err_clear(&err);
284 	} else {
285 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
286 		up->sbc_cpu_id = cpuid;
287 	}
288 }
289 
290 /*
291  * dr_post_attach_cpu
292  *
293  * sbd error policy: Does not stop on error.  Processes all units in list.
294  */
295 int
296 dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
297 {
298 	int		i;
299 	int		errflag = 0;
300 	static fn_t	f = "dr_post_attach_cpu";
301 
302 	PR_CPU("%s...\n", f);
303 
304 	/* Start up and online the newly attached CPUs */
305 	for (i = 0; i < devnum; i++) {
306 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
307 		struct cpu	*cp;
308 
309 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
310 
311 		cp = cpu_get(up->sbc_cpu_id);
312 		if (cp == NULL) {
313 			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
314 			    f, up->sbc_cpu_id);
315 			continue;
316 		}
317 
318 		if (cpu_is_poweredoff(cp)) {
319 			if (cpu_poweron(cp) != 0) {
320 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
321 				errflag = 1;
322 			}
323 			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
324 		}
325 
326 		if (cpu_is_offline(cp)) {
327 			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);
328 
329 			if (cpu_online(cp) != 0) {
330 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
331 				errflag = 1;
332 			}
333 		}
334 
335 	}
336 
337 	mutex_exit(&cpu_lock);
338 	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
339 	dr_unlock_status(hp->h_bd);
340 
341 	if (errflag)
342 		return (-1);
343 	else
344 		return (0);
345 }
346 
347 /*
348  * dr_pre_release_cpu
349  *
350  * sbd error policy: Stops on first error.
351  */
352 int
353 dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
354 {
355 	int		c, cix, i, lastoffline = -1, rv = 0;
356 	processorid_t	cpuid;
357 	struct cpu	*cp;
358 	dr_cpu_unit_t	*up;
359 	dr_devset_t	devset;
360 	sbd_dev_stat_t	*ds;
361 	static fn_t	f = "dr_pre_release_cpu";
362 	int		cpu_flags = 0;
363 
364 	devset = DR_DEVS_PRESENT(hp->h_bd);
365 
366 	/* allocate status struct storage. */
367 	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
368 	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);
369 
370 	cix = dr_cpu_status(hp, devset, ds);
371 
372 	mutex_enter(&cpu_lock);
373 
374 	for (i = 0; i < devnum; i++) {
375 		up = (dr_cpu_unit_t *)devlist[i];
376 		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
377 			continue;
378 		}
379 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
380 
381 		/*
382 		 * On x86 systems, some CPUs can't be unconfigured.
383 		 * For example, CPU0 can't be unconfigured because many other
384 		 * components have a dependency on it.
385 		 * This check determines if a CPU is currently in use and
386 		 * returns a "Device busy" error if so.
387 		 */
388 		for (c = 0; c < cix; c++) {
389 			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
390 				if (ds[c].d_cpu.cs_busy) {
391 					dr_dev_err(CE_WARN, &up->sbc_cm,
392 					    ESBD_BUSY);
393 					rv = -1;
394 					break;
395 				}
396 			}
397 		}
398 		if (c < cix)
399 			break;
400 
401 		cpuid = up->sbc_cpu_id;
402 		if ((cp = cpu_get(cpuid)) == NULL) {
403 			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
404 			rv = -1;
405 			break;
406 		}
407 
408 		/* used by dr_cancel_cpu during error flow */
409 		up->sbc_cpu_flags = cp->cpu_flags;
410 
411 		if (CPU_ACTIVE(cp)) {
412 			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
413 				cpu_flags = CPU_FORCED;
414 
415 			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
416 			if (cpu_offline(cp, cpu_flags)) {
417 				PR_CPU("%s: failed to offline cpu %d\n", f,
418 				    cpuid);
419 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
420 				if (disp_bound_threads(cp, 0)) {
421 					cmn_err(CE_WARN, "%s: thread(s) bound "
422 					    "to cpu %d", f, cp->cpu_id);
423 				}
424 				rv = -1;
425 				break;
426 			} else
427 				lastoffline = i;
428 		}
429 
430 		if (!rv) {
431 			sbd_error_t *err;
432 
433 			err = drmach_release(up->sbc_cm.sbdev_id);
434 			if (err) {
435 				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
436 				rv = -1;
437 				break;
438 			}
439 		}
440 	}
441 
442 	mutex_exit(&cpu_lock);
443 
444 	if (rv) {
445 		/*
446 		 * Need to unwind the others, since at this level (pre-release)
447 		 * the device state has not yet transitioned and a failure here
448 		 * prevents us from reaching the "post" release function
449 		 * where states are normally transitioned.
450 		 */
451 		for (i = lastoffline; i >= 0; i--) {
452 			up = (dr_cpu_unit_t *)devlist[i];
453 			(void) dr_cancel_cpu(up);
454 		}
455 	}
456 
457 	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
458 	return (rv);
459 }
460 
461 /*
462  * dr_pre_detach_cpu
463  *
464  * sbd error policy: Stops on first error.
465  */
466 int
467 dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
468 {
470 
471 	int		i;
472 	int		cpu_flags = 0;
473 	static fn_t	f = "dr_pre_detach_cpu";
474 
475 	PR_CPU("%s...\n", f);
476 
477 	/*
478 	 * Block out status threads while destroying devinfo tree
479 	 * branches
480 	 */
481 	dr_lock_status(hp->h_bd);
482 	mutex_enter(&cpu_lock);
483 
484 	for (i = 0; i < devnum; i++) {
485 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
486 		struct cpu	*cp;
487 
488 		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
489 			continue;
490 		}
491 
492 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
493 
494 		cp = cpu_get(up->sbc_cpu_id);
495 		if (cp == NULL)
496 			continue;
497 
498 		/*
499 		 * Print a console message for each attachment
500 		 * point. For CMP devices, this means that only
501 		 * one message should be printed, no matter how
502 		 * many cores are actually present.
503 		 */
504 		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
505 			cmn_err(CE_CONT, "OS unconfigure %s\n",
506 			    up->sbc_cm.sbdev_path);
507 		}
508 
509 		/*
510 		 * CPUs were offlined during Release.
511 		 */
512 		if (cpu_is_poweredoff(cp)) {
513 			PR_CPU("%s: cpu %d already powered OFF\n",
514 			    f, up->sbc_cpu_id);
515 			continue;
516 		}
517 
518 		if (!cpu_is_offline(cp)) {
519 			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
520 				cpu_flags = CPU_FORCED;
521 			/* The cpu was brought online after release; offline it again. */
522 			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
523 			if (cpu_offline(cp, cpu_flags)) {
524 				PR_CPU("%s: failed to offline cpu %d\n",
525 				    f, up->sbc_cpu_id);
526 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
527 				if (disp_bound_threads(cp, 0)) {
528 					cmn_err(CE_WARN, "%s: thread(s) bound "
529 					    "to cpu %d", f, cp->cpu_id);
530 				}
531 				goto err;
532 			}
533 		}
534 		if (cpu_poweroff(cp) != 0) {
535 			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
536 			goto err;
537 		} else {
538 			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
539 		}
540 	}
541 
542 	return (0);
543 
544 err:
545 	mutex_exit(&cpu_lock);
546 	dr_unlock_status(hp->h_bd);
547 	return (-1);
548 }
549 
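/*
 * dr_detach_cpu
 *
 * Remove the processor via cpu_unconfigure() and destroy its devinfo
 * branch.  Expects cpu_lock to be held by the caller.
 */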
550 /*ARGSUSED*/
551 void
552 dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
553 {
554 	sbd_error_t	*err;
555 	processorid_t	 cpuid;
556 	int		 rv;
557 	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;
558 
559 	ASSERT(MUTEX_HELD(&cpu_lock));
560 
561 	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
562 		return;
563 	}
564 
565 	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
566 	if (err) {
567 		DRERR_SET_C(&cp->sbdev_error, &err);
568 	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
569 		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
570 	} else {
571 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
572 		if (err) {
573 			DRERR_SET_C(&cp->sbdev_error, &err);
574 		} else {
575 			up->sbc_cpu_id = -1;
576 		}
577 	}
578 }
579 
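/*
 * dr_post_detach_cpu
 *
 * Clear the cached ndi handle and drop the locks taken in
 * dr_pre_detach_cpu.
 */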
580 /*ARGSUSED1*/
581 int
582 dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
583 {
584 	static fn_t	f = "dr_post_detach_cpu";
585 
586 	PR_CPU("%s...\n", f);
587 	hp->h_ndi = 0;
588 
589 	mutex_exit(&cpu_lock);
590 	dr_unlock_status(hp->h_bd);
591 
592 	return (0);
593 }
594 
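/*
 * dr_fill_cpu_stat
 *
 * Fill in a single sbd_cpu_stat_t from the cached unit data and the
 * platform status returned by drmach_status().
 */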
595 static void
596 dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
597 {
598 	ASSERT(cp && pstat && csp);
599 
600 	/* Fill in the common status information */
601 	bzero((caddr_t)csp, sizeof (*csp));
602 	csp->cs_type = cp->sbc_cm.sbdev_type;
603 	csp->cs_unit = cp->sbc_cm.sbdev_unum;
604 	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
605 	csp->cs_cond = cp->sbc_cm.sbdev_cond;
606 	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
607 	csp->cs_time = cp->sbc_cm.sbdev_time;
608 	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
609 	csp->cs_suspend = 0;
610 
611 	/* CPU specific status data */
612 	csp->cs_cpuid = cp->sbc_cpu_id;
613 
614 	/*
615 	 * If the speed and ecache properties have not been
616 	 * cached yet, read them in from the device tree.
617 	 */
618 	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
619 		dr_cpu_set_prop(cp);
620 
621 	/* use the cached speed and ecache values */
622 	csp->cs_speed = cp->sbc_speed;
623 	csp->cs_ecache = cp->sbc_ecache;
624 
625 	mutex_enter(&cpu_lock);
626 	if (!cpu_get(csp->cs_cpuid)) {
627 		/* ostate must be UNCONFIGURED */
628 		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
629 	}
630 	mutex_exit(&cpu_lock);
631 }
632 
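/*
 * dr_fill_cmp_stat
 *
 * Collapse the per-core status entries for a CMP device into a single
 * sbd_cmp_stat_t, taking the maximum speed, ecache and time values,
 * OR-ing the busy flags, and marking the CMP configured if any of its
 * cores is configured.
 */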
633 /*ARGSUSED2*/
634 static void
635 dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
636 {
637 	int	core;
638 
639 	ASSERT(csp && psp && (ncores >= 1));
640 
641 	bzero((caddr_t)psp, sizeof (*psp));
642 
643 	/*
644 	 * Fill in the common status information based
645 	 * on the data for the first core.
646 	 */
647 	psp->ps_type = SBD_COMP_CMP;
648 	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
649 	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
650 	psp->ps_cond = csp->cs_cond;
651 	psp->ps_busy = csp->cs_busy;
652 	psp->ps_time = csp->cs_time;
653 	psp->ps_ostate = csp->cs_ostate;
654 	psp->ps_suspend = csp->cs_suspend;
655 
656 	/* CMP specific status data */
657 	*psp->ps_cpuid = csp->cs_cpuid;
658 	psp->ps_ncores = 1;
659 	psp->ps_speed = csp->cs_speed;
660 	psp->ps_ecache = csp->cs_ecache;
661 
662 	/*
663 	 * Walk through the data for the remaining cores.
664 	 * Make any adjustments to the common status data,
665 	 * or the shared CMP specific data if necessary.
666 	 */
667 	for (core = 1; core < ncores; core++) {
668 		/*
669 		 * The following properties should be the same
670 		 * for all the cores of the CMP.
671 		 */
672 		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
673 		    SBD_COMP_CMP));
674 
675 		if (csp[core].cs_speed > psp->ps_speed)
676 			psp->ps_speed = csp[core].cs_speed;
677 		if (csp[core].cs_ecache > psp->ps_ecache)
678 			psp->ps_ecache = csp[core].cs_ecache;
679 
680 		psp->ps_cpuid[core] = csp[core].cs_cpuid;
681 		psp->ps_ncores++;
682 
683 		/* adjust time if necessary */
684 		if (csp[core].cs_time > psp->ps_time) {
685 			psp->ps_time = csp[core].cs_time;
686 		}
687 
688 		psp->ps_busy |= csp[core].cs_busy;
689 
690 		/*
691 		 * If any of the cores are configured, the
692 		 * entire CMP is marked as configured.
693 		 */
694 		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
695 			psp->ps_ostate = csp[core].cs_ostate;
696 		}
697 	}
698 }
699 
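/*
 * dr_cpu_status
 *
 * Generate the status array for the CPU units in devset.  Every device
 * is treated as a CMP; one stat entry is produced per CMP (or per
 * single-core CPU) and the number of entries written is returned.
 */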
700 int
701 dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
702 {
703 	int		cmp;
704 	int		core;
705 	int		ncpu;
706 	dr_board_t	*bp;
707 	sbd_cpu_stat_t	*cstat;
708 	int		impl;
709 
710 	bp = hp->h_bd;
711 	ncpu = 0;
712 
713 	devset &= DR_DEVS_PRESENT(bp);
714 	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
715 	    KM_SLEEP);
716 
717 	/*
718 	 * Treat every CPU as a CMP. In the case where the
719 	 * device is not a CMP, treat it as a CMP with only
720 	 * one core.
721 	 */
722 	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
723 		int		ncores;
724 		dr_cpu_unit_t	*cp;
725 		drmach_status_t	pstat;
726 		sbd_error_t	*err;
727 		sbd_cmp_stat_t	*psp;
728 
729 		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
730 			continue;
731 		}
732 
733 		ncores = 0;
734 
735 		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
736 
737 			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));
738 
739 			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
740 				/* present, but not fully initialized */
741 				continue;
742 			}
743 
744 			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));
745 
746 			/* skip if not present */
747 			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
748 				continue;
749 			}
750 
751 			/* fetch platform status */
752 			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
753 			if (err) {
754 				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
755 				continue;
756 			}
757 
758 			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
759 			/*
760 			 * We should set impl here because the last core
761 			 * found might be EMPTY or not present.
762 			 */
763 			impl = cp->sbc_cpu_impl;
764 		}
765 
766 		if (ncores == 0) {
767 			continue;
768 		}
769 
770 		/*
771 		 * Store the data to the outgoing array. If the
772 		 * device is a CMP, combine all the data for the
773 		 * cores into a single stat structure.
774 		 *
775 		 * The check for a CMP device uses the last core
776 		 * found, assuming that all cores will have the
777 		 * same implementation.
778 		 */
779 		if (CPU_IMPL_IS_CMP(impl)) {
780 			psp = (sbd_cmp_stat_t *)dsp;
781 			dr_fill_cmp_stat(cstat, ncores, impl, psp);
782 		} else {
783 			ASSERT(ncores == 1);
784 			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
785 		}
786 
787 		dsp++;
788 		ncpu++;
789 	}
790 
791 	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);
792 
793 	return (ncpu);
794 }
795 
796 /*
797  * Cancel a previous release operation for a cpu.
798  * For cpus this means simply bringing back online
799  * the cpus that were taken offline.  Note that they
800  * had to have been online at the time they were
801  * released.
802  */
803 int
804 dr_cancel_cpu(dr_cpu_unit_t *up)
805 {
806 	int		rv = 0;
807 	static fn_t	f = "dr_cancel_cpu";
808 
809 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
810 
811 	if (cpu_flagged_active(up->sbc_cpu_flags)) {
812 		struct cpu	*cp;
813 
814 		/*
815 		 * The CPU had been online, so go ahead
816 		 * and bring it back online.
817 		 */
818 		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);
819 
820 		mutex_enter(&cpu_lock);
821 		cp = cpu[up->sbc_cpu_id];
822 
823 		if (cpu_is_poweredoff(cp)) {
824 			if (cpu_poweron(cp)) {
825 				cmn_err(CE_WARN, "%s: failed to power-on "
826 				    "cpu %d", f, up->sbc_cpu_id);
827 				rv = -1;
828 			}
829 		}
830 
831 		if (rv == 0 && cpu_is_offline(cp)) {
832 			if (cpu_online(cp)) {
833 				cmn_err(CE_WARN, "%s: failed to online cpu %d",
834 				    f, up->sbc_cpu_id);
835 				rv = -1;
836 			}
837 		}
838 
839 		if (rv == 0 && cpu_is_online(cp)) {
840 			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
841 				if (cpu_intr_disable(cp) != 0) {
842 					cmn_err(CE_WARN, "%s: failed to "
843 					    "disable interrupts on cpu %d", f,
844 					    up->sbc_cpu_id);
845 				}
846 			}
847 		}
848 
849 		mutex_exit(&cpu_lock);
850 	}
851 
852 	return (rv);
853 }
854 
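/*
 * dr_disconnect_cpu
 *
 * Disconnect a CPU that is in the CONNECTED or UNCONFIGURED state.
 * A unit still in the CONNECTED state was never configured, so there
 * is nothing to do for it; otherwise drmach_cpu_disconnect() is called.
 */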
855 int
856 dr_disconnect_cpu(dr_cpu_unit_t *up)
857 {
858 	sbd_error_t	*err;
859 	static fn_t	f = "dr_disconnect_cpu";
860 
861 	PR_CPU("%s...\n", f);
862 
863 	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
864 	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));
865 
866 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
867 
868 	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
869 		/*
870 		 * The cpus were never brought in and so are still
871 		 * effectively disconnected; there is nothing to do here.
872 		 */
873 		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
874 		return (0);
875 	}
876 
877 	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
878 	if (err == NULL)
879 		return (0);
880 	else {
881 		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
882 		return (-1);
883 	}
884 	/*NOTREACHED*/
885 }
886