/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

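/*
 * Sanity check a CPU unit: in DEBUG kernels, assert that the unit belongs
 * to the given board and really is a CPU component.  Non-DEBUG kernels
 * always report the unit as sane.
 */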
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

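/*
 * Map a generic errno value returned by the CPU (un)configuration code
 * onto the corresponding sbd error code.  Unrecognized values map to
 * ESBD_INVAL.
 */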
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU starts. If they are unavailable, just set
 * them to zero.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it has to be kept in the core or cmp node so
		 * we just keep looking.
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

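/*
 * Initialize the DR state of a CPU unit: derive the new unit state from
 * its attached/present status, cache the CPU id, implementation and CPU
 * flags, and read the speed and ecache properties.  The state transition
 * is deferred until all of this has completed.
 */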
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

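/*
 * dr_pre_attach_cpu
 *
 * Announce each attachment point on the console and block out status
 * threads by taking the board status lock, the devinfo tree lock and
 * cpu_lock before the devinfo branches are created.
 */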
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}

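/*
 * Attach (configure) a single CPU: create its devinfo branch via
 * drmach_configure() and add the processor to the OS with cpu_configure().
 * On failure, the partially created branch is destroyed again.
 */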
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
		up->sbc_cpu_id = cpuid;
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp, 0) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}

	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

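/*
 * Detach (unconfigure) a single CPU: remove the processor from the OS
 * with cpu_unconfigure() and destroy its devinfo branch via
 * drmach_unconfigure().
 */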
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;
	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		} else {
			up->sbc_cpu_id = -1;
		}
	}
}

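/*
 * dr_post_detach_cpu
 *
 * Drop cpu_lock and the board status lock that were taken in
 * dr_pre_detach_cpu() now that the detach pass is complete.
 */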
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

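/*
 * Fill in an sbd_cpu_stat_t for a single CPU unit from its cached DR
 * state and the platform status returned by drmach_status().
 */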
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

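/*
 * Combine the per-core status structures of a CMP into a single
 * sbd_cmp_stat_t: the first core supplies the common fields, and speed,
 * ecache, busy, time and ostate are merged across the remaining cores.
 */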
/*ARGSUSED2*/
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));

		if (csp[core].cs_speed > psp->ps_speed)
			psp->ps_speed = csp[core].cs_speed;
		if (csp[core].cs_ecache > psp->ps_ecache)
			psp->ps_ecache = csp[core].cs_ecache;

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

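/*
 * Report status for all present CPU units on the board.  Returns the
 * number of status structures written to dsp.
 */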
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	*cstat;
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);
	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
	    KM_SLEEP);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);

	return (ncpu);
}

/*
 * Cancel a previous release operation for a CPU.
 * For CPUs this means simply bringing back online
 * any CPUs that were taken offline.  Note that they
 * had to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * The CPU had been online, so go ahead
		 * and bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_offline(cp)) {
			if (cpu_online(cp, 0)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

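/*
 * Disconnect a CPU unit from the board.  A unit that is only CONNECTED
 * was never brought in and needs no work; an UNCONFIGURED unit is
 * disconnected via drmach_cpu_disconnect().
 */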
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * CPUs were never brought in and so are still
		 * effectively disconnected; there is nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL) {
		return (0);
	} else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}
890