/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2023 Oxide Computer Company
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

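/*
 * Sanity check that the given CPU unit really belongs to the given board
 * and is of CPU type.  The checks are only compiled into DEBUG kernels;
 * the function always returns 1 so that it can be used inside ASSERT().
 */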
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

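/*
 * Map an errno value returned by the CPU configuration routines to the
 * corresponding ESBD_* error code.  Unrecognized values map to ESBD_INVAL.
 */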
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU has been started.  If they are unavailable,
 * just set them to zero.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/*
		 * Read in the ecache size.  If the property is not
		 * found on the CPU node itself, it may live on the
		 * core or cmp node instead, so the lookup is allowed
		 * to search up the tree (no DDI_PROP_DONTPASS).
		 */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert the clock frequency to MHz and the cache size to MB */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

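/*
 * Initialize the software state of a CPU unit: derive the DR state from
 * the attached/present flags, cache the cpuid, implementation and cpu
 * flags, and read the speed and cache properties.  The state transition
 * is deferred to the end so the unit is fully initialized first.
 */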
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

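/*
 * dr_pre_attach_cpu
 *
 * Announce each attachment point on the console and then take the status
 * lock, enter the root devinfo node and acquire cpu_lock.  These are held
 * across dr_attach_cpu() and released in dr_post_attach_cpu().
 */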
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node());
	mutex_enter(&cpu_lock);

	return (0);
}

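/*
 * Configure the devinfo branch for a single CPU unit and add the CPU to
 * the OS via cpu_configure().  If the CPU cannot be added, the devinfo
 * branch is destroyed again so the unit is left unconfigured.  Called
 * with cpu_lock held.
 */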
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
		up->sbc_cpu_id = cpuid;
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp, 0) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node());
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

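/*
 * Remove a single CPU from the OS via cpu_unconfigure() and then destroy
 * its devinfo branch.  Does nothing if the unit is not currently attached.
 * Called with cpu_lock held.
 */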
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;
	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		} else {
			up->sbc_cpu_id = -1;
		}
	}
}

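/*
 * dr_post_detach_cpu
 *
 * Drop the locks acquired in dr_pre_detach_cpu() once all units have been
 * processed.
 */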
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

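/*
 * Fill in the status structure for a single CPU unit from the cached
 * software state and the platform status returned by drmach_status().
 */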
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

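/*
 * Collapse the per-core status of a CMP device into a single CMP status
 * structure, using the maximum speed, cache size and time seen across the
 * cores and the union of their busy flags.
 */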
/*ARGSUSED2*/
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));

		if (csp[core].cs_speed > psp->ps_speed)
			psp->ps_speed = csp[core].cs_speed;
		if (csp[core].cs_ecache > psp->ps_ecache)
			psp->ps_ecache = csp[core].cs_ecache;

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

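/*
 * Fill in status entries for all CPU devices in the given devset.  Every
 * device is reported as a CMP; devices that are not CMPs are treated as
 * CMPs with a single core.  Returns the number of entries written to dsp.
 */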
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	*cstat;
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);
	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
	    KM_SLEEP);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);

	return (ncpu);
}

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this means simply bringing cpus that
 * were offlined back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online, go ahead and
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_offline(cp)) {
			if (cpu_online(cp, 0)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

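/*
 * Disconnect a CPU unit from the board.  A unit that is only CONNECTED
 * was never brought into the OS, so there is nothing to do; otherwise the
 * CPU is disconnected through drmach_cpu_disconnect().
 */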
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);
	else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}