xref: /illumos-gate/usr/src/uts/sun4u/ngdr/io/dr_cpu.c (revision 1b58875ad7966cf2c85ee8e92f3da04f0a3b2f7a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Peter Tribble.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/x_call.h>
#include <sys/cpu_module.h>
#include <sys/cpu_impl.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * The cpuid and unit number should never differ from their
	 * values at discovery/connect time.
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}
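
/*
 * Usage sketch for dr_errno2ecode() (illustrative only; mirrors the
 * actual pattern in dr_attach_cpu() and dr_detach_cpu() below): a
 * nonzero errno from cpu_configure() is folded into an sbd error code
 * before being recorded against the unit, e.g.
 *
 *	if ((rv = cpu_configure(cpuid)) != 0)
 *		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
 */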

static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */

	/*
	 * If the property is not found in the CPU node, it must be
	 * kept in the core or cmp node, so just keep looking.
	 */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */

	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
	case OLYMPUS_C_IMPL:
	case JUPITER_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it must be kept in the core or cmp node, so
		 * just keep looking.
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
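
/*
 * Worked example for the conversions in dr_cpu_set_prop() (hypothetical
 * values): a "clock-frequency" of 1050000000 Hz gives
 *	(1050000000 + 500000) / 1000000 == 1050 MHz,
 * where the +500000 rounds to the nearest MHz, and an "ecache-size" of
 * 8388608 bytes gives 8388608 / (1024 * 1024) == 8 MB.
 */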

void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n", f,
			    up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}
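
/*
 * Sketch of the console-message logic in dr_pre_attach_cpu() above
 * (hypothetical unit numbers): for a two-core CMP, both cores map to
 * the same SBD unit number, so DR_UNUM2SBD_UNUM() might return 0 for
 * each.  The first core passes the test (0 >= 0), prints the message,
 * and sets next_cpu to 1; the second core fails it (0 >= 1) and prints
 * nothing, so the attachment point is reported only once.
 */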

/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
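
/*
 * Error-flow sketch for the unwind in dr_pre_release_cpu() above
 * (hypothetical scenario): if the third unit in devlist fails
 * cpu_offline(), lastoffline still indexes the last successfully
 * offlined unit, so dr_cancel_cpu() is applied to units
 * lastoffline..0 and each previously-active cpu is brought back
 * online before the error is returned.
 */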

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}

/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}
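
/*
 * Aggregation sketch for dr_fill_cmp_stat() (hypothetical two-core
 * Jaguar values): per-core ecache sizes of 4 and 4 MB sum to 8 MB for
 * the chip; ps_busy is the OR of the cores' busy flags; ps_time keeps
 * the most recent of the cores' timestamps; and a single configured
 * core is enough to report the whole CMP as SBD_STAT_CONFIGURED.
 */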

int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * Set impl here, within the loop, because the
			 * last core examined might be EMPTY or not
			 * present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}
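
/*
 * Usage sketch for dr_cpu_status() (mirrors dr_pre_release_cpu()
 * above): callers supply an array sized for the worst case and use
 * the return value as the count of valid entries, e.g.
 *
 *	ds = kmem_zalloc(sizeof (sbd_dev_stat_t) *
 *	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);
 *	cix = dr_cpu_status(hp, DR_DEVS_PRESENT(hp->h_bd), ds);
 */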

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this simply means bringing cpus that
 * were offlined back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * The CPU had been online; go ahead
		 * and bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}
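
/*
 * Restore sketch for dr_cancel_cpu() (hypothetical snapshot): a cpu
 * whose saved sbc_cpu_flags were active with P_NOINTR is powered on
 * if necessary, brought back online, and then has cpu_intr_disable()
 * reapplied, returning it to its pre-release interrupt state.
 */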

int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);
	else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}
939