/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_impl.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
extern struct cpu	*SIGBCPU;
#else
/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";
#endif /* _STARFIRE */

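/*
 * Verify (DEBUG only) that a CPU unit still matches what was recorded
 * at discovery/connect time: correct board, component type, and cpuid.
 * Always returns 1 so that it can be used directly inside ASSERT().
 */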
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * The cpuid and unit number should never differ from what
	 * they were at discovery/connect time.
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

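/*
 * Map an errno value from the CPU (un)configuration path to the
 * closest equivalent sbd error code (ESBD_*).  Unrecognized values
 * map to ESBD_INVAL.
 */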
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

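/*
 * Cache the CPU speed (MHz) and ecache size (MB) in the CPU unit,
 * reading them from the properties of the CPU's devinfo node.  The
 * name of the ecache size property varies with the CPU implementation.
 */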
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
#ifndef _STARFIRE
		/*
		 * Do not report an error on Starfire since
		 * the dip will not be created until after
		 * the CPU has been configured.
		 */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
#endif /* !_STARFIRE */
		return;
	}

	/* read in the CPU speed */

	/*
	 * If the property is not found in the CPU node, it must
	 * reside in the core or cmp node, so keep looking there.
	 */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */

	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
	case OLYMPUS_C_IMPL:
	case JUPITER_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * As above, if the property is not found in the
		 * CPU node, it must reside in the core or cmp
		 * node, so keep looking there.
		 */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

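/*
 * Initialize the soft state for a CPU unit: derive its new device
 * state, cache its cpuid, implementation, CPU flags, and speed and
 * ecache properties, and then transition the device to that state.
 */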
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

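/*
 * dr_pre_attach_cpu
 *
 * Announces each attachment point on the console, unmaps any stale
 * sigblock for units coming from the UNCONFIGURED state, and then
 * acquires the board status lock, the root devinfo node, and
 * cpu_lock.  All three are held across dr_attach_cpu() and released
 * in dr_post_attach_cpu().
 */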
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n", f,
			    up->sbc_cpu_id);

			CPU_SGN_MAPOUT(up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}

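/*
 * Configure a single CPU: create its devinfo branch via
 * drmach_configure() and then add the processor via cpu_configure().
 * If the processor cannot be added, the devinfo branch is torn down
 * again.  Called with cpu_lock held (acquired in dr_pre_attach_cpu()).
 */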
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			} else {
				PR_CPU("%s: cpu %d powered ON\n", f,
				    up->sbc_cpu_id);
			}
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *)kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else {
				lastoffline = i;
			}
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

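/*
 * Unconfigure a single CPU: remove the processor via cpu_unconfigure()
 * and then destroy its devinfo branch.  Called with cpu_lock held
 * (acquired in dr_pre_detach_cpu()).
 */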
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}

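/*
 * dr_post_detach_cpu
 *
 * Completes a detach pass by dropping cpu_lock and the board status
 * lock acquired in dr_pre_detach_cpu().
 */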
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

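/*
 * Fill in an sbd_cpu_stat_t for a single CPU unit, combining cached
 * DR state with the platform status in pstat.  The speed and ecache
 * properties are read from the device tree on first use and cached
 * thereafter.
 */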
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

#ifdef _STARFIRE
	csp->cs_isbootproc = (SIGBCPU->cpu_id == cp->sbc_cpu_id) ? 1 : 0;
#endif /* _STARFIRE */

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

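/*
 * Collapse the per-core status in csp[0..ncores-1] into a single
 * sbd_cmp_stat_t for the chip.  Common fields are taken from the
 * first core; busy, time, and ostate are merged across cores, and
 * on Jaguar the split ecache sizes are summed.
 */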
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

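/*
 * Generate status for every present CPU device on the board, treating
 * each device as a CMP; a non-CMP CPU is reported as a CMP with a
 * single core.  Returns the number of status structures written to
 * dsp.
 */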
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * Set impl on every iteration so that it
			 * reflects the last valid core, since later
			 * cores may be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this simply means bringing offlined
 * cpus back online.  Note that they had to have
 * been online at the time they were released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * The CPU had been online; go ahead and bring
		 * it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

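/*
 * Disconnect a CPU unit.  A unit still in the CONNECTED state was
 * never configured and is already effectively disconnected, so there
 * is nothing to do; otherwise the machine layer performs the
 * disconnect.
 */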
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * The cpus were never brought in, so they are
		 * still effectively disconnected; nothing to do
		 * here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);

	DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
	return (-1);
}
953