/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/cpu_module.h>
#include <sys/cheetahregs.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

#ifdef _STARFIRE
#include <sys/starfire.h>
extern struct cpu	*SIGBCPU;
#else
/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";
#endif /* _STARFIRE */

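/*
 * dr_cpu_unit_is_sane
 *
 * Sanity check a cpu unit against its board: the unit's board
 * pointer, device type, and cpuid must match what was recorded
 * at discovery/connect time.  The checks are compiled into DEBUG
 * kernels only; the function always returns 1 so that it can be
 * used inside ASSERT().
 */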
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * cpuid and unit number should never be different
	 * than they were at discovery/connect time
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

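/*
 * dr_errno2ecode
 *
 * Map an errno value returned by cpu_configure()/cpu_unconfigure()
 * to the corresponding ESBD_* error code.  Unrecognized values
 * map to ESBD_INVAL.
 */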
static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

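/*
 * dr_cpu_set_prop
 *
 * Cache the cpu's clock speed (MHz) and ecache size (MB) in the
 * cpu unit, reading them from the unit's devinfo node.  The name
 * of the ecache size property differs across cpu implementations.
 */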
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
#ifndef _STARFIRE
		/*
		 * Do not report an error on Starfire since
		 * the dip will not be created until after
		 * the CPU has been configured.
		 */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
#endif /* !_STARFIRE */
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert Hz to MHz (rounded) and bytes to MB */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

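/*
 * dr_init_cpu_unit
 *
 * Initialize a cpu unit at board discovery time: derive its DR
 * state from the device flags, record the cpuid, implementation,
 * and cpu_flags, and cache the speed/ecache properties before
 * transitioning the device to its new state.
 */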
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

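/*
 * dr_pre_attach_cpu
 *
 * Prepare for cpu attach: announce each attachment point on the
 * console, unmap any stale sigblock mappings, and acquire the
 * board status lock and cpu_lock, which remain held until
 * dr_post_attach_cpu() releases them.
 */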
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n",
			    f, up->sbc_cpu_id);

			CPU_SGN_MAPOUT(up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	return (0);
}

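/*
 * dr_attach_cpu
 *
 * Attach a single cpu: create its devinfo branch via
 * drmach_configure(), then add the cpu to the OS with
 * cpu_configure().  If the cpu cannot be added, the
 * devinfo branch is removed again.
 */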
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
		if (err)
			sbd_err_clear(&err);
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			} else {
				PR_CPU("%s: cpu %d powered ON\n",
				    f, up->sbc_cpu_id);
			}
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}


/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *)kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) "
					    "bound to cpu %d",
					    f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) "
					    "bound to cpu %d",
					    f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

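/*
 * dr_detach_cpu
 *
 * Detach a single cpu: remove it from the OS with
 * cpu_unconfigure(), then destroy its devinfo branch via
 * drmach_unconfigure().
 */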
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}

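/*
 * dr_post_detach_cpu
 *
 * Reset hp->h_ndi and release the cpu_lock and board status
 * lock acquired by dr_pre_detach_cpu() once all units have
 * been processed.
 */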
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

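/*
 * dr_fill_cpu_stat
 *
 * Fill in a cpu status structure from the unit's common device
 * data, its cached speed and ecache properties, and the platform
 * status returned by drmach_status().
 */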
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

#ifdef _STARFIRE
	csp->cs_isbootproc = (SIGBCPU->cpu_id == cp->sbc_cpu_id) ? 1 : 0;
#endif /* _STARFIRE */

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

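/*
 * dr_fill_cmp_stat
 *
 * Merge the per-core status of a CMP into a single CMP status
 * structure.  Common fields are taken from the first core;
 * per-core cpuids, busy flags, times, and (on Jaguar) split
 * ecache sizes are accumulated from the remaining cores.
 */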
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit);
	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	psp->ps_cpuid[0] = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {

		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

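/*
 * dr_cpu_status
 *
 * Gather status for all present cpu units covered by the devset.
 * Every device is treated as a CMP; a non-CMP cpu is simply a
 * CMP with a single core.  Returns the number of status
 * structures written to dsp.
 */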
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {

		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(cp->sbc_cpu_impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, cp->sbc_cpu_impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this means simply bringing cpus that
 * were offlined back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online; go ahead and
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n",
		    f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d",
					    f, up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

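/*
 * dr_disconnect_cpu
 *
 * Disconnect a cpu from the board.  A unit still in the
 * CONNECTED state was never configured, so it is effectively
 * disconnected already and there is nothing to do; otherwise
 * the cpu is disconnected via drmach_cpu_disconnect().
 */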
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n",
		    f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL) {
		return (0);
	} else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}
935