/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Peter Tribble.
 * Copyright 2019 Joyent, Inc.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/mem_config.h>
#include <sys/promif.h>
#include <sys/x_call.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/membar.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>

#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_impl.h>

#include <sys/autoconf.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * cpuid and unit number should never differ from their
	 * values at discovery/connect time
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

static int
dr_errno2ecode(int error)
{
	int	rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */

	/*
	 * If the property is not found in the CPU node, it must be
	 * in the core or cmp node, so we keep looking.
	 */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "clock-frequency", 0);

	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */

	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
	case OLYMPUS_C_IMPL:
	case JUPITER_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it must be in the core or cmp node, so we
		 * keep looking.
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units */
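	/*
	 * clock-frequency is reported in Hz and the cache size in
	 * bytes; adding 500000 before dividing rounds the speed to
	 * the nearest MHz, and the cache size is truncated to MB.
	 */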
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n", f,
			    up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);
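	/*
	 * These are held across the attach; dr_post_attach_cpu()
	 * drops them in the reverse order once the new cpus have
	 * been started and onlined.
	 */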

	return (0);
}

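/*
 * dr_attach_cpu
 *
 * Configure a single cpu: create its devinfo branch and add the cpu
 * to the OS.  On failure, the error is recorded in the unit's
 * sbdev_error field and any partially created devinfo branch is
 * torn down.
 */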
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error.  Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp, 0) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *)kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);
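	/*
	 * On success these are dropped in dr_post_detach_cpu(); the
	 * error path below releases them before returning.
	 */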

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
		    SBD_COMP_CPU);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

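/*
 * dr_detach_cpu
 *
 * Remove a single cpu from the OS and destroy its devinfo branch.
 * Errors are recorded in the unit's sbdev_error field.
 */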
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}

/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t	f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int	core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
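	/* the first core's cpuid lands in slot 0 of the ps_cpuid array */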
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));
		ASSERT(psp->ps_speed == csp[core].cs_speed);

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/*
		 * Jaguar has a split ecache, so the ecache
		 * for each core must be added together to
		 * get the total ecache for the whole chip.
		 */
		if (IS_JAGUAR(impl)) {
			psp->ps_ecache += csp[core].cs_ecache;
		}

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		core;
	int		ncpu;
	dr_board_t	*bp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
	int		impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int		ncores;
		dr_cpu_unit_t	*cp;
		drmach_status_t	pstat;
		sbd_error_t	*err;
		sbd_cmp_stat_t	*psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * Record impl for each core found, since the
			 * last core examined may be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	return (ncpu);
}

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this means simply bringing cpus that
 * were offlined back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int		rv = 0;
	static fn_t	f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu	*cp;

		/*
		 * CPU had been online; go ahead and
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
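		/* cpu_lock protects cpu[] and the flag checks below */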
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_offline(cp)) {
			if (cpu_online(cp, 0)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t	*err;
	static fn_t	f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL)
		return (0);
	else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}
940