xref: /titanic_50/usr/src/uts/sun4u/io/sbd_cpu.c (revision da2e3ebdc1edfbc5028edf1354e7dd2fa69a7968)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * CPU support routines for DR
31  */
32 
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/cred.h>
37 #include <sys/dditypes.h>
38 #include <sys/devops.h>
39 #include <sys/modctl.h>
40 #include <sys/poll.h>
41 #include <sys/conf.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/sunndi.h>
45 #include <sys/ddi_impldefs.h>
46 #include <sys/ndi_impldefs.h>
47 #include <sys/stat.h>
48 #include <sys/kmem.h>
49 #include <sys/processor.h>
50 #include <sys/cpuvar.h>
51 #include <sys/mem_config.h>
52 #include <sys/promif.h>
53 #include <sys/x_call.h>
54 #include <sys/cpu_sgnblk_defs.h>
55 #include <sys/membar.h>
56 #include <sys/stack.h>
57 #include <sys/sysmacros.h>
58 #include <sys/machsystm.h>
59 #include <sys/spitregs.h>
60 
61 #include <sys/archsystm.h>
62 #include <vm/hat_sfmmu.h>
63 #include <sys/pte.h>
64 #include <sys/mmu.h>
65 #include <sys/x_call.h>
66 #include <sys/cpu_module.h>
67 #include <sys/cheetahregs.h>
68 
69 #include <sys/autoconf.h>
70 #include <sys/cmn_err.h>
71 
72 #include <sys/sbdpriv.h>
73 
74 void
75 sbd_cpu_set_prop(sbd_cpu_unit_t *cp, dev_info_t *dip)
76 {
77 	uint32_t	clock_freq;
78 	int		ecache_size = 0;
79 	char		*cache_str = NULL;
80 
81 	/* read in the CPU speed */
82 	clock_freq = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
83 	    DDI_PROP_DONTPASS, "clock-frequency", 0);
84 
85 	ASSERT(clock_freq != 0);
86 
87 	/*
88 	 * The ecache property string is not the same
89 	 * for all CPU implementations.
90 	 */
91 	switch (cp->sbc_cpu_impl) {
92 	case CHEETAH_IMPL:
93 	case CHEETAH_PLUS_IMPL:
94 		cache_str = "ecache-size";
95 		break;
96 	case JAGUAR_IMPL:
97 		cache_str = "l2-cache-size";
98 		break;
99 	case PANTHER_IMPL:
100 		cache_str = "l3-cache-size";
101 		break;
102 	default:
103 		cmn_err(CE_WARN, "cpu implementation type "
104 		    "is an unknown %d value", cp->sbc_cpu_impl);
105 		ASSERT(0);
106 		break;
107 	}
108 
109 	if (cache_str != NULL) {
110 		/* read in the ecache size */
111 		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
112 		    DDI_PROP_DONTPASS, cache_str, 0);
113 	}
114 
115 	/*
116 	 * In the case the size is still 0,
117 	 * a zero value will be displayed running non-debug.
118 	 */
119 	ASSERT(ecache_size != 0);
120 
121 	/* convert to the proper units */
122 	cp->sbc_speed = (clock_freq + 500000) / 1000000;
123 	cp->sbc_ecache = ecache_size / (1024 * 1024);
124 }
125 
/*
 * Fill in a single sbd_cpu_stat_t from the cached per-unit CPU state
 * (sbd_cpu_unit_t) and the properties of its device node.
 */
static void
sbd_fill_cpu_stat(sbd_cpu_unit_t *cp, dev_info_t *dip, sbd_cpu_stat_t *csp)
{
	int		namelen;

	/* start from a clean slate; untouched fields report as zero */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	namelen = sizeof (csp->cs_name);
	/* best effort: cs_name stays empty if the prop lookup fails */
	(void) ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    OBP_DEVICETYPE, (caddr_t)csp->cs_name, &namelen);
	csp->cs_busy = cp->sbc_cm.sbdev_busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_cpuid = cp->sbc_cpu_id;
	csp->cs_suspend = 0;

	/*
	 * If we have marked the cpu's condition previously
	 * then don't rewrite it
	 *
	 * NOTE(review): csp was bzero'd above, so cs_cond can only
	 * equal SBD_COND_UNUSABLE here if that constant is 0 —
	 * otherwise this test is always true.  Confirm intent.
	 */
	if (csp->cs_cond != SBD_COND_UNUSABLE)
		csp->cs_cond = sbd_get_comp_cond(dip);

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		sbd_cpu_set_prop(cp, dip);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;
}
161 
162 static void
163 sbd_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl,
164     sbd_cmp_stat_t *psp)
165 {
166 	int	core;
167 
168 	ASSERT(csp && psp && (ncores >= 1));
169 
170 	bzero((caddr_t)psp, sizeof (*psp));
171 
172 	/*
173 	 * Fill in the common status information based
174 	 * on the data for the first core.
175 	 */
176 	psp->ps_type = SBD_COMP_CMP;
177 	psp->ps_unit = SBD_CMP_NUM(csp->cs_unit);
178 	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
179 	psp->ps_cond = csp->cs_cond;
180 	psp->ps_busy = csp->cs_busy;
181 	psp->ps_time = csp->cs_time;
182 	psp->ps_ostate = csp->cs_ostate;
183 	psp->ps_suspend = csp->cs_suspend;
184 
185 	/* CMP specific status data */
186 	*psp->ps_cpuid = csp->cs_cpuid;
187 	psp->ps_ncores = 1;
188 	psp->ps_speed = csp->cs_speed;
189 	psp->ps_ecache = csp->cs_ecache;
190 
191 	/*
192 	 * Walk through the data for the remaining cores.
193 	 * Make any adjustments to the common status data,
194 	 * or the shared CMP specific data if necessary.
195 	 */
196 	for (core = 1; core < ncores; core++) {
197 
198 		/*
199 		 * The following properties should be the same
200 		 * for all the cores of the CMP.
201 		 */
202 		ASSERT(psp->ps_unit == SBD_CMP_NUM(csp[core].cs_unit));
203 		ASSERT(psp->ps_speed == csp[core].cs_speed);
204 
205 		psp->ps_cpuid[core] = csp[core].cs_cpuid;
206 		psp->ps_ncores++;
207 
208 		/*
209 		 * Jaguar has a split ecache, so the ecache
210 		 * for each core must be added together to
211 		 * get the total ecache for the whole chip.
212 		 */
213 		if (IS_JAGUAR(impl)) {
214 			psp->ps_ecache += csp[core].cs_ecache;
215 		}
216 
217 		/* adjust time if necessary */
218 		if (csp[core].cs_time > psp->ps_time) {
219 			psp->ps_time = csp[core].cs_time;
220 		}
221 
222 		psp->ps_busy |= csp[core].cs_busy;
223 
224 		/*
225 		 * If any of the cores are configured, the
226 		 * entire CMP is marked as configured.
227 		 */
228 		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
229 			psp->ps_ostate = csp[core].cs_ostate;
230 		}
231 	}
232 }
233 
/*
 * Gather status for every requested, present CPU on the board.
 * Every CPU is treated as a CMP; a non-CMP device is reported as a
 * CMP with a single core.  Returns the number of stat entries
 * written to dsp.
 */
int
sbd_cpu_flags(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		ncpu;
	sbd_board_t	*sbp;
	sbdp_handle_t	*hdp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];

	sbp = SBDH2BD(hp->h_sbd);
	hdp = sbd_get_sbdp_handle(sbp, hp);

	/*
	 * Grab the status lock before accessing the dip as we allow
	 * concurrent status and branch unconfigure and disconnect.
	 *
	 * The disconnect thread clears the present devset first
	 * and then destroys dips. It is possible that the status
	 * thread checks the present devset before they are cleared
	 * but accesses the dip after they are destroyed causing a
	 * panic. To prevent this, the status thread should check
	 * the present devset and access dips with status lock held.
	 * Similarly disconnect thread should clear the present devset
	 * and destroy dips with status lock held.
	 */
	mutex_enter(&sbp->sb_slock);

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= SBD_DEVS_PRESENT(sbp);

	/*
	 * Treat every CPU as a CMP.  In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = ncpu = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {

		int		ncores;
		int		core;
		dev_info_t	*dip;
		sbd_cpu_unit_t	*cp;
		sbd_cmp_stat_t	*psp;

		/* skip CMP slots the caller did not ask about */
		if (DEVSET_IN_SET(devset, SBD_COMP_CMP, cmp) == 0)
			continue;

		ncores = 0;

		/* collect per-core stats into cstat[] */
		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			int	unit;

			unit = sbdp_portid_to_cpu_unit(cmp, core);

			/*
			 * Check to make sure the cpu is in a state
			 * where its fully initialized.
			 */
			if (SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
			    SBD_STATE_EMPTY)
				continue;

			dip = sbp->sb_devlist[NIX(SBD_COMP_CMP)][unit];
			if (dip == NULL)
				continue;

			cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

			sbd_fill_cpu_stat(cp, dip, &cstat[ncores++]);
		}

		/* nothing initialized in this CMP slot */
		if (ncores == 0)
			continue;

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(cp->sbc_cpu_impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			sbd_fill_cmp_stat(cstat, ncores, cp->sbc_cpu_impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	mutex_exit(&sbp->sb_slock);

	sbd_release_sbdp_handle(hdp);

	return (ncpu);
}
336 
/*
 * Pre-release step for a list of CPUs: snapshot each CPU's flags,
 * take active CPUs offline, and release each component via sbdp.
 * On any failure the CPUs already processed are rolled back with
 * sbd_cancel_cpu().  Returns 0 on success, -1 on failure (with the
 * handle's error set).
 */
int
sbd_pre_release_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i, rv = 0, unit;
	dev_info_t	*dip;
	processorid_t	cpuid;
	struct cpu	*cpup;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cpu_unit_t	*cp;
	static fn_t	f = "sbd_pre_release_cpu";
	sbdp_handle_t	*hdp;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	/*
	 * May have to juggle bootproc in release_component
	 */
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;

		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* under FORCE, skip the bad entry; else abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get cpuid for "
					"dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}


		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to get unit (cpu %d)",
				f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		/*
		 * Snapshot the CPU's flags so sbd_cancel_cpu() can
		 * restore the original state if we have to unwind.
		 */
		cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;

		if (cpu_flagged_active(cp->sbc_cpu_flags)) {
			int cpu_offline_flags = 0;

			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
				cpu_offline_flags = CPU_FORCED;
			PR_CPU("%s: offlining cpuid %d unit %d", f,
				cpuid, unit);
			if (cpu_offline(cpu[cpuid], cpu_offline_flags)) {
				cmn_err(CE_WARN,
					"%s: failed to offline cpu %d",
					f, cpuid);
				rv = -1;
				SBD_SET_ERR(ep, ESBD_OFFLINE);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				cpup = cpu_get(cpuid);
				/* bound threads are a common offline blocker */
				if (cpup && disp_bound_threads(cpup, 0)) {
					cmn_err(CE_WARN, "sbd:%s: thread(s) "
						"bound to cpu %d",
						f, cpup->cpu_id);
				}
				break;
			}
		}

		if (rv == 0) {
			if (sbdp_release_component(hdp, dip)) {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}

		if (rv)
			break;
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (; i >= 0; i--, devlist--) {
			dip = devlist->dv_dip;
			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get unit for "
					"dip (0x%p)", f, (void *)dip);
				break;
			}
			(void) sbd_cancel_cpu(hp, unit);
		}
	}

	SBD_INJECT_ERR(SBD_OFFLINE_CPU_PSEUDO_ERR,
		hp->h_err, EIO,
		ESBD_OFFLINE,
		sbp->sb_cpupath[devnum - 1]);

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
455 
/*
 * Pre-attach step for a list of CPUs: unmap the signature block of
 * any CPU coming from the UNCONFIGURED state.
 *
 * LOCKING: this routine returns with cpu_lock HELD; the matching
 * mutex_exit() is performed by sbd_post_attach_cpu().  The DR
 * framework calls the pre/post pair around the actual attach.
 */
int
sbd_pre_attach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	int		unit;
	processorid_t	cpuid;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_istate_t	dstate;
	dev_info_t	*dip;
	static fn_t	f = "sbd_pre_attach_cpu";
	sbdp_handle_t	*hdp;

	PR_CPU("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;

		ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));

		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* under FORCE, skip the bad entry; else abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get cpuid for "
					"dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to get unit (cpu %d)",
				f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		PR_CPU("%s: attach cpu-unit (%d.%d)\n",
			f, sbp->sb_num, unit);

		dstate = SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit);

		if (dstate == SBD_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n",
				f, cpuid);

			/* platform specific release of sigblk */
			CPU_SGN_MAPOUT(cpuid);
		}

	}

	/* held across the attach; released in sbd_post_attach_cpu() */
	mutex_enter(&cpu_lock);

	sbd_release_sbdp_handle(hdp);

	return (0);
}
530 
/*
 * Post-attach step for a list of CPUs: power on and online each
 * newly-attached CPU, and mark successfully handled units OK.
 *
 * LOCKING: entered with cpu_lock held (acquired at the end of
 * sbd_pre_attach_cpu()); this routine drops it before returning.
 */
int
sbd_post_attach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	processorid_t	cpuid;
	struct cpu	*cp;
	dev_info_t	*dip;
	/*
	 * NOTE(review): err is never reassigned below, so the final
	 * check always returns 0; errors are reported through ep
	 * instead.  Confirm whether that is intended.
	 */
	int		err = ESBD_NOERROR;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_post_attach_cpu";
	sbd_cpu_unit_t	*cpup;
	int		unit;

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* under FORCE, skip the bad entry; else abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpuid for "
				    "dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}

		cp = cpu_get(cpuid);

		if (cp == NULL) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: cpu_get failed for cpu %d",
				    f, cpuid);
				continue;
			} else {
				SBD_SET_ERR(ep, ESBD_INTERNAL);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				break;
			}
		}

		/* bring the CPU out of the powered-off state first */
		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				SBD_SET_ERR(ep, ESBD_CPUSTART);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				cmn_err(CE_WARN,
				    "%s: failed to power-on cpu %d",
				    f, cpuid);
				break;
			}
			SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR,
			    ep, EIO,
			    ESBD_CPUSTOP,
			    sbp->sb_cpupath[i]);
			PR_CPU("%s: cpu %d powered ON\n", f, cpuid);
		}

		/* then transition it from offline to online */
		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, cpuid);

			if (cpu_online(cp) != 0) {
				SBD_SET_ERR(ep, ESBD_ONLINE);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				cmn_err(CE_WARN,
				    "%s: failed to online cpu %d",
				    f, cp->cpu_id);
			}
			SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR,
			    ep, EIO,
			    ESBD_ONLINE,
			    sbp->sb_cpupath[i]);
		}

		/*
		 * if there is no error mark the cpu as OK to use
		 */
		if (SBD_GET_ERR(ep) == 0) {
			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
					cmn_err(CE_WARN,
					    "sbd:%s: failed to get unit "
					    "(cpu %d)", f, cpuid);
					continue;
				} else {
					SBD_GET_PERR(hdp->h_err,
					    SBD_HD2ERR(hp));
					break;
				}
			}
			cpup = SBD_GET_BOARD_CPUUNIT(sbp, unit);
			cpup->sbc_cm.sbdev_cond = SBD_COND_OK;
		}
	}

	/* drop the lock taken by sbd_pre_attach_cpu() */
	mutex_exit(&cpu_lock);

	sbd_release_sbdp_handle(hdp);

	if (err != ESBD_NOERROR) {
		return (-1);
	} else {
		return (0);
	}
}
643 
/*
 * Pre-detach step for a list of CPUs: power off each CPU that the
 * earlier release phase left offline.  A CPU that is still active
 * fails the operation with ESBD_BUSY.
 *
 * LOCKING: this routine acquires cpu_lock and returns with it HELD;
 * the matching mutex_exit() is performed by sbd_post_detach_cpu()
 * (which asserts MUTEX_HELD on entry).
 */
int
sbd_pre_detach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	int		unit;
	processorid_t	cpuid;
	dev_info_t	*dip;
	struct cpu	*cpu;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	static fn_t	f = "sbd_pre_detach_cpu";
	sbdp_handle_t	*hdp;
	int		rv = 0;

	PR_CPU("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* held across the detach; released in sbd_post_detach_cpu() */
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* under FORCE, skip the bad entry; else abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpuid for "
				    "dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		cpu = cpu_get(cpuid);

		if (cpu == NULL) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpu %d",
				    f, cpuid);
				continue;
			} else {
				SBD_SET_ERR(ep, ESBD_INTERNAL);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				break;
			}
		}

		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get unit (cpu %d)",
				    f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		PR_CPU("%s: OS detach cpu-unit (%d.%d)\n",
		    f, sbp->sb_num, unit);

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cpu)) {
			PR_CPU("%s: cpu %d already powered OFF\n", f, cpuid);
			continue;
		}

		if (cpu_is_offline(cpu)) {
			int	e;

			if (e = cpu_poweroff(cpu)) {
				cmn_err(CE_WARN,
				    "%s: failed to power-off cpu %d "
				    "(errno %d)",
				    f, cpu->cpu_id, e);
				SBD_SET_ERR(ep, ESBD_CPUSTOP);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);

				rv = -1;
				break;
			} else {
				PR_CPU("%s: cpu %d powered OFF\n",
					f, cpuid);
			}
		} else {
			/* neither powered-off nor offline: still in use */
			cmn_err(CE_WARN, "%s: cpu %d still active",
				f, cpu->cpu_id);
			SBD_SET_ERR(ep, ESBD_BUSY);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
			rv = -1;
			break;
		}
	}

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
749 
750 int
751 sbd_post_detach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
752 {
753 	static fn_t	f = "sbd_post_detach_cpu";
754 	int		i;
755 	sbderror_t	*ep = SBD_HD2ERR(hp);
756 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
757 	processorid_t	cpuid;
758 	dev_info_t	*dip;
759 	sbdp_handle_t	*hdp;
760 	sbd_cpu_unit_t	*cpup;
761 	int		unit;
762 
763 	PR_CPU("%s...\n", f);
764 
765 	/*
766 	 * We should be holding the cpu_lock at this point,
767 	 * and should have blocked device tree changes.
768 	 */
769 	ASSERT(MUTEX_HELD(&cpu_lock));
770 
771 	for (i = 0; i < devnum; i++, devlist++) {
772 		dip = devlist->dv_dip;
773 		hdp = sbd_get_sbdp_handle(sbp, hp);
774 		cpuid = sbdp_get_cpuid(hdp, dip);
775 		if (cpuid < 0) {
776 			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
777 				cmn_err(CE_WARN,
778 					"sbd:%s: failed to get cpuid for "
779 					"dip (0x%p)", f, (void *)dip);
780 				continue;
781 			} else {
782 				SBD_GET_PERR(hdp->h_err, ep);
783 				break;
784 			}
785 		}
786 		/*
787 		 * if there is no error mark the cpu as unusable
788 		 */
789 		if (SBD_GET_ERR(ep) == 0) {
790 			unit = sbdp_get_unit_num(hdp, dip);
791 			if (unit < 0) {
792 				if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
793 					cmn_err(CE_WARN,
794 					    "sbd:%s: failed to get unit "
795 					    "(cpu %d)", f, cpuid);
796 					continue;
797 				} else {
798 					SBD_GET_PERR(hdp->h_err,
799 					    SBD_HD2ERR(hp));
800 					break;
801 				}
802 			}
803 			cpup = SBD_GET_BOARD_CPUUNIT(sbp, unit);
804 			cpup->sbc_cm.sbdev_cond = SBD_COND_UNUSABLE;
805 		}
806 		sbd_release_sbdp_handle(hdp);
807 	}
808 
809 	mutex_exit(&cpu_lock);
810 
811 
812 	return (0);
813 }
814 
815 /*
816  * Cancel previous release operation for cpu.  For cpus this means simply
817  * bringing cpus that were offline back online.  Note that they had to have been
818  * online at the time they were released.  If attempting to power on or online
819  * a CPU fails, SBD_CPUERR_FATAL is returned to indicate that the CPU appears to
820  * be unsalvageable.  If a CPU reaches an online or nointr state but can't be
821  * taken to a "lesser" state, SBD_CPUERR_RECOVERABLE is returned to indicate
822  * that it was not returned to its original state but appears to be functional.
823  * Note that the latter case can occur due to unexpected but non-erroneous CPU
824  * manipulation (e.g. by the "psradm" command) during the DR operation.
825  */
/*
 * Roll a CPU back to the state snapshotted in sbc_cpu_flags before a
 * failed DR operation (see the block comment above for the full
 * contract and return-value semantics).
 */
int
sbd_cancel_cpu(sbd_handle_t *hp, int unit)
{
	int		rv = SBD_CPUERR_NONE;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cpu_unit_t	*cp;
	static fn_t	f = "sbd_cancel_cpu";
	struct cpu	*cpup;
	int		cpu_offline_flags = 0;

	PR_ALL("%s...\n", f);

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	/*
	 * If CPU should remain off, nothing needs to be done.
	 */
	if (cpu_flagged_poweredoff(cp->sbc_cpu_flags))
		return (rv);

	if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
		cpu_offline_flags = CPU_FORCED;

	/*
	 * CPU had been either offline, online, or set to no-intr.  We
	 * will return a component to its original state that it was
	 * prior to the failed DR operation.  There is a possible race
	 * condition between the calls to this function and re-obtaining
	 * the cpu_lock where a cpu state could change.  Because of this
	 * we can't externally document that we are trying to roll cpus
	 * back to their original state, but we believe a best effort
	 * should be made.
	 */

	mutex_enter(&cpu_lock);
	cpup = cpu[cp->sbc_cpu_id];

	/*
	 * The following will compare the cpu's current state with a
	 * snapshot of its state taken before the failed DR operation
	 * had started.
	 *
	 * The checks are ordered poweroff -> offline -> online ->
	 * nointr so each stage can hand the CPU to the next one.
	 */
	/* POWEROFF */
	if (cpu_is_poweredoff(cpup)) {
		if (cpu_poweron(cpup)) {
			cmn_err(CE_WARN,
			    "sbd:%s: failed to power-on cpu %d",
			    f, cp->sbc_cpu_id);
			SBD_SET_ERR(ep, ESBD_CPUSTART);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
			/* CPU cannot even be started: unsalvageable */
			rv = SBD_CPUERR_FATAL;
			goto out;
		}
		SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR,
		    hp->h_err, EIO,
		    ESBD_CPUSTART,
		    sbp->sb_cpupath[unit]);
	}

	/* OFFLINE */
	if (cpu_is_offline(cpup)) {
		if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			/* offline was the original state: done */
			PR_CPU("%s: leaving cpu %d OFFLINE\n",
			    f, cp->sbc_cpu_id);
		} else if (cpu_online(cpup)) {
			cmn_err(CE_WARN,
			    "sbd:%s: failed to online cpu %d",
			    f, cp->sbc_cpu_id);
			SBD_SET_ERR(ep, ESBD_ONLINE);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
			rv = SBD_CPUERR_FATAL;
			goto out;
		} else {
			SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR,
			    hp->h_err, EIO,
			    ESBD_ONLINE,
			    sbp->sb_cpupath[unit]);
		}
	}

	/* ONLINE */
	if (cpu_is_online(cpup)) {
		if (cpu_flagged_online(cp->sbc_cpu_flags)) {
			/* online was the original state: done */
			PR_CPU("%s: setting cpu %d ONLINE\n",
			    f, cp->sbc_cpu_id);
		} else if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			if (cpu_offline(cpup, cpu_offline_flags)) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to offline"
				    " cpu %d", f, cp->sbc_cpu_id);
				/* still functional, just not original state */
				rv = SBD_CPUERR_RECOVERABLE;
				goto out;
			}
		} else if (cpu_flagged_nointr(cp->sbc_cpu_flags)) {
			if (cpu_intr_disable(cpup)) {
				cmn_err(CE_WARN, "%s: failed to "
				    "disable interrupts on cpu %d",
				    f, cp->sbc_cpu_id);
				rv = SBD_CPUERR_RECOVERABLE;
			} else {
				PR_CPU("%s: setting cpu %d to NOINTR"
				    " (was online)\n",
				    f, cp->sbc_cpu_id);
			}
			goto out;
		}
	}

	/* NOINTR */
	if (cpu_is_nointr(cpup)) {
		if (cpu_flagged_online(cp->sbc_cpu_flags)) {
			cpu_intr_enable(cpup);
			PR_CPU("%s: setting cpu %d ONLINE"
			    "(was nointr)\n",
			    f, cp->sbc_cpu_id);
		}
		if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			if (cpu_offline(cpup, cpu_offline_flags)) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to offline"
				    " cpu %d", f, cp->sbc_cpu_id);
				rv = SBD_CPUERR_RECOVERABLE;
			}
		}
	}
out:
	mutex_exit(&cpu_lock);

	return (rv);
}
957 
958 int
959 sbd_connect_cpu(sbd_board_t *sbp, int unit)
960 {
961 	int		rv;
962 	processorid_t	cpuid;
963 	struct cpu	*cpu;
964 	dev_info_t	*dip;
965 	sbdp_handle_t	*hdp;
966 	extern kmutex_t	cpu_lock;
967 	static fn_t	f = "sbd_connect_cpu";
968 	sbd_handle_t	*hp = MACHBD2HD(sbp);
969 
970 	/*
971 	 * get dip for cpu just located in tree walk
972 	 */
973 	if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
974 		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];
975 		if (dip == NULL) {
976 			cmn_err(CE_WARN,
977 			"sbd:%s: bad dip for cpu unit %d board %d",
978 			f, unit, sbp->sb_num);
979 			return (-1);
980 		}
981 		PR_CPU("%s...\n", f);
982 	} else {
983 		return (0);
984 	}
985 
986 	/*
987 	 * if sbd has attached this cpu, no need to bring
988 	 * it out of reset
989 	 */
990 	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
991 		return (0);
992 	}
993 
994 	hdp = sbd_get_sbdp_handle(sbp, hp);
995 
996 	cpuid = sbdp_get_cpuid(hdp, dip);
997 	if (cpuid == -1) {
998 		sbd_release_sbdp_handle(hdp);
999 		return (-1);
1000 	}
1001 
1002 	/*
1003 	 * if the cpu is already under Solaris control,
1004 	 * do not wake it up
1005 	 */
1006 	mutex_enter(&cpu_lock);
1007 	cpu = cpu_get(cpuid);
1008 	mutex_exit(&cpu_lock);
1009 	if (cpu != NULL) {
1010 		sbd_release_sbdp_handle(hdp);
1011 		return (0);
1012 	}
1013 
1014 	rv = sbdp_connect_cpu(hdp, dip, cpuid);
1015 
1016 	if (rv != 0) {
1017 		sbp->sb_memaccess_ok = 0;
1018 		cmn_err(CE_WARN,
1019 			"sbd:%s: failed to wake up cpu unit %d board %d",
1020 			f, unit, sbp->sb_num);
1021 		sbd_release_sbdp_handle(hdp);
1022 		return (rv);
1023 	}
1024 	sbd_release_sbdp_handle(hdp);
1025 
1026 	return (rv);
1027 }
1028 
1029 int
1030 sbd_disconnect_cpu(sbd_handle_t *hp, int unit)
1031 {
1032 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
1033 	int		rv;
1034 	dev_info_t	*dip;
1035 	sbdp_handle_t	*hdp;
1036 	sbd_cpu_unit_t *cp;
1037 	processorid_t   cpuid;
1038 	static fn_t	f = "sbd_disconnect_cpu";
1039 
1040 	PR_CPU("%s...\n", f);
1041 
1042 	ASSERT((SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
1043 						SBD_STATE_CONNECTED) ||
1044 		(SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
1045 						SBD_STATE_UNCONFIGURED));
1046 
1047 	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);
1048 
1049 	cpuid = cp->sbc_cpu_id;
1050 
1051 	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];
1052 
1053 	hdp = sbd_get_sbdp_handle(sbp, hp);
1054 
1055 	rv = sbdp_disconnect_cpu(hdp, dip, cpuid);
1056 
1057 	if (rv != 0) {
1058 		SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
1059 	}
1060 	sbd_release_sbdp_handle(hdp);
1061 
1062 	return (rv);
1063 }
1064