xref: /illumos-gate/usr/src/uts/sun4u/io/sbd_cpu.c (revision 9b9d39d2a32ff806d2431dbcc50968ef1e6d46b2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright 2019 Peter Tribble.
28  * Copyright 2019 Joyent, Inc.
29  */
30 
31 /*
32  * CPU support routines for DR
33  */
34 
35 #include <sys/debug.h>
36 #include <sys/types.h>
37 #include <sys/errno.h>
38 #include <sys/cred.h>
39 #include <sys/dditypes.h>
40 #include <sys/devops.h>
41 #include <sys/modctl.h>
42 #include <sys/poll.h>
43 #include <sys/conf.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/ddi_impldefs.h>
48 #include <sys/ndi_impldefs.h>
49 #include <sys/stat.h>
50 #include <sys/kmem.h>
51 #include <sys/processor.h>
52 #include <sys/cpuvar.h>
53 #include <sys/mem_config.h>
54 #include <sys/promif.h>
55 #include <sys/x_call.h>
56 #include <sys/cpu_sgnblk_defs.h>
57 #include <sys/membar.h>
58 #include <sys/stack.h>
59 #include <sys/sysmacros.h>
60 #include <sys/machsystm.h>
61 #include <sys/spitregs.h>
62 
63 #include <sys/archsystm.h>
64 #include <vm/hat_sfmmu.h>
65 #include <sys/pte.h>
66 #include <sys/mmu.h>
67 #include <sys/x_call.h>
68 #include <sys/cpu_module.h>
69 #include <sys/cheetahregs.h>
70 
71 #include <sys/autoconf.h>
72 #include <sys/cmn_err.h>
73 
74 #include <sys/sbdpriv.h>
75 
76 void
77 sbd_cpu_set_prop(sbd_cpu_unit_t *cp, dev_info_t *dip)
78 {
79 	uint32_t	clock_freq;
80 	int		ecache_size = 0;
81 	char		*cache_str = NULL;
82 
83 	/* read in the CPU speed */
84 	clock_freq = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
85 	    DDI_PROP_DONTPASS, "clock-frequency", 0);
86 
87 	ASSERT(clock_freq != 0);
88 
89 	/*
90 	 * The ecache property string is not the same
91 	 * for all CPU implementations.
92 	 */
93 	switch (cp->sbc_cpu_impl) {
94 	case CHEETAH_IMPL:
95 	case CHEETAH_PLUS_IMPL:
96 		cache_str = "ecache-size";
97 		break;
98 	case JAGUAR_IMPL:
99 		cache_str = "l2-cache-size";
100 		break;
101 	case PANTHER_IMPL:
102 		cache_str = "l3-cache-size";
103 		break;
104 	default:
105 		cmn_err(CE_WARN, "cpu implementation type "
106 		    "is an unknown %d value", cp->sbc_cpu_impl);
107 		ASSERT(0);
108 		break;
109 	}
110 
111 	if (cache_str != NULL) {
112 		/* read in the ecache size */
113 		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
114 		    DDI_PROP_DONTPASS, cache_str, 0);
115 	}
116 
117 	/*
118 	 * In the case the size is still 0,
119 	 * a zero value will be displayed running non-debug.
120 	 */
121 	ASSERT(ecache_size != 0);
122 
123 	/* convert to the proper units */
124 	cp->sbc_speed = (clock_freq + 500000) / 1000000;
125 	cp->sbc_ecache = ecache_size / (1024 * 1024);
126 }
127 
128 static void
129 sbd_fill_cpu_stat(sbd_cpu_unit_t *cp, dev_info_t *dip, sbd_cpu_stat_t *csp)
130 {
131 	int		namelen;
132 
133 	bzero((caddr_t)csp, sizeof (*csp));
134 	csp->cs_type = cp->sbc_cm.sbdev_type;
135 	csp->cs_unit = cp->sbc_cm.sbdev_unum;
136 	namelen = sizeof (csp->cs_name);
137 	(void) ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
138 	    OBP_DEVICETYPE, (caddr_t)csp->cs_name, &namelen);
139 	csp->cs_busy = cp->sbc_cm.sbdev_busy;
140 	csp->cs_time = cp->sbc_cm.sbdev_time;
141 	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
142 	csp->cs_cpuid = cp->sbc_cpu_id;
143 	csp->cs_suspend = 0;
144 
145 	/*
146 	 * If we have marked the cpu's condition previously
147 	 * then don't rewrite it
148 	 */
149 	if (csp->cs_cond != SBD_COND_UNUSABLE)
150 		csp->cs_cond = sbd_get_comp_cond(dip);
151 
152 	/*
153 	 * If the speed and ecache properties have not been
154 	 * cached yet, read them in from the device tree.
155 	 */
156 	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
157 		sbd_cpu_set_prop(cp, dip);
158 
159 	/* use the cached speed and ecache values */
160 	csp->cs_speed = cp->sbc_speed;
161 	csp->cs_ecache = cp->sbc_ecache;
162 }
163 
164 static void
165 sbd_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl,
166     sbd_cmp_stat_t *psp)
167 {
168 	int	core;
169 
170 	ASSERT(csp && psp && (ncores >= 1));
171 
172 	bzero((caddr_t)psp, sizeof (*psp));
173 
174 	/*
175 	 * Fill in the common status information based
176 	 * on the data for the first core.
177 	 */
178 	psp->ps_type = SBD_COMP_CMP;
179 	psp->ps_unit = SBD_CMP_NUM(csp->cs_unit);
180 	(void) strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
181 	psp->ps_cond = csp->cs_cond;
182 	psp->ps_busy = csp->cs_busy;
183 	psp->ps_time = csp->cs_time;
184 	psp->ps_ostate = csp->cs_ostate;
185 	psp->ps_suspend = csp->cs_suspend;
186 
187 	/* CMP specific status data */
188 	*psp->ps_cpuid = csp->cs_cpuid;
189 	psp->ps_ncores = 1;
190 	psp->ps_speed = csp->cs_speed;
191 	psp->ps_ecache = csp->cs_ecache;
192 
193 	/*
194 	 * Walk through the data for the remaining cores.
195 	 * Make any adjustments to the common status data,
196 	 * or the shared CMP specific data if necessary.
197 	 */
198 	for (core = 1; core < ncores; core++) {
199 
200 		/*
201 		 * The following properties should be the same
202 		 * for all the cores of the CMP.
203 		 */
204 		ASSERT(psp->ps_unit == SBD_CMP_NUM(csp[core].cs_unit));
205 		ASSERT(psp->ps_speed == csp[core].cs_speed);
206 
207 		psp->ps_cpuid[core] = csp[core].cs_cpuid;
208 		psp->ps_ncores++;
209 
210 		/*
211 		 * Jaguar has a split ecache, so the ecache
212 		 * for each core must be added together to
213 		 * get the total ecache for the whole chip.
214 		 */
215 		if (IS_JAGUAR(impl)) {
216 			psp->ps_ecache += csp[core].cs_ecache;
217 		}
218 
219 		/* adjust time if necessary */
220 		if (csp[core].cs_time > psp->ps_time) {
221 			psp->ps_time = csp[core].cs_time;
222 		}
223 
224 		psp->ps_busy |= csp[core].cs_busy;
225 
226 		/*
227 		 * If any of the cores are configured, the
228 		 * entire CMP is marked as configured.
229 		 */
230 		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
231 			psp->ps_ostate = csp[core].cs_ostate;
232 		}
233 	}
234 }
235 
/*
 * Gather status for the CPUs on the board selected by `devset',
 * writing one outgoing stat entry per CMP (a non-CMP CPU is treated
 * as a single-core CMP).  Returns the number of entries written.
 */
int
sbd_cpu_flags(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		cmp;
	int		ncpu;	/* count of stat entries produced */
	sbd_board_t	*sbp;
	sbdp_handle_t	*hdp;
	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];

	sbp = SBDH2BD(hp->h_sbd);
	hdp = sbd_get_sbdp_handle(sbp, hp);

	/*
	 * Grab the status lock before accessing the dip as we allow
	 * concurrent status and branch unconfigure and disconnect.
	 *
	 * The disconnect thread clears the present devset first
	 * and then destroys dips. It is possible that the status
	 * thread checks the present devset before they are cleared
	 * but accesses the dip after they are destroyed causing a
	 * panic. To prevent this, the status thread should check
	 * the present devset and access dips with status lock held.
	 * Similarly disconnect thread should clear the present devset
	 * and destroy dips with status lock held.
	 */
	mutex_enter(&sbp->sb_slock);

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= SBD_DEVS_PRESENT(sbp);

	/*
	 * Treat every CPU as a CMP.  In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = ncpu = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {

		int		ncores;
		int		core;
		dev_info_t	*dip;
		sbd_cpu_unit_t	*cp;
		sbd_cmp_stat_t	*psp;

		/* skip CMP units that were not requested */
		if (DEVSET_IN_SET(devset, SBD_COMP_CMP, cmp) == 0)
			continue;

		ncores = 0;

		/* collect a per-core stat for each populated core */
		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
			int	unit;

			unit = sbdp_portid_to_cpu_unit(cmp, core);

			/*
			 * Check to make sure the cpu is in a state
			 * where its fully initialized.
			 */
			if (SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
			    SBD_STATE_EMPTY)
				continue;

			dip = sbp->sb_devlist[NIX(SBD_COMP_CMP)][unit];
			if (dip == NULL)
				continue;

			cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

			sbd_fill_cpu_stat(cp, dip, &cstat[ncores++]);
		}

		/* nothing present for this CMP; no entry emitted */
		if (ncores == 0)
			continue;

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		/* note: cp still points at the last core processed above */
		if (CPU_IMPL_IS_CMP(cp->sbc_cpu_impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			sbd_fill_cmp_stat(cstat, ncores, cp->sbc_cpu_impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	mutex_exit(&sbp->sb_slock);

	sbd_release_sbdp_handle(hdp);

	return (ncpu);
}
338 
/*
 * Pre-release step for CPUs: offline each active CPU in `devlist'
 * and release it via sbdp.  On failure, previously processed CPUs
 * are rolled back with sbd_cancel_cpu().  Returns 0 on success,
 * -1 on failure (error details in the handle's sbderror).
 */
int
sbd_pre_release_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i, rv = 0, unit;
	dev_info_t	*dip;
	processorid_t	cpuid;
	struct cpu	*cpup;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cpu_unit_t	*cp;
	static fn_t	f = "sbd_pre_release_cpu";
	sbdp_handle_t	*hdp;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	/*
	 * May have to juggle bootproc in release_component
	 */
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;

		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* with FORCE, skip this device; otherwise abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get cpuid for "
					"dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}


		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to get unit (cpu %d)",
				f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		/*
		 * Snapshot the CPU's flags so sbd_cancel_cpu() can
		 * restore its original state if we have to unwind.
		 */
		cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;

		if (cpu_flagged_active(cp->sbc_cpu_flags)) {
			int cpu_offline_flags = 0;

			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
				cpu_offline_flags = CPU_FORCED;
			PR_CPU("%s: offlining cpuid %d unit %d", f,
				cpuid, unit);
			if (cpu_offline(cpu[cpuid], cpu_offline_flags)) {
				cmn_err(CE_WARN,
					"%s: failed to offline cpu %d",
					f, cpuid);
				rv = -1;
				SBD_SET_ERR(ep, ESBD_OFFLINE);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				/* bound threads are a common offline failure */
				cpup = cpu_get(cpuid);
				if (cpup && disp_bound_threads(cpup, 0)) {
					cmn_err(CE_WARN, "sbd:%s: thread(s) "
						"bound to cpu %d",
						f, cpup->cpu_id);
				}
				break;
			}
		}

		if (rv == 0) {
			if (sbdp_release_component(hdp, dip)) {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}

		if (rv)
			break;
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		/* walk backwards over everything processed so far */
		for (; i >= 0; i--, devlist--) {
			dip = devlist->dv_dip;
			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get unit for "
					"dip (0x%p)", f, (void *)dip);
				break;
			}
			(void) sbd_cancel_cpu(hp, unit);
		}
	}

	SBD_INJECT_ERR(SBD_OFFLINE_CPU_PSEUDO_ERR,
		hp->h_err, EIO,
		ESBD_OFFLINE,
		sbp->sb_cpupath[devnum - 1]);

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
457 
/*
 * Pre-attach step for CPUs: validate cpuid/unit for each device in
 * `devlist' and note UNCONFIGURED units.  Always returns 0.
 *
 * NOTE: this function acquires cpu_lock before returning and does
 * NOT release it; the lock is dropped by sbd_post_attach_cpu()
 * after the CPUs have been started and onlined.
 */
int
sbd_pre_attach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	int		unit;
	processorid_t	cpuid;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_istate_t	dstate;
	dev_info_t	*dip;
	static fn_t	f = "sbd_pre_attach_cpu";
	sbdp_handle_t	*hdp;

	PR_CPU("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;

		ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));

		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* with FORCE, skip this device; otherwise abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get cpuid for "
					"dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
			cmn_err(CE_WARN,
				"sbd:%s: failed to get unit (cpu %d)",
				f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		PR_CPU("%s: attach cpu-unit (%d.%d)\n",
			f, sbp->sb_num, unit);

		dstate = SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit);

		if (dstate == SBD_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n",
				f, cpuid);
		}

	}

	/* held across attach; released in sbd_post_attach_cpu() */
	mutex_enter(&cpu_lock);

	sbd_release_sbdp_handle(hdp);

	return (0);
}
529 
/*
 * Post-attach step for CPUs: power on and online each newly
 * attached CPU in `devlist', marking successfully handled units
 * SBD_COND_OK.  Runs with cpu_lock held (acquired by
 * sbd_pre_attach_cpu()) and releases it before returning.
 */
int
sbd_post_attach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	processorid_t	cpuid;
	struct cpu	*cp;
	dev_info_t	*dip;
	int		err = ESBD_NOERROR;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_post_attach_cpu";
	sbd_cpu_unit_t	*cpup;
	int		unit;

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* with FORCE, skip this device; otherwise abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpuid for "
				    "dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}

		cp = cpu_get(cpuid);

		if (cp == NULL) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: cpu_get failed for cpu %d",
				    f, cpuid);
				continue;
			} else {
				SBD_SET_ERR(ep, ESBD_INTERNAL);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				break;
			}
		}

		/* power the CPU on first if necessary */
		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				SBD_SET_ERR(ep, ESBD_CPUSTART);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				cmn_err(CE_WARN,
				    "%s: failed to power-on cpu %d",
				    f, cpuid);
				break;
			}
			SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR,
			    ep, EIO,
			    ESBD_CPUSTOP,
			    sbp->sb_cpupath[i]);
			PR_CPU("%s: cpu %d powered ON\n", f, cpuid);
		}

		/* then bring it online */
		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, cpuid);

			if (cpu_online(cp, 0) != 0) {
				SBD_SET_ERR(ep, ESBD_ONLINE);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				cmn_err(CE_WARN,
				    "%s: failed to online cpu %d",
				    f, cp->cpu_id);
			}
			SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR,
			    ep, EIO,
			    ESBD_ONLINE,
			    sbp->sb_cpupath[i]);
		}

		/*
		 * if there is no error mark the cpu as OK to use
		 */
		if (SBD_GET_ERR(ep) == 0) {
			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
					cmn_err(CE_WARN,
					    "sbd:%s: failed to get unit "
					    "(cpu %d)", f, cpuid);
					continue;
				} else {
					SBD_GET_PERR(hdp->h_err,
					    SBD_HD2ERR(hp));
					break;
				}
			}
			cpup = SBD_GET_BOARD_CPUUNIT(sbp, unit);
			cpup->sbc_cm.sbdev_cond = SBD_COND_OK;
		}
	}

	/* drop cpu_lock acquired in sbd_pre_attach_cpu() */
	mutex_exit(&cpu_lock);

	sbd_release_sbdp_handle(hdp);

	/*
	 * NOTE(review): `err' is never set after its initialization
	 * above, so this path always returns 0; errors are reported
	 * through the handle's sbderror instead.  Confirm intent.
	 */
	if (err != ESBD_NOERROR) {
		return (-1);
	} else {
		return (0);
	}
}
642 
/*
 * Pre-detach step for CPUs: power off each offline CPU in
 * `devlist'.  CPUs were offlined during release; a CPU found
 * still active is an error.  Returns 0 on success, -1 on failure.
 *
 * NOTE: this function acquires cpu_lock and does NOT release it;
 * the lock is dropped by sbd_post_detach_cpu() (which ASSERTs it
 * is held on entry).
 */
int
sbd_pre_detach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	int		i;
	int		unit;
	processorid_t	cpuid;
	dev_info_t	*dip;
	struct cpu	*cpu;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	static fn_t	f = "sbd_pre_detach_cpu";
	sbdp_handle_t	*hdp;
	int		rv = 0;

	PR_CPU("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* held across detach; released in sbd_post_detach_cpu() */
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* with FORCE, skip this device; otherwise abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpuid for "
				    "dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		cpu = cpu_get(cpuid);

		if (cpu == NULL) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get cpu %d",
				    f, cpuid);
				continue;
			} else {
				SBD_SET_ERR(ep, ESBD_INTERNAL);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
				break;
			}
		}

		unit = sbdp_get_unit_num(hdp, dip);
		if (unit < 0) {
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to get unit (cpu %d)",
				    f, cpuid);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}
		}

		PR_CPU("%s: OS detach cpu-unit (%d.%d)\n",
		    f, sbp->sb_num, unit);

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cpu)) {
			PR_CPU("%s: cpu %d already powered OFF\n", f, cpuid);
			continue;
		}

		if (cpu_is_offline(cpu)) {
			int	e;

			if (e = cpu_poweroff(cpu)) {
				cmn_err(CE_WARN,
				    "%s: failed to power-off cpu %d "
				    "(errno %d)",
				    f, cpu->cpu_id, e);
				SBD_SET_ERR(ep, ESBD_CPUSTOP);
				SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);

				rv = -1;
				break;
			} else {
				PR_CPU("%s: cpu %d powered OFF\n",
					f, cpuid);
			}
		} else {
			/* still online/active: cannot detach */
			cmn_err(CE_WARN, "%s: cpu %d still active",
				f, cpu->cpu_id);
			SBD_SET_ERR(ep, ESBD_BUSY);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[i]);
			rv = -1;
			break;
		}
	}

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
748 
/*
 * Post-detach step for CPUs: mark each successfully detached unit
 * SBD_COND_UNUSABLE.  Entered with cpu_lock held (acquired in
 * sbd_pre_detach_cpu()); the lock is released here.  Always
 * returns 0.
 */
int
sbd_post_detach_cpu(sbd_handle_t *hp, sbd_devlist_t *devlist, int devnum)
{
	static fn_t	f = "sbd_post_detach_cpu";
	int		i;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	processorid_t	cpuid;
	dev_info_t	*dip;
	sbdp_handle_t	*hdp;
	sbd_cpu_unit_t	*cpup;
	int		unit;

	PR_CPU("%s...\n", f);

	/*
	 * We should be holding the cpu_lock at this point,
	 * and should have blocked device tree changes.
	 */
	ASSERT(MUTEX_HELD(&cpu_lock));

	for (i = 0; i < devnum; i++, devlist++) {
		dip = devlist->dv_dip;
		/* a fresh sbdp handle is taken and released per device */
		hdp = sbd_get_sbdp_handle(sbp, hp);
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			/* with FORCE, skip this device; otherwise abort */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
				cmn_err(CE_WARN,
					"sbd:%s: failed to get cpuid for "
					"dip (0x%p)", f, (void *)dip);
				continue;
			} else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}
		/*
		 * if there is no error mark the cpu as unusable
		 */
		if (SBD_GET_ERR(ep) == 0) {
			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				if (hp->h_flags & SBD_IOCTL_FLAG_FORCE) {
					cmn_err(CE_WARN,
					    "sbd:%s: failed to get unit "
					    "(cpu %d)", f, cpuid);
					continue;
				} else {
					SBD_GET_PERR(hdp->h_err,
					    SBD_HD2ERR(hp));
					break;
				}
			}
			cpup = SBD_GET_BOARD_CPUUNIT(sbp, unit);
			cpup->sbc_cm.sbdev_cond = SBD_COND_UNUSABLE;
		}
		sbd_release_sbdp_handle(hdp);
	}

	/* drop cpu_lock acquired in sbd_pre_detach_cpu() */
	mutex_exit(&cpu_lock);


	return (0);
}
813 
814 /*
815  * Cancel previous release operation for cpu.  For cpus this means simply
816  * bringing cpus that were offline back online.  Note that they had to have been
817  * online at the time they were released.  If attempting to power on or online
818  * a CPU fails, SBD_CPUERR_FATAL is returned to indicate that the CPU appears to
819  * be unsalvageable.  If a CPU reaches an online or nointr state but can't be
820  * taken to a "lesser" state, SBD_CPUERR_RECOVERABLE is returned to indicate
821  * that it was not returned to its original state but appears to be functional.
822  * Note that the latter case can occur due to unexpected but non-erroneous CPU
823  * manipulation (e.g. by the "psradm" command) during the DR operation.
824  */
int
sbd_cancel_cpu(sbd_handle_t *hp, int unit)
{
	int		rv = SBD_CPUERR_NONE;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cpu_unit_t	*cp;
	static fn_t	f = "sbd_cancel_cpu";
	struct cpu	*cpup;
	int		cpu_offline_flags = 0;

	PR_ALL("%s...\n", f);

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	/*
	 * If CPU should remain off, nothing needs to be done.
	 */
	/* sbc_cpu_flags is the snapshot taken in sbd_pre_release_cpu() */
	if (cpu_flagged_poweredoff(cp->sbc_cpu_flags))
		return (rv);

	if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
		cpu_offline_flags = CPU_FORCED;

	/*
	 * CPU had been either offline, online, or set to no-intr.  We
	 * will return a component to its original state that it was
	 * prior to the failed DR operation.  There is a possible race
	 * condition between the calls to this function and re-obtaining
	 * the cpu_lock where a cpu state could change.  Because of this
	 * we can't externally document that we are trying to roll cpus
	 * back to their original state, but we believe a best effort
	 * should be made.
	 */

	mutex_enter(&cpu_lock);
	cpup = cpu[cp->sbc_cpu_id];

	/*
	 * The following will compare the cpu's current state with a
	 * snapshot of its state taken before the failed DR operation
	 * had started.
	 */
	/* POWEROFF */
	if (cpu_is_poweredoff(cpup)) {
		/* failure to power on is unsalvageable */
		if (cpu_poweron(cpup)) {
			cmn_err(CE_WARN,
			    "sbd:%s: failed to power-on cpu %d",
			    f, cp->sbc_cpu_id);
			SBD_SET_ERR(ep, ESBD_CPUSTART);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
			rv = SBD_CPUERR_FATAL;
			goto out;
		}
		SBD_INJECT_ERR(SBD_POWERON_CPU_PSEUDO_ERR,
		    hp->h_err, EIO,
		    ESBD_CPUSTART,
		    sbp->sb_cpupath[unit]);
	}

	/* OFFLINE */
	if (cpu_is_offline(cpup)) {
		if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			/* original state was offline; nothing to do */
			PR_CPU("%s: leaving cpu %d OFFLINE\n",
			    f, cp->sbc_cpu_id);
		} else if (cpu_online(cpup, 0)) {
			cmn_err(CE_WARN,
			    "sbd:%s: failed to online cpu %d",
			    f, cp->sbc_cpu_id);
			SBD_SET_ERR(ep, ESBD_ONLINE);
			SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
			rv = SBD_CPUERR_FATAL;
			goto out;
		} else {
			SBD_INJECT_ERR(SBD_ONLINE_CPU_PSEUDO_ERR,
			    hp->h_err, EIO,
			    ESBD_ONLINE,
			    sbp->sb_cpupath[unit]);
		}
	}

	/* ONLINE */
	if (cpu_is_online(cpup)) {
		if (cpu_flagged_online(cp->sbc_cpu_flags)) {
			/* already in the original state */
			PR_CPU("%s: setting cpu %d ONLINE\n",
			    f, cp->sbc_cpu_id);
		} else if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			/* functional even if we can't take it back offline */
			if (cpu_offline(cpup, cpu_offline_flags)) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to offline"
				    " cpu %d", f, cp->sbc_cpu_id);
				rv = SBD_CPUERR_RECOVERABLE;
				goto out;
			}
		} else if (cpu_flagged_nointr(cp->sbc_cpu_flags)) {
			if (cpu_intr_disable(cpup)) {
				cmn_err(CE_WARN, "%s: failed to "
				    "disable interrupts on cpu %d",
				    f, cp->sbc_cpu_id);
				rv = SBD_CPUERR_RECOVERABLE;
			} else {
				PR_CPU("%s: setting cpu %d to NOINTR"
				    " (was online)\n",
				    f, cp->sbc_cpu_id);
			}
			goto out;
		}
	}

	/* NOINTR */
	if (cpu_is_nointr(cpup)) {
		if (cpu_flagged_online(cp->sbc_cpu_flags)) {
			cpu_intr_enable(cpup);
			PR_CPU("%s: setting cpu %d ONLINE"
			    "(was nointr)\n",
			    f, cp->sbc_cpu_id);
		}
		if (cpu_flagged_offline(cp->sbc_cpu_flags)) {
			if (cpu_offline(cpup, cpu_offline_flags)) {
				cmn_err(CE_WARN,
				    "sbd:%s: failed to offline"
				    " cpu %d", f, cp->sbc_cpu_id);
				rv = SBD_CPUERR_RECOVERABLE;
			}
		}
	}
out:
	mutex_exit(&cpu_lock);

	return (rv);
}
956 
957 int
958 sbd_connect_cpu(sbd_board_t *sbp, int unit)
959 {
960 	int		rv;
961 	processorid_t	cpuid;
962 	struct cpu	*cpu;
963 	dev_info_t	*dip;
964 	sbdp_handle_t	*hdp;
965 	extern kmutex_t	cpu_lock;
966 	static fn_t	f = "sbd_connect_cpu";
967 	sbd_handle_t	*hp = MACHBD2HD(sbp);
968 
969 	/*
970 	 * get dip for cpu just located in tree walk
971 	 */
972 	if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
973 		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];
974 		if (dip == NULL) {
975 			cmn_err(CE_WARN,
976 			"sbd:%s: bad dip for cpu unit %d board %d",
977 			f, unit, sbp->sb_num);
978 			return (-1);
979 		}
980 		PR_CPU("%s...\n", f);
981 	} else {
982 		return (0);
983 	}
984 
985 	/*
986 	 * if sbd has attached this cpu, no need to bring
987 	 * it out of reset
988 	 */
989 	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
990 		return (0);
991 	}
992 
993 	hdp = sbd_get_sbdp_handle(sbp, hp);
994 
995 	cpuid = sbdp_get_cpuid(hdp, dip);
996 	if (cpuid == -1) {
997 		sbd_release_sbdp_handle(hdp);
998 		return (-1);
999 	}
1000 
1001 	/*
1002 	 * if the cpu is already under Solaris control,
1003 	 * do not wake it up
1004 	 */
1005 	mutex_enter(&cpu_lock);
1006 	cpu = cpu_get(cpuid);
1007 	mutex_exit(&cpu_lock);
1008 	if (cpu != NULL) {
1009 		sbd_release_sbdp_handle(hdp);
1010 		return (0);
1011 	}
1012 
1013 	rv = sbdp_connect_cpu(hdp, dip, cpuid);
1014 
1015 	if (rv != 0) {
1016 		sbp->sb_memaccess_ok = 0;
1017 		cmn_err(CE_WARN,
1018 			"sbd:%s: failed to wake up cpu unit %d board %d",
1019 			f, unit, sbp->sb_num);
1020 		sbd_release_sbdp_handle(hdp);
1021 		return (rv);
1022 	}
1023 	sbd_release_sbdp_handle(hdp);
1024 
1025 	return (rv);
1026 }
1027 
1028 int
1029 sbd_disconnect_cpu(sbd_handle_t *hp, int unit)
1030 {
1031 	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
1032 	int		rv;
1033 	dev_info_t	*dip;
1034 	sbdp_handle_t	*hdp;
1035 	sbd_cpu_unit_t *cp;
1036 	processorid_t   cpuid;
1037 	static fn_t	f = "sbd_disconnect_cpu";
1038 
1039 	PR_CPU("%s...\n", f);
1040 
1041 	ASSERT((SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
1042 						SBD_STATE_CONNECTED) ||
1043 		(SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, unit) ==
1044 						SBD_STATE_UNCONFIGURED));
1045 
1046 	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);
1047 
1048 	cpuid = cp->sbc_cpu_id;
1049 
1050 	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];
1051 
1052 	hdp = sbd_get_sbdp_handle(sbp, hp);
1053 
1054 	rv = sbdp_disconnect_cpu(hdp, dip, cpuid);
1055 
1056 	if (rv != 0) {
1057 		SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
1058 	}
1059 	sbd_release_sbdp_handle(hdp);
1060 
1061 	return (rv);
1062 }
1063