/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * CPU support routines for DR
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/dditypes.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/kmem.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/cmn_err.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

/* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
static char *dr_ie_fmt = "dr_cpu.c %d";

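/*
 * Sanity check a CPU unit against the board it claims to belong to.
 * The checks are compiled only under DEBUG; the routine always
 * returns 1 (sane).
 */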
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}

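/*
 * Map an errno value from the CPU (un)configuration layer onto the
 * corresponding sbd error code; anything unrecognized maps to
 * ESBD_INVAL.
 */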
static int
dr_errno2ecode(int error)
{
	int rv;

	switch (error) {
	case EBUSY:
		rv = ESBD_BUSY;
		break;
	case EINVAL:
		rv = ESBD_INVAL;
		break;
	case EALREADY:
		rv = ESBD_ALREADY;
		break;
	case ENODEV:
		rv = ESBD_NODEV;
		break;
	case ENOMEM:
		rv = ESBD_NOMEM;
		break;
	default:
		rv = ESBD_INVAL;
	}

	return (rv);
}

/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU starts. If they are unavailable, just set
 * them to zero.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t *err;
	dev_info_t *dip;
	uint64_t clock_freq;
	int ecache_size = 0;
	char *cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it may be kept in the core or cmp node instead,
		 * so keep looking up the devinfo tree (the lookup
		 * below deliberately omits DDI_PROP_DONTPASS).
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert clock-frequency from Hz to MHz (rounded), ecache to MB */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}

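/*
 * Initialize the DR software state for a CPU unit from the current
 * hardware state: pick the device state, cache the CPU id,
 * implementation and flags, and read the speed/ecache properties.
 */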
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t *err;
	dr_state_t new_state;
	int cpuid;
	int impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}

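/*
 * Note the locking protocol across the attach sequence: the board
 * status lock, the root devinfo node, and cpu_lock are all acquired
 * here and remain held through dr_attach_cpu(), to be dropped in
 * dr_post_attach_cpu().
 */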
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int i;
	static fn_t f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}

/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t *err;
	processorid_t cpuid;
	int rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

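		/*
		 * Unwind the devinfo branch created by
		 * drmach_configure() above. The original error has
		 * already been recorded, so a secondary failure here
		 * is deliberately discarded.
		 */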
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
		up->sbc_cpu_id = cpuid;
	}
}

/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error. Processes all units in list.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int i;
	int errflag = 0;
	static fn_t f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu *cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}
	}

	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}

/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int c, cix, i, lastoffline = -1, rv = 0;
	processorid_t cpuid;
	struct cpu *cp;
	dr_cpu_unit_t *up;
	dr_devset_t devset;
	sbd_dev_stat_t *ds;
	static fn_t f = "dr_pre_release_cpu";
	int cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *)kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
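		/*
		 * The inner loop breaks early only when a busy unit
		 * was found; propagate that break to the outer loop.
		 */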
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}

/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 */
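/*
 * On success, cpu_lock and the board status lock remain held so that
 * dr_detach_cpu() runs with them; dr_post_detach_cpu() drops them.
 * On failure they are dropped here before returning -1.
 */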
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int i;
	int cpu_flags = 0;
	static fn_t f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu *cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release. Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}

/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t *err;
	processorid_t cpuid;
	int rv;
	dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		} else {
			up->sbc_cpu_id = -1;
		}
	}
}

/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}

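/*
 * Fill in the status record for a single CPU unit, combining the
 * cached DR state with the platform status snapshot in pstat.
 */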
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}

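/*
 * Collapse the per-core status records for a CMP into a single
 * CMP-level record: the common fields come from the first core; the
 * remaining cores add their cpuid, bump the core count, and only
 * widen speed, ecache, busy, time and the configured state.
 */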
/*ARGSUSED2*/
static void
dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
{
	int core;

	ASSERT(csp && psp && (ncores >= 1));

	bzero((caddr_t)psp, sizeof (*psp));

	/*
	 * Fill in the common status information based
	 * on the data for the first core.
	 */
	psp->ps_type = SBD_COMP_CMP;
	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
	psp->ps_cond = csp->cs_cond;
	psp->ps_busy = csp->cs_busy;
	psp->ps_time = csp->cs_time;
	psp->ps_ostate = csp->cs_ostate;
	psp->ps_suspend = csp->cs_suspend;

	/* CMP specific status data */
	*psp->ps_cpuid = csp->cs_cpuid;
	psp->ps_ncores = 1;
	psp->ps_speed = csp->cs_speed;
	psp->ps_ecache = csp->cs_ecache;

	/*
	 * Walk through the data for the remaining cores.
	 * Make any adjustments to the common status data,
	 * or the shared CMP specific data if necessary.
	 */
	for (core = 1; core < ncores; core++) {
		/*
		 * The following properties should be the same
		 * for all the cores of the CMP.
		 */
		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
		    SBD_COMP_CMP));

		if (csp[core].cs_speed > psp->ps_speed)
			psp->ps_speed = csp[core].cs_speed;
		if (csp[core].cs_ecache > psp->ps_ecache)
			psp->ps_ecache = csp[core].cs_ecache;

		psp->ps_cpuid[core] = csp[core].cs_cpuid;
		psp->ps_ncores++;

		/* adjust time if necessary */
		if (csp[core].cs_time > psp->ps_time) {
			psp->ps_time = csp[core].cs_time;
		}

		psp->ps_busy |= csp[core].cs_busy;

		/*
		 * If any of the cores are configured, the
		 * entire CMP is marked as configured.
		 */
		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
			psp->ps_ostate = csp[core].cs_ostate;
		}
	}
}

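/*
 * Status entry point for the CPUs on a board. Fills dsp with one
 * record per CPU/CMP present in devset and returns the number of
 * records written.
 */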
int
dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int cmp;
	int core;
	int ncpu;
	dr_board_t *bp;
	sbd_cpu_stat_t *cstat;
	int impl;

	bp = hp->h_bd;
	ncpu = 0;

	devset &= DR_DEVS_PRESENT(bp);
	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
	    KM_SLEEP);

	/*
	 * Treat every CPU as a CMP. In the case where the
	 * device is not a CMP, treat it as a CMP with only
	 * one core.
	 */
	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
		int ncores;
		dr_cpu_unit_t *cp;
		drmach_status_t pstat;
		sbd_error_t *err;
		sbd_cmp_stat_t *psp;

		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
			continue;
		}

		ncores = 0;

		for (core = 0; core < MAX_CORES_PER_CMP; core++) {

			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));

			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
				/* present, but not fully initialized */
				continue;
			}

			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));

			/* skip if not present */
			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
				continue;
			}

			/* fetch platform status */
			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
			if (err) {
				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
				continue;
			}

			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
			/*
			 * We should set impl here because the last core
			 * found might be EMPTY or not present.
			 */
			impl = cp->sbc_cpu_impl;
		}

		if (ncores == 0) {
			continue;
		}

		/*
		 * Store the data to the outgoing array. If the
		 * device is a CMP, combine all the data for the
		 * cores into a single stat structure.
		 *
		 * The check for a CMP device uses the last core
		 * found, assuming that all cores will have the
		 * same implementation.
		 */
		if (CPU_IMPL_IS_CMP(impl)) {
			psp = (sbd_cmp_stat_t *)dsp;
			dr_fill_cmp_stat(cstat, ncores, impl, psp);
		} else {
			ASSERT(ncores == 1);
			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
		}

		dsp++;
		ncpu++;
	}

	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);

	return (ncpu);
}

/*
 * Cancel a previous release operation for a cpu.
 * For cpus this means simply bringing back online any cpus
 * that were offlined during release. Note that they had to
 * have been online at the time they were released.
 */
int
dr_cancel_cpu(dr_cpu_unit_t *up)
{
	int rv = 0;
	static fn_t f = "dr_cancel_cpu";

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (cpu_flagged_active(up->sbc_cpu_flags)) {
		struct cpu *cp;

		/*
		 * CPU had been online, so go ahead and
		 * bring it back online.
		 */
		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);

		mutex_enter(&cpu_lock);
		cp = cpu[up->sbc_cpu_id];

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp)) {
				cmn_err(CE_WARN, "%s: failed to power-on "
				    "cpu %d", f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_offline(cp)) {
			if (cpu_online(cp)) {
				cmn_err(CE_WARN, "%s: failed to online cpu %d",
				    f, up->sbc_cpu_id);
				rv = -1;
			}
		}

		if (rv == 0 && cpu_is_online(cp)) {
			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
				if (cpu_intr_disable(cp) != 0) {
					cmn_err(CE_WARN, "%s: failed to "
					    "disable interrupts on cpu %d", f,
					    up->sbc_cpu_id);
				}
			}
		}

		mutex_exit(&cpu_lock);
	}

	return (rv);
}

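/*
 * Disconnect a cpu unit. If the unit is still in the CONNECTED state
 * it was never configured and there is nothing to undo; otherwise hand
 * it back to the platform layer via drmach_cpu_disconnect().
 */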
int
dr_disconnect_cpu(dr_cpu_unit_t *up)
{
	sbd_error_t *err;
	static fn_t f = "dr_disconnect_cpu";

	PR_CPU("%s...\n", f);

	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));

	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));

	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
		/*
		 * Cpus were never brought in and so are still
		 * effectively disconnected, so nothing to do here.
		 */
		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
		return (0);
	}

	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
	if (err == NULL) {
		return (0);
	} else {
		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
		return (-1);
	}
	/*NOTREACHED*/
}