/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Welcome to the world of the "real mode platter".
 * See also startup.c, mpcore.s and apic.c for related routines.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/mach_mmu.h>
#include <sys/promif.h>
#include <sys/cpu.h>
#include <sys/cpu_event.h>
#include <sys/sunndi.h>
#include <sys/fs/dv_node.h>
#include <vm/hat_i86.h>
#include <vm/as.h>

extern cpuset_t cpu_ready_set;

extern int mp_start_cpu_common(cpu_t *cp, boolean_t boot);
extern void real_mode_start_cpu(void);
extern void real_mode_start_cpu_end(void);
extern void real_mode_stop_cpu_stage1(void);
extern void real_mode_stop_cpu_stage1_end(void);
extern void real_mode_stop_cpu_stage2(void);
extern void real_mode_stop_cpu_stage2_end(void);

void rmp_gdt_init(rm_platter_t *);

/*
 * Fill up the real mode platter to make it easy for the real mode code
 * to kick things off. This area should really be passed from boot to
 * the kernel, guaranteed to be below 1MB and aligned to 16 bytes, and
 * should have identical physical and virtual addresses in paged mode.
 */
static ushort_t *warm_reset_vector = NULL;

int
mach_cpucontext_init(void)
{
	ushort_t *vec;
	ulong_t addr;
	struct rm_platter *rm = (struct rm_platter *)rm_platter_va;

	if (!(vec = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (vec), PROT_READ | PROT_WRITE)))
		return (-1);

	/*
	 * Set up the secondary CPU BIOS warm-boot vector: write the
	 * code's offset within its page to 0x467 and the real-mode
	 * segment of the page to 0x469.
	 */
	addr = (ulong_t)((caddr_t)rm->rm_code - (caddr_t)rm) + rm_platter_pa;
	vec[0] = (ushort_t)(addr & PAGEOFFSET);
	vec[1] = (ushort_t)((addr & (0xfffff & PAGEMASK)) >> 4);
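
	/*
	 * Note: the BIOS warm-reset path treats 0x40:0x67 as a
	 * real-mode far pointer (offset word at 0x467, segment word at
	 * 0x469), which is why the segment is the page's physical
	 * address shifted right by 4.
	 */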
	warm_reset_vector = vec;

	/* Map real mode platter into kas so kernel can access it. */
	hat_devload(kas.a_hat,
	    (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    btop(rm_platter_pa), PROT_READ | PROT_WRITE | PROT_EXEC,
	    HAT_LOAD_NOCONSIST);

	/* Copy the CPU startup code to rm_platter if we're still booting. */
	if (!plat_dr_enabled()) {
		ASSERT((size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu <= RM_PLATTER_CODE_SIZE);
		bcopy((caddr_t)real_mode_start_cpu, (caddr_t)rm->rm_code,
		    (size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu);
	}

	return (0);
}

void
mach_cpucontext_fini(void)
{
	if (warm_reset_vector)
		psm_unmap_phys((caddr_t)warm_reset_vector,
		    sizeof (warm_reset_vector));
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}

extern void *long_mode_64(void);

/*ARGSUSED*/
void
rmp_gdt_init(rm_platter_t *rm)
{

	/* Use the kas address space for the CPU startup thread. */
	if (mmu_ptob(kas.a_hat->hat_htable->ht_pfn) > 0xffffffffUL) {
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%lx)",
		    mmu_ptob(kas.a_hat->hat_htable->ht_pfn));
	}

	/*
	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
	 * by code in real_mode_start_cpu():
	 *
	 * GDT[0]: NULL selector
	 * GDT[1]: 64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 *
	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
	 * a course of action as any other, though it may cause the entire
	 * platform to reset in some cases...
	 */
	rm->rm_temp_gdt[0] = 0;
	rm->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
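	/*
	 * Decoding 0x0020980000000000: access byte 0x98 = present,
	 * DPL 0, S=1, type 1000b (execute-only code); flags nibble 0x2
	 * sets L=1 (64-bit code segment). Base and limit are ignored
	 * for long-mode code.
	 */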

	rm->rm_temp_gdt_lim = (ushort_t)(sizeof (rm->rm_temp_gdt) - 1);
	rm->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);
	rm->rm_temp_idt_lim = 0;
	rm->rm_temp_idt_base = 0;

	/*
	 * Since the CPU needs to jump out of real mode through an
	 * identity-mapped address, calculate the physical address of
	 * long_mode_64() within the startup code copied to the platter.
	 */
	rm->rm_longmode64_addr = rm_platter_pa +
	    (uint32_t)((uintptr_t)long_mode_64 -
	    (uintptr_t)real_mode_start_cpu);
}

static void *
mach_cpucontext_alloc_tables(struct cpu *cp)
{
	tss_t *ntss;
	struct cpu_tables *ct;
	size_t ctsize;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allotted for cpu_tables up, so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 */
	ctsize = P2ROUNDUP(sizeof (*ct), PAGESIZE);
	ct = kmem_zalloc(ctsize, KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mach_cpucontext_alloc_tables: cpu%d misaligned tables",
		    cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;

	uintptr_t va;
	size_t len;

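	/*
	 * Each IST entry below points one past the end of its stack
	 * buffer: x86 stacks grow downward, so the initial RSP for each
	 * interrupt stack is the buffer's highest address.
	 */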
	/*
	 * #DF (double fault).
	 */
	ntss->tss_ist1 = (uintptr_t)&ct->ct_stack1[sizeof (ct->ct_stack1)];

	/*
	 * NMI (non-maskable interrupt).
	 */
	ntss->tss_ist2 = (uintptr_t)&ct->ct_stack2[sizeof (ct->ct_stack2)];

	/*
	 * #MC (machine check exception / hardware error)
	 */
	ntss->tss_ist3 = (uintptr_t)&ct->ct_stack3[sizeof (ct->ct_stack3)];

	/*
	 * #DB, #BP debug interrupts and KDI/kmdb
	 */
	ntss->tss_ist4 = (uintptr_t)&cp->cpu_m.mcpu_kpti_dbg.kf_tr_rsp;

	if (kpti_enable == 1) {
		/*
		 * #GP, #PF, #SS fault interrupts
		 */
		ntss->tss_ist5 = (uintptr_t)&cp->cpu_m.mcpu_kpti_flt.kf_tr_rsp;

		/*
		 * Used by all other interrupts
		 */
		ntss->tss_ist6 = (uint64_t)&cp->cpu_m.mcpu_kpti.kf_tr_rsp;

		/*
		 * On AMD64 we need to make sure that all of the pages of the
		 * struct cpu_tables are punched through onto the user CPU for
		 * kpti.
		 *
		 * The final page will always be the TSS, so treat that
		 * separately.
		 */
		for (va = (uintptr_t)ct, len = ctsize - MMU_PAGESIZE;
		    len >= MMU_PAGESIZE;
		    len -= MMU_PAGESIZE, va += MMU_PAGESIZE) {
			/* The doublefault stack must be RW */
			hati_cpu_punchin(cp, va, PROT_READ | PROT_WRITE);
		}
		ASSERT3U((uintptr_t)ntss, ==, va);
		hati_cpu_punchin(cp, (uintptr_t)ntss, PROT_READ);
	}

	/*
	 * Set the I/O bit map offset equal to the size of the TSS segment
	 * limit for no I/O permission map. This will cause all user I/O
	 * instructions to generate a #GP fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * Setup kernel tss.
	 */
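	/*
	 * Note: in long mode the TSS descriptor is a 16-byte system
	 * descriptor, so the GDT_KTSS entry effectively spans two
	 * 8-byte GDT slots.
	 */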
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	return (ct);
}

void *
mach_cpucontext_xalloc(struct cpu *cp, int optype)
{
	size_t len;
	struct cpu_tables *ct;
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	static int cpu_halt_code_ready;

	if (optype == MACH_CPUCONTEXT_OP_STOP) {
		ASSERT(plat_dr_enabled());

		/*
		 * The WARM_RESET_VECTOR has a limitation that the physical
		 * address written to it must be page-aligned. To work around
		 * this limitation, the CPU stop code has been split into
		 * two stages.
		 * The stage 2 code, which implements the real logic to halt
		 * CPUs, is copied to the rm_cpu_halt_code field in the real
		 * mode platter. The stage 1 code, which simply jumps to the
		 * stage 2 code in the rm_cpu_halt_code field, is copied to
		 * the rm_code field in the real mode platter and may be
		 * overwritten after the CPU has been stopped.
		 */
		if (!cpu_halt_code_ready) {
			/*
			 * The rm_cpu_halt_code field in the real mode platter
			 * is used by the CPU stop code only. So only copy the
			 * CPU stop stage 2 code into the rm_cpu_halt_code
			 * field on the first call.
			 */
			len = (size_t)real_mode_stop_cpu_stage2_end -
			    (size_t)real_mode_stop_cpu_stage2;
			ASSERT(len <= RM_PLATTER_CPU_HALT_CODE_SIZE);
			bcopy((caddr_t)real_mode_stop_cpu_stage2,
			    (caddr_t)rm->rm_cpu_halt_code, len);
			cpu_halt_code_ready = 1;
		}

		/*
		 * The rm_code field in the real mode platter is shared by
		 * the CPU start, CPU stop, CPR and fast reboot code. So copy
		 * the CPU stop stage 1 code into the rm_code field every time.
		 */
		len = (size_t)real_mode_stop_cpu_stage1_end -
		    (size_t)real_mode_stop_cpu_stage1;
		ASSERT(len <= RM_PLATTER_CODE_SIZE);
		bcopy((caddr_t)real_mode_stop_cpu_stage1,
		    (caddr_t)rm->rm_code, len);
		rm->rm_cpu_halted = 0;
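		/*
		 * The stage 2 halt code sets rm_cpu_halted to 0xdead once
		 * the CPU is parked; mp_cpu_poweroff() polls for that
		 * value before declaring the CPU stopped.
		 */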

		return (cp->cpu_m.mcpu_mach_ctx_ptr);
	} else if (optype != MACH_CPUCONTEXT_OP_START) {
		return (NULL);
	}

	/*
	 * Tables only need to be allocated when starting a CPU; tables
	 * allocated when starting a CPU are reused when stopping it.
	 */
	ct = mach_cpucontext_alloc_tables(cp);
	if (ct == NULL) {
		return (NULL);
	}

	/* Copy CPU startup code to rm_platter for CPU hot-add operations. */
	if (plat_dr_enabled()) {
		bcopy((caddr_t)real_mode_start_cpu, (caddr_t)rm->rm_code,
		    (size_t)real_mode_start_cpu_end -
		    (size_t)real_mode_start_cpu);
	}

	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */
	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;

	/*
	 * CPU needs to access kernel address space after powering on.
	 */
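	/*
	 * MAKECR3() builds a CR3 value from the kernel hat's top-level
	 * page-table PFN; PCID_NONE is used since PCIDs are not enabled
	 * on the starting CPU (CR4.PCIDE is masked off below).
	 */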
	rm->rm_pdbr = MAKECR3(kas.a_hat->hat_htable->ht_pfn, PCID_NONE);
	rm->rm_cpu = cp->cpu_id;

	/*
	 * We need to mask off any bits set on our boot CPU that can't apply
	 * while the subject CPU is initializing. If appropriate, they are
	 * enabled later on.
	 */
	rm->rm_cr4 = getcr4();
	rm->rm_cr4 &= ~(CR4_MCE | CR4_PCE | CR4_PCIDE);
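	/*
	 * CR4.PCIDE in particular can only be enabled from long mode
	 * while CR3's PCID field is zero, so the real-mode startup
	 * trampoline cannot run with it set.
	 */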

	rmp_gdt_init(rm);

	return (ct);
}

void
mach_cpucontext_xfree(struct cpu *cp, void *arg, int err, int optype)
{
	struct cpu_tables *ct = arg;

	ASSERT(&ct->ct_tss == cp->cpu_tss);
	if (optype == MACH_CPUCONTEXT_OP_START) {
		switch (err) {
		case 0:
			/*
			 * Save pointer for reuse when stopping CPU.
			 */
			cp->cpu_m.mcpu_mach_ctx_ptr = arg;
			break;
		case ETIMEDOUT:
			/*
			 * The processor was poked, but failed to start before
			 * we gave up waiting for it. In case it starts later,
			 * don't free anything.
			 */
			cp->cpu_m.mcpu_mach_ctx_ptr = arg;
			break;
		default:
			/*
			 * Some other, passive, error occurred.
			 */
			kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
			cp->cpu_tss = NULL;
			break;
		}
	} else if (optype == MACH_CPUCONTEXT_OP_STOP) {
		switch (err) {
		case 0:
			/*
			 * Free resources allocated when starting CPU.
			 */
			kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
			cp->cpu_tss = NULL;
			cp->cpu_m.mcpu_mach_ctx_ptr = NULL;
			break;
		default:
			/*
			 * Don't touch table pointer in case of failure.
			 */
			break;
		}
	} else {
		ASSERT(0);
	}
}

void *
mach_cpucontext_alloc(struct cpu *cp)
{
	return (mach_cpucontext_xalloc(cp, MACH_CPUCONTEXT_OP_START));
}

void
mach_cpucontext_free(struct cpu *cp, void *arg, int err)
{
	mach_cpucontext_xfree(cp, arg, err, MACH_CPUCONTEXT_OP_START);
}

/*
 * "Enter monitor." Called via cross-call from stop_other_cpus().
 */
int
mach_cpu_halt(xc_arg_t arg1, xc_arg_t arg2 __unused, xc_arg_t arg3 __unused)
{
	char *msg = (char *)arg1;

	if (msg)
		prom_printf("%s\n", msg);

	/*CONSTANTCONDITION*/
	while (1)
		;
	return (0);
}

void
mach_cpu_idle(void)
{
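	/*
	 * x86_md_clear() is the microarchitectural-data-sampling (MDS)
	 * mitigation hook: flush CPU buffers before halting, since idle
	 * is a point where we may cede the core to another hardware
	 * thread.
	 */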
	x86_md_clear();
	i86_halt();
}

void
mach_cpu_pause(volatile char *safe)
{
	/*
	 * This cpu is now safe.
	 */
	*safe = PAUSE_WAIT;
	membar_enter(); /* make sure stores are flushed */

	/*
	 * Now we wait. When we are allowed to continue, safe
	 * will be set to PAUSE_IDLE.
	 */
	while (*safe != PAUSE_IDLE)
		SMT_PAUSE();
}

/*
 * Power on the target CPU.
 */
int
mp_cpu_poweron(struct cpu *cp)
{
	int error;
	cpuset_t tempset;
	processorid_t cpuid;

	ASSERT(cp != NULL);
	cpuid = cp->cpu_id;
	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	} else if (cpuid < 0 || cpuid >= max_ncpus) {
		return (EINVAL);
	}

	/*
	 * The current x86 implementation of mp_cpu_configure() and
	 * mp_cpu_poweron() has a limitation: mp_cpu_poweron() may only
	 * be called once after calling mp_cpu_configure() for a specific
	 * CPU. This is because mp_cpu_poweron() destroys the data
	 * structures created by mp_cpu_configure(). So reject the request
	 * if the CPU has already been powered on once after calling
	 * mp_cpu_configure().
	 * This limitation only affects the p_online syscall; the DR
	 * driver is unaffected because it always invokes the public CPU
	 * management interfaces in the predefined order:
	 * cpu_configure()->cpu_poweron()...->cpu_poweroff()->cpu_unconfigure()
	 */
	if (cpuid_checkpass(cp, 4) || cp->cpu_thread == cp->cpu_idle_thread) {
		return (ENOTSUP);
	}

	/*
	 * Check that there's at least a megabyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts.
		 */
		kmem_reap();
		return (ENOMEM);
	}

	affinity_set(CPU->cpu_id);

	/*
	 * Start the target CPU. No need to call mach_cpucontext_fini()
	 * if mach_cpucontext_init() fails.
	 */
	if ((error = mach_cpucontext_init()) == 0) {
		error = mp_start_cpu_common(cp, B_FALSE);
		mach_cpucontext_fini();
	}
	if (error != 0) {
		affinity_clear();
		return (error);
	}

	/* Wait for the target cpu to reach READY state. */
	tempset = cpu_ready_set;
	while (!CPU_IN_SET(tempset, cpuid)) {
		delay(1);
		tempset = *((volatile cpuset_t *)&cpu_ready_set);
	}

	/* Mark the target CPU as available for mp operation. */
	CPUSET_ATOMIC_ADD(mp_cpus, cpuid);

	/* Free the space allocated to hold the microcode file */
	ucode_cleanup();

	affinity_clear();

	return (0);
}

#define	MP_CPU_DETACH_MAX_TRIES		5
#define	MP_CPU_DETACH_DELAY		100

static int
mp_cpu_detach_driver(dev_info_t *dip)
{
	int i;
	int rv = EBUSY;
	dev_info_t *pdip;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip != NULL);
	/*
	 * Check whether the caller holds pdip busy; that can cause
	 * deadlocks in e_ddi_branch_unconfigure(), which calls
	 * devfs_clean().
	 */
	if (DEVI_BUSY_OWNED(pdip)) {
		return (EDEADLOCK);
	}

	for (i = 0; i < MP_CPU_DETACH_MAX_TRIES; i++) {
		if (e_ddi_branch_unconfigure(dip, NULL, 0) == 0) {
			rv = 0;
			break;
		}
		DELAY(MP_CPU_DETACH_DELAY);
	}

	return (rv);
}

/*
 * Power off the target CPU.
 * Note: cpu_lock will be released and then reacquired.
 */
int
mp_cpu_poweroff(struct cpu *cp)
{
	int rv = 0;
	void *ctx;
	dev_info_t *dip = NULL;
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	extern void cpupm_start(cpu_t *);
	extern void cpupm_stop(cpu_t *);

	ASSERT(cp != NULL);
	ASSERT((cp->cpu_flags & CPU_OFFLINE) != 0);
	ASSERT((cp->cpu_flags & CPU_QUIESCED) != 0);

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	}
	/*
	 * There is no support for powering off cpu0 yet.
	 * There are many pieces of code which have a hard dependency on cpu0.
	 */
	if (cp->cpu_id == 0) {
		return (ENOTSUP);
	}

	if (mach_cpu_get_device_node(cp, &dip) != PSM_SUCCESS) {
		return (ENXIO);
	}
	ASSERT(dip != NULL);
	if (mp_cpu_detach_driver(dip) != 0) {
		rv = EBUSY;
		goto out_online;
	}

	/* Allocate CPU context for stopping */
	if (mach_cpucontext_init() != 0) {
		rv = ENXIO;
		goto out_online;
	}
	ctx = mach_cpucontext_xalloc(cp, MACH_CPUCONTEXT_OP_STOP);
	if (ctx == NULL) {
		rv = ENXIO;
		goto out_context_fini;
	}

	cpupm_stop(cp);
	cpu_event_fini_cpu(cp);

	if (cp->cpu_m.mcpu_cmi_hdl != NULL) {
		cmi_fini(cp->cpu_m.mcpu_cmi_hdl);
		cp->cpu_m.mcpu_cmi_hdl = NULL;
	}

	rv = mach_cpu_stop(cp, ctx);
	if (rv != 0) {
		goto out_enable_cmi;
	}

	/* Wait until the target CPU has been halted. */
	while (*(volatile ushort_t *)&(rm->rm_cpu_halted) != 0xdead) {
		delay(1);
	}
	rm->rm_cpu_halted = 0xffff;

	/* CPU_READY has been cleared by mach_cpu_stop. */
	ASSERT((cp->cpu_flags & CPU_READY) == 0);
	ASSERT((cp->cpu_flags & CPU_RUNNING) == 0);
	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
	CPUSET_ATOMIC_DEL(mp_cpus, cp->cpu_id);

	mach_cpucontext_xfree(cp, ctx, 0, MACH_CPUCONTEXT_OP_STOP);
	mach_cpucontext_fini();

	return (0);

out_enable_cmi:
	{
		cmi_hdl_t hdl;

		if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp))) != NULL) {
			if (is_x86_feature(x86_featureset, X86FSET_MCA))
				cmi_mca_init(hdl);
			cp->cpu_m.mcpu_cmi_hdl = hdl;
		}
	}
	cpu_event_init_cpu(cp);
	cpupm_start(cp);
	mach_cpucontext_xfree(cp, ctx, rv, MACH_CPUCONTEXT_OP_STOP);

out_context_fini:
	mach_cpucontext_fini();

out_online:
	(void) e_ddi_branch_configure(dip, NULL, 0);

	if (rv != EAGAIN && rv != ETIME) {
		rv = ENXIO;
	}

	return (rv);
}

/*
 * Return the vcpu state; since this could be a virtual environment
 * that we are unaware of, return "unknown".
 */
/* ARGSUSED */
int
vcpu_on_pcpu(processorid_t cpu)
{
	return (VCPU_STATE_UNKNOWN);
}