/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2019 Peter Tribble.
 */

#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/machsystm.h>
#include <sys/callb.h>
#include <sys/cpu_module.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/traptrace.h>
#ifdef TRAPTRACE
#include <sys/bootconf.h>
#endif /* TRAPTRACE */
#include <sys/cpu_sgnblk_defs.h>

extern int cpu_intrq_setup(struct cpu *);
extern void cpu_intrq_cleanup(struct cpu *);
extern void cpu_intrq_register(struct cpu *);

struct cpu	*cpus;	/* pointer to other cpus; dynamically allocated */
struct cpu	*cpu[NCPU];	/* pointers to all CPUs */
uint64_t	cpu_pa[NCPU];	/* physical addresses of all CPU structures */
cpu_core_t	cpu_core[NCPU];	/* cpu_core structures */

#ifdef TRAPTRACE
caddr_t	ttrace_buf;	/* kmem64 traptrace for all cpus except 0 */
#endif /* TRAPTRACE */

/* bit mask of cpus ready for x-calls, protected by cpu_lock */
cpuset_t cpu_ready_set;

/* bit mask used to communicate with cpus during bringup */
static cpuset_t proxy_ready_set;

static void	slave_startup(void);

/*
 * Defined in $KARCH/os/mach_mp_startup.c
 */
#pragma weak init_cpu_info

/*
 * Amount of time (in milliseconds) we should wait before giving up on CPU
 * initialization and assuming that the CPU we're trying to wake up is dead
 * or out of control.
 */
#define	CPU_WAKEUP_GRACE_MSEC 1000

#ifdef TRAPTRACE
/*
 * Compute the size of the boot-time traptrace buffer allocation for
 * all cpus other than the boot cpu.
 */
size_t
calc_traptrace_sz(void)
{
	return (TRAP_TSIZE * (max_ncpus - 1));
}
#endif /* TRAPTRACE */


/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

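	/*
	 * Account for the new CPU in the kernel address space's hat so
	 * that TLB demap cross calls for kernel mappings will include it.
	 */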
	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Setup thread to start in slave_startup.
	 */
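	/*
	 * resume() on sun4 returns through the saved PC as if returning
	 * from a call, i.e. execution continues at t_pc + 8, so the entry
	 * point is biased by -8 here.  The initial stack pointer reserves
	 * one register window save area below t_stk and applies the
	 * SPARC V9 STACK_BIAS.
	 */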
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
	cp->cpu_startup_thread = tp;

	/*
	 * The dispatcher may discover the CPU before it is in cpu_ready_set
	 * and attempt to poke it.  Before the CPU is in cpu_ready_set, any
	 * cross calls to it will be dropped.  We initialize
	 * poke_cpu_outstanding to true so that poke_cpu will ignore any poke
	 * requests for this CPU.  Pokes that come in before the CPU is in
	 * cpu_ready_set can be ignored because the CPU is about to come
	 * online.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_TRUE;
}

/*
 * parametric flag setting functions.  these routines set the cpu
 * state just prior to releasing the slave cpu.
 */
void
cold_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu[cpuid];

	if (!(cp->cpu_flags & CPU_ENABLE))
		ncpus_intr_enabled++;

	cp->cpu_flags |= CPU_RUNNING | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	/*
	 * Add CPU_READY after the cpu_add_active() call
	 * to avoid pausing cp.
	 */
	cp->cpu_flags |= CPU_READY;	/* ready */
	cpu_set_state(cp);
}

static void
warm_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * warm start activates cpus into the OFFLINE state
	 */
	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS
	    | CPU_OFFLINE | CPU_QUIESCED;
	cpu_set_state(cp);
}

/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER	SLAVE
 * -------	----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *		the slave runs slave_startup and then sets the proxy
 *		the slave waits for the master to add slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *		the slave exits the startup thread and is running
 */
void
start_cpu(int cpuid, void (*flag_func)(int))
{
	extern void cpu_startup(int);
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * deal with the cpu flags in a phase-specific manner;
	 * for various reasons, this needs to run after the slave
	 * is checked in but before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}

#ifdef TRAPTRACE
int trap_tr0_inuse = 1;	/* it is always used on the boot cpu */
int trap_trace_inuse[NCPU];
#endif /* TRAPTRACE */

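/*
 * While a cpu structure sits on the free list it is not linked into the
 * active cpu list, so its cpu_prev link can be recycled as the free
 * list "next" pointer.
 */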
#define	cpu_next_free	cpu_prev

/*
 * Routine to set up a CPU to prepare for starting it up.
 */
int
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	extern void idle();
	int rval;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);

	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
		tt_index = -1;
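		/* -1: no trap_trace_inuse[] slot consumed; reusing trap_tr0 */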
	} else {
		for (tt_index = 0; tt_index < (max_ncpus-1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TSIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
#endif /* TRAPTRACE */
	/*
	 * initialize hv traptrace buffer for this CPU
	 */
	mach_htraptrace_setup(cpuid);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 * When dynamically allocating cpu structs,
		 * cpus is used as a pointer to a list of freed
		 * cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

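	/*
	 * No freed cpu structure was available; allocate a new one from
	 * static_alloc_arena so that it occupies memory that is never
	 * relocated (CPU_ALLOC_SIZE alignment presumably keeps each cpu_t
	 * in its own aligned chunk).
	 */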
	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE,
		    CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	cpu_vm_data_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It
	 * also avoids the possibility of reregistration in case a
	 * CPU restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	/*
	 * Add CPU to list of available CPUs.
	 * It'll be on the active list after it is started.
	 */
	cpu_add_unit(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);
	populate_idstr(cp);

	/*
	 * Initialize the CPU's physical ID cache and processor groups.
	 */
	pghw_physid_create(cp);
	(void) pg_cpu_init(cp, B_FALSE);

	if ((rval = cpu_intrq_setup(cp)) != 0) {
		return (rval);
	}

	/*
	 * Initialize MMU context domain information.
	 */
	sfmmu_cpu_init(cp);

	return (0);
}

/*
 * Routine to clean up a CPU after shutting it down.
 */
int
cleanup_cpu_common(int cpuid)
{
	struct cpu *cp;
#ifdef TRAPTRACE
	int i;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] != NULL);

	cp = cpu[cpuid];

	/* Free cpu module private data structures, including scrubber. */
	cpu_uninit_private(cp);

	/* Free cpu ID string and brand string. */
	if (cp->cpu_idstr)
		kmem_free(cp->cpu_idstr, strlen(cp->cpu_idstr) + 1);
	if (cp->cpu_brandstr)
		kmem_free(cp->cpu_brandstr, strlen(cp->cpu_brandstr) + 1);

	cpu_vm_data_destroy(cp);

	/*
	 * Remove CPU from list of available CPUs.
	 */
	cpu_del_unit(cpuid);

	/*
	 * Clean any machine specific interrupt states.
	 */
	cpu_intrq_cleanup(cp);

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause thread,
	 * and its interrupt threads.  Clean these up.
	 */
	cpu_destroy_bound_threads(cp);

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp, cp->cpu_intr_stack);

	/*
	 * Free hv traptrace buffer for this CPU.
	 */
	mach_htraptrace_cleanup(cpuid);
#ifdef TRAPTRACE
	/*
	 * Free the traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	newbuf = ctlp->d.vaddr_base;
	i = (newbuf - ttrace_buf) / (TRAP_TSIZE);
	if (((newbuf - ttrace_buf) % (TRAP_TSIZE) == 0) &&
	    ((i >= 0) && (i < (max_ncpus-1)))) {
		/*
		 * This CPU got its trap trace buffer from the
		 * boot-alloc'd bunch of them.
		 */
		trap_trace_inuse[i] = 0;
		bzero(newbuf, (TRAP_TSIZE));
	} else if (newbuf == trap_tr0) {
		trap_tr0_inuse = 0;
		bzero(trap_tr0, (TRAP_TSIZE));
	} else {
		cmn_err(CE_WARN, "failed to free trap trace buffer from cpu%d",
		    cpuid);
	}
	bzero(ctlp, sizeof (*ctlp));
#endif /* TRAPTRACE */

	/*
	 * There is a race condition with mutex_vector_enter() which
	 * caches a cpu pointer.  The race is detected by checking cpu_next.
	 */
	disp_cpu_fini(cp);
	cpu_pa[cpuid] = 0;
	if (CPU_MMU_CTXP(cp))
		sfmmu_cpu_cleanup(cp);
	bzero(cp, sizeof (*cp));

	/*
	 * Place the freed cpu structure on the list of freed cpus.
	 */
	if (cp != &cpu0) {
		if (cpus) {
			cp->cpu_next_free = cpus;
			cpus = cp;
		} else {
			cpus = cp;
		}
	}

	return (0);
}

/*
 * This routine is used to start a previously powered off processor.
 * Note that restarted cpus are initialized into the offline state.
 */
void
restart_other_cpu(int cpuid)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t sp;
	extern void idle();

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpuid < NCPU && cpu[cpuid] != NULL);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	cp = cpu[cpuid];

	common_startup_init(cp, cpuid);

	/*
	 * The idle thread's t_lock is held when the thread is suspended.
	 * Manually clear it here so that the suspended idle thread can
	 * be resumed, and reset the thread's PC so that it restarts at
	 * the top of idle().
	 */
	cp->cpu_intr_actv = 0;	/* clear the value from previous life */
	cp->cpu_m.mutex_ready = 0;	/* we are not ready yet */
	lock_clear(&cp->cpu_idle_thread->t_lock);
	tp = cp->cpu_idle_thread;

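	/*
	 * Rebuild the idle thread's initial stack frame and entry PC
	 * (same -8 resume bias as in common_startup_init()).
	 */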
	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;
	tp->t_pc = (uintptr_t)idle - 8;

	/*
	 * restart the cpu now
	 */
	promsafe_pause_cpus();
	start_cpu(cpuid, warm_flag_set);
	start_cpus();

	/* call cmn_err outside pause_cpus/start_cpus to avoid deadlock */
	cmn_err(CE_CONT, "!cpu%d initialization complete - restarted\n",
	    cpuid);
}

/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu *cp = CPU;
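	/*
	 * Snapshot cpu_flags before the master modifies them; the original
	 * value is handed to kcpc_hw_startup_cpu() at the end of startup.
	 */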
	ushort_t original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * the slave will wait here forever -- assuming that the master
	 * will get back to us.  if it doesn't we've got bigger problems
	 * than a master not replying to this slave.
	 * the small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/*
	 * The CPU is now in cpu_ready_set, safely able to take pokes.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * park the slave thread in a safe/quiet state and wait for the master
	 * to finish configuring this CPU before proceeding to thread_exit().
	 */
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}

extern struct cpu	*cpu[NCPU];	/* pointers to all CPUs */

/*
 * cpu_bringup_set is a tunable (via /etc/system, debugger, etc.) that
 * can be used during debugging to control which processors are brought
 * online at boot time.  The variable represents a bitmap of the id's
 * of the processors that will be brought online.  The initialization
 * of this variable depends on the type of cpuset_t, which varies
 * depending on the number of processors supported (see cpuvar.h).
 */
cpuset_t cpu_bringup_set;


/*
 * Generic start-all cpus entry.  Typically used during cold initialization.
 * Note that cold start cpus are initialized into the online state.
 */
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
		CPUSET_ALL(cpu_bringup_set);
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);
	populate_idstr(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();	/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}
	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		cpu_state_change_notify(cpuid, CPU_SETUP);

		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}