/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/machsystm.h>
#include <sys/callb.h>
#include <sys/cpu_module.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/traptrace.h>
#ifdef TRAPTRACE
#include <sys/bootconf.h>
#endif /* TRAPTRACE */
#include <sys/cpu_sgnblk_defs.h>

extern int cpu_intrq_setup(struct cpu *);
extern void cpu_intrq_cleanup(struct cpu *);
extern void cpu_intrq_register(struct cpu *);

struct cpu	*cpus;		/* pointer to other cpus; dynamically allocated */
struct cpu	*cpu[NCPU];	/* pointers to all CPUs */
uint64_t	cpu_pa[NCPU];	/* pointers to all CPUs in PA */
cpu_core_t	cpu_core[NCPU];	/* cpu_core structures */

#ifdef TRAPTRACE
caddr_t	ttrace_buf;	/* bop alloced traptrace for all cpus except 0 */
#endif /* TRAPTRACE */

/* bit mask of cpus ready for x-calls, protected by cpu_lock */
cpuset_t cpu_ready_set;

/* bit mask used to communicate with cpus during bringup */
static cpuset_t proxy_ready_set;

static void	slave_startup(void);

/*
 * Defined in $KARCH/os/mach_mp_startup.c
 */
#pragma weak init_cpu_info

/*
 * Amount of time (in milliseconds) we should wait before giving up on CPU
 * initialization and assuming that the CPU we're trying to wake up is
 * dead or out of control.
 */
#define	CPU_WAKEUP_GRACE_MSEC	1000

#ifdef TRAPTRACE
/*
 * This function sets up traptrace buffers for all cpus
 * other than the boot cpu.
 * Note that the memory at base will be allocated later.
 */
caddr_t
trap_trace_alloc(caddr_t base)
{
	caddr_t	vaddr;
	extern int max_ncpus;

	if (max_ncpus == 1) {
		return (base);
	}

	vaddr = (caddr_t)base;

	ttrace_buf = vaddr;
	PRM_DEBUG(ttrace_buf);
	return (vaddr + (TRAP_TSIZE * (max_ncpus - 1)));
}
#endif /* TRAPTRACE */

/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Setup thread to start in slave_startup.
	 */
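	/*
	 * Editor's note on the arithmetic below: resume() hands control
	 * to the new thread through the usual SPARC return convention
	 * (a ret jumps to %i7 + 8), so t_pc is biased by -8 to make the
	 * thread begin at slave_startup itself.  t_sp reserves room for
	 * one register window below the top of the stack and applies
	 * the V9 STACK_BIAS, since %sp is biased on sparcv9.
	 */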
	sp = tp->t_stk;
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
	cp->cpu_startup_thread = tp;
}

/*
 * parametric flag setting functions.  these routines set the cpu
 * state just prior to releasing the slave cpu.
 */
void
cold_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	/*
	 * Add CPU_READY after the cpu_add_active() call
	 * to avoid pausing cp.
	 */
	cp->cpu_flags |= CPU_READY;	/* ready */
	cpu_set_state(cp);
}

static void
warm_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * warm start activates cpus into the OFFLINE state
	 */
	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS
	    | CPU_OFFLINE | CPU_QUIESCED;
	cpu_set_state(cp);
}

/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER				SLAVE
 * -------				----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *					the slave runs slave_startup and
 *					then sets the proxy
 *					the slave waits for the master to
 *					add slave to the ready set
 *
 * the master finishes the initialization
 * and adds the slave to the ready set
 *
 *					the slave exits the startup thread
 *					and is running
 */
void
start_cpu(int cpuid, void (*flag_func)(int))
{
	extern void cpu_startup(int);
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in; each DELAY(1000) is ~1 ms */
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * deal with the cpu flags in a phase-specific manner
	 * for various reasons, this needs to run after the slave
	 * has checked in but before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}

#ifdef TRAPTRACE
int trap_tr0_inuse = 1;	/* it is always used on the boot cpu */
int trap_trace_inuse[NCPU];
#endif /* TRAPTRACE */

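/*
 * While a cpu structure sits on the free list, its cpu_prev link is not
 * otherwise used, so it is recycled as the free-list pointer; see the
 * free-list handling in setup_cpu_common() and cleanup_cpu_common().
 */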
#define	cpu_next_free	cpu_prev

/*
 * Routine to set up a CPU to prepare for starting it up.
 */
int
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	extern void idle();
	int rval;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);

	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
		tt_index = -1;
	} else {
		for (tt_index = 0; tt_index < (max_ncpus - 1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TSIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
#endif /* TRAPTRACE */
	/*
	 * initialize hv traptrace buffer for this CPU
	 */
	mach_htraptrace_setup(cpuid);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 * When dynamically allocating cpu structs,
		 * cpus is used as a pointer to a list of freed
		 * cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE,
		    CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	cpu_vm_data_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread.  In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself.  It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	/*
	 * Add CPU to list of available CPUs.
	 * It'll be on the active list after it is started.
	 */
	cpu_add_unit(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);

	/*
	 * Initialize the CPU's physical ID cache, and processor groups
	 */
	pghw_physid_create(cp);
	pg_cpu_init(cp);

	if ((rval = cpu_intrq_setup(cp)) != 0) {
		return (rval);
	}

	/*
	 * Initialize MMU context domain information.
	 */
	sfmmu_cpu_init(cp);

	return (0);
}

/*
 * Routine to clean up a CPU after shutting it down.
 */
int
cleanup_cpu_common(int cpuid)
{
	struct cpu *cp;
#ifdef TRAPTRACE
	int i;
	TRAP_TRACE_CTL *ctlp;
	caddr_t newbuf;
#endif /* TRAPTRACE */

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] != NULL);

	cp = cpu[cpuid];

	/* Free cpu module private data structures, including scrubber. */
	cpu_uninit_private(cp);

	/* Free cpu ID string and brand string. */
	if (cp->cpu_idstr)
		kmem_free(cp->cpu_idstr, strlen(cp->cpu_idstr) + 1);
	if (cp->cpu_brandstr)
		kmem_free(cp->cpu_brandstr, strlen(cp->cpu_brandstr) + 1);

	cpu_vm_data_destroy(cp);

	/*
	 * Remove CPU from list of available CPUs.
	 */
	cpu_del_unit(cpuid);

	/*
	 * Clean any machine specific interrupt states.
	 */
	cpu_intrq_cleanup(cp);

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause thread,
	 * and its interrupt threads.  Clean these up.
	 */
	cpu_destroy_bound_threads(cp);

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp, cp->cpu_intr_stack);

	/*
	 * Free hv traptrace buffer for this CPU.
	 */
	mach_htraptrace_cleanup(cpuid);
#ifdef TRAPTRACE
	/*
	 * Free the traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	newbuf = ctlp->d.vaddr_base;
	i = (newbuf - ttrace_buf) / (TRAP_TSIZE);
	if (((newbuf - ttrace_buf) % (TRAP_TSIZE) == 0) &&
	    ((i >= 0) && (i < (max_ncpus - 1)))) {
		/*
		 * This CPU got its trap trace buffer from the
		 * boot-alloc'd bunch of them.
		 */
		trap_trace_inuse[i] = 0;
		bzero(newbuf, (TRAP_TSIZE));
	} else if (newbuf == trap_tr0) {
		trap_tr0_inuse = 0;
		bzero(trap_tr0, (TRAP_TSIZE));
	} else {
		cmn_err(CE_WARN, "failed to free trap trace buffer from cpu%d",
		    cpuid);
	}
	bzero(ctlp, sizeof (*ctlp));
#endif /* TRAPTRACE */

	/*
	 * There is a race condition with mutex_vector_enter() which
	 * caches a cpu pointer.  The race is detected by checking cpu_next.
	 */
	disp_cpu_fini(cp);
	cpu_pa[cpuid] = 0;
	if (CPU_MMU_CTXP(cp))
		sfmmu_cpu_cleanup(cp);
	bzero(cp, sizeof (*cp));

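	/*
	 * Editor's note: the bzero() above also clears cp->cpu_next_free,
	 * so the structure can be pushed onto the free list below with
	 * its link already NULL.  The structure is recycled rather than
	 * freed because stale pointers to it may still be cached
	 * elsewhere (see the mutex_vector_enter() race noted above).
	 */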
	/*
	 * Place the freed cpu structure on the list of freed cpus.
	 */
	if (cp != &cpu0) {
		if (cpus) {
			cp->cpu_next_free = cpus;
			cpus = cp;
		} else
			cpus = cp;
	}

	return (0);
}

/*
 * This routine is used to start a previously powered off processor.
 * Note that restarted cpus are initialized into the offline state.
 */
void
restart_other_cpu(int cpuid)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t sp;
	extern void idle();

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpuid < NCPU && cpu[cpuid] != NULL);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	cp = cpu[cpuid];

	common_startup_init(cp, cpuid);

	/*
	 * idle thread t_lock is held when the idle thread is suspended.
	 * Manually unlock the t_lock of the idle thread so that we can
	 * resume the suspended idle thread.
	 * Also reset the PC of the idle thread so that it re-enters
	 * idle() from the top when resumed.
	 */
	cp->cpu_intr_actv = 0;		/* clear the value from previous life */
	cp->cpu_m.mutex_ready = 0;	/* we are not ready yet */
	lock_clear(&cp->cpu_idle_thread->t_lock);
	tp = cp->cpu_idle_thread;

	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;
	tp->t_pc = (uintptr_t)idle - 8;

	/*
	 * restart the cpu now
	 */
	promsafe_pause_cpus();
	start_cpu(cpuid, warm_flag_set);
	start_cpus();

	/* call cmn_err outside pause_cpus/start_cpus to avoid deadlock */
	cmn_err(CE_CONT, "!cpu%d initialization complete - restarted\n",
	    cpuid);
}

/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu *cp = CPU;
	ushort_t original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * the slave will wait here forever -- assuming that the master
	 * will get back to us.  if it doesn't we've got bigger problems
	 * than a master not replying to this slave.
	 * the small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * park the slave thread in a safe/quiet state and wait for the master
	 * to finish configuring this CPU before proceeding to thread_exit().
	 */
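	/*
	 * Editor's note: cold_flag_set() never sets CPU_QUIESCED, so on a
	 * cold start this loop falls straight through.  warm_flag_set()
	 * (used by restart_other_cpu()) leaves CPU_QUIESCED set, so after
	 * a warm start this spins until the flag is later cleared (e.g.,
	 * when the cpu is subsequently brought on-line).
	 */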
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}

extern struct cpu	*cpu[NCPU];	/* pointers to all CPUs */

/*
 * cpu_bringup_set is a tunable (via /etc/system, debugger, etc.) that
 * can be used during debugging to control which processors are brought
 * online at boot time.  The variable represents a bitmap of the ids
 * of the processors that will be brought online.  The initialization
 * of this variable depends on the type of cpuset_t, which varies
 * depending on the number of processors supported (see cpuvar.h).
 */
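/*
 * Editor's note: for example, on a configuration where cpuset_t is a
 * plain integer bitmask, adding "set cpu_bringup_set = 0x3" to
 * /etc/system would restrict boot-time bringup to cpus 0 and 1; where
 * cpuset_t is an array type, this form of assignment does not apply
 * (see cpuvar.h).
 */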
cpuset_t cpu_bringup_set;


/*
 * Generic start-all cpus entry.  Typically used during cold initialization.
 * Note that cold start cpus are initialized into the online state.
 */
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
#ifdef MPSAS
		/* just CPU 0 */
		CPUSET_ADD(cpu_bringup_set, 0);
#else
		CPUSET_ALL(cpu_bringup_set);
#endif
	}

	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}

	/*
	 * should we be initializing this cpu?
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		if (setup_cpu_common(cpuid)) {
			cmn_err(CE_PANIC, "cpu%d: setup failed", cpuid);
		}

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		/*
		 * XXX: register_cpu_setup() callbacks should be called here
		 * with a new setup code, CPU_BOOT (or something).
		 */
		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

	if (&cpu_mp_init)
		cpu_mp_init();
}