/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/t_lock.h>
#include <sys/uadmin.h>
#include <sys/panic.h>
#include <sys/reboot.h>
#include <sys/autoconf.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/membar.h>
#include <vm/hat_sfmmu.h>
#include <sys/cpu_module.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/intreg.h>
#include <sys/consdev.h>
#include <sys/kdi_impl.h>
#include <sys/traptrace.h>
#include <sys/hypervisor_api.h>
#include <sys/vmsystm.h>
#include <sys/dtrace.h>
#include <sys/xc_impl.h>
#include <sys/callb.h>
#include <sys/mdesc.h>
#include <sys/mach_descrip.h>
#include <sys/wdt.h>

/*
 * hvdump_buf_va is a pointer to the currently-configured hvdump_buf.
 * A value of NULL indicates that this area is not configured.
 * hvdump_buf_sz is tunable but will be clamped to HVDUMP_SIZE_MAX.
 */

caddr_t hvdump_buf_va;
uint64_t hvdump_buf_sz = HVDUMP_SIZE_DEFAULT;
static uint64_t hvdump_buf_pa;

u_longlong_t panic_tick;

extern u_longlong_t gettick();
static void reboot_machine(char *);
static void update_hvdump_buffer(void);

/*
 * For xt_sync synchronization.
 */
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

/*
 * We keep our own copies, used for cache flushing, because we can be called
 * before cpu_fiximpl().
 */
static int kdi_dcache_size;
static int kdi_dcache_linesize;
static int kdi_icache_size;
static int kdi_icache_linesize;

/*
 * Assembly support for generic modules in sun4v/ml/mach_xc.s
 */
extern void init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
extern void kdi_flush_idcache(int, int, int, int);
extern uint64_t get_cpuaddr(uint64_t, uint64_t);

/*
 * Machine dependent code to reboot.
 * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
 * to a string to be used as the argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panic'ing or being suspended).
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *bootstr, boolean_t invoke_cb)
{
	extern void pm_cfb_check_and_powerup(void);

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * At a high interrupt level we can't:
	 *	1) bring up the console
	 * or
	 *	2) wait for pending interrupts prior to redistribution
	 *	   to the current CPU
	 *
	 * so we do them now.
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	if (memsegs != NULL)
		page_retire_hunt(page_retire_mdboot_cb);

	/*
	 * stop other cpus which also raise our priority. since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	stop_other_cpus();

	/*
	 * try and reset leaf devices. reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices
	 */
	reset_leaves();

	watchdog_clear();

	if (fcn == AD_HALT) {
		halt((char *)NULL);
	} else if (fcn == AD_POWEROFF) {
		power_down(NULL);
	} else {
		if (bootstr == NULL) {
			switch (fcn) {

			case AD_BOOT:
				bootstr = "";
				break;

			case AD_IBOOT:
				bootstr = "-a";
				break;

			case AD_SBOOT:
				bootstr = "-s";
				break;

			case AD_SIBOOT:
				bootstr = "-sa";
				break;
			default:
				cmn_err(CE_WARN,
				    "mdboot: invalid function %d", fcn);
				bootstr = "";
				break;
			}
		}
		reboot_machine(bootstr);
	}
	/* MAYBE REACHED */
}

/* mdpreboot - may be called prior to mdboot while root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *bootstr)
{
}

/*
 * Halt the machine and then reboot with the device
 * and arguments specified in bootstr.
 */
static void
reboot_machine(char *bootstr)
{
	flush_windows();
	stop_other_cpus();		/* send stop signal to other CPUs */
	prom_printf("rebooting...\n");
	/*
	 * For platforms that use CPU signatures, we
	 * need to set the signature block to OS and
	 * the state to exiting for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_REBOOT, -1);
	prom_reboot(bootstr);
	/*NOTREACHED*/
}

/*
 * We use the x-trap mechanism and idle_stop_xcall() to stop the other CPUs.
 * Once in panic_idle() they raise spl, record their location, and spin.
 */
static void
panic_idle(void)
{
	(void) spl7();

	debug_flush_windows();
	(void) setjmp(&curthread->t_pcb);

	CPU->cpu_m.in_prom = 1;
	membar_stld();

	for (;;);
}

/*
 * Force the other CPUs to trap into panic_idle(), and then remove them
 * from the cpu_ready_set so they will no longer receive cross-calls.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	cpuset_t cps;
	int i;

	(void) splzs();
	CPUSET_ALL_BUT(cps, cp->cpu_id);
	xt_some(cps, (xcfunc_t *)idle_stop_xcall, (uint64_t)&panic_idle, NULL);

	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && CPU_XCALL_READY(i)) {
			int ntries = 0x10000;

			while (!cpu[i]->cpu_m.in_prom && ntries) {
				DELAY(50);
				ntries--;
			}

			if (!cpu[i]->cpu_m.in_prom)
				printf("panic: failed to stop cpu%d\n", i);

			cpu[i]->cpu_flags &= ~CPU_READY;
			cpu[i]->cpu_flags |= CPU_QUIESCED;
			CPUSET_DEL(cpu_ready_set, cpu[i]->cpu_id);
		}
	}
}

/*
 * Platform callback following each entry to panicsys(). If we've panicked at
 * level 14, we examine t_panic_trap to see if a fatal trap occurred. If so,
 * we disable further %tick_cmpr interrupts. If not, an explicit call to panic
 * was made and so we re-enqueue an interrupt request structure to allow
 * further level 14 interrupts to be processed once we lower PIL. This allows
 * us to handle panics from the deadman() CY_HIGH_LEVEL cyclic.
 */
void
panic_enter_hw(int spl)
{
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	if (spl == ipltospl(PIL_14)) {
		uint_t opstate = disable_vec_intr();

		if (curthread->t_panic_trap != NULL) {
			tickcmpr_disable();
			intr_dequeue_req(PIL_14, cbe_level14_inum);
		} else {
			if (!tickcmpr_disabled())
				intr_enqueue_req(PIL_14, cbe_level14_inum);
			/*
			 * Clear SOFTINT<14>, SOFTINT<0> (TICK_INT)
			 * and SOFTINT<16> (STICK_INT) to indicate
			 * that the current level 14 has been serviced.
			 */
			wr_clr_softint((1 << PIL_14) |
			    TICK_INT_MASK | STICK_INT_MASK);
		}

		enable_vec_intr(opstate);
	}
}

/*
 * Miscellaneous hardware-specific code to execute after panicstr is set
 * by the panic code: we also print and record PTL1 panic information here.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	extern uint_t getpstate(void);
	extern void setpstate(uint_t);

	/*
	 * Turn off TRAPTRACE and save the current %tick value in panic_tick.
	 */
	if (!panic_tick) {
		panic_tick = gettick();
		if (mach_htraptrace_enable) {
			uint64_t prev_freeze;

			/* there are no possible error codes for this hcall */
			(void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL,
			    &prev_freeze);
		}
#ifdef TRAPTRACE
		TRAPTRACE_FREEZE;
#endif
	}
	/*
	 * For Platforms that use CPU signatures, we
	 * need to set the signature block to OS, the state to
	 * exiting, and the substate to panic for all the processors.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_PANIC, -1);

	update_hvdump_buffer();

	/*
	 * Disable further ECC errors from the bus nexus.
	 */
	(void) bus_func_invoke(BF_TYPE_ERRDIS);

	/*
	 * Redirect all interrupts to the current CPU.
	 */
	intr_redist_all_cpus_shutdown();

	/*
	 * This call exists solely to support dumps to network
	 * devices after sync from OBP.
	 *
	 * If we came here via the sync callback, then on some
	 * platforms, interrupts may have arrived while we were
	 * stopped in OBP. OBP will arrange for those interrupts to
	 * be redelivered if you say "go", but not if you invoke a
	 * client callback like 'sync'. For some dump devices
	 * (network swap devices), we need interrupts to be
	 * delivered in order to dump, so we have to call the bus
	 * nexus driver to reset the interrupt state machines.
	 */
	(void) bus_func_invoke(BF_TYPE_RESINTR);

	setpstate(getpstate() | PSTATE_IE);
}

/*
 * Platforms that use CPU signatures need to set the signature block to OS and
 * the state to exiting for all CPUs. PANIC_CONT indicates that we're about to
 * write the crash dump, which tells the SSP/SMS to begin a timeout routine to
 * reboot the machine if the dump never completes.
 */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_DUMP, -1);
}

/*
 * for ptl1_panic
 */
void
ptl1_init_cpu(struct cpu *cpu)
{
	ptl1_state_t *pstate = &cpu->cpu_m.ptl1_state;

	/*CONSTCOND*/
	if (sizeof (struct cpu) + PTL1_SSIZE > CPU_ALLOC_SIZE) {
		panic("ptl1_init_cpu: not enough space left for ptl1_panic "
		    "stack, sizeof (struct cpu) = %lu",
		    (unsigned long)sizeof (struct cpu));
	}

	pstate->ptl1_stktop = (uintptr_t)cpu + CPU_ALLOC_SIZE;
	cpu_pa[cpu->cpu_id] = va_to_pa(cpu);
}
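
/*
 * Note on the handler below: ptl1_panic_handler() is entered from the
 * low-level ptl1_panic() trap path, which is expected to have saved the
 * trapping CPU's register and trap-stack state into the per-CPU
 * ptl1_state area set up by ptl1_init_cpu() above. The panic reason
 * code is assumed to be passed in %g1, which is why the handler pulls
 * it out of ptl1_gregs[0].ptl1_g1 and uses it to index ptl1_reasons[].
 */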

void
ptl1_panic_handler(ptl1_state_t *pstate)
{
	static const char *ptl1_reasons[] = {
#ifdef PTL1_PANIC_DEBUG
		"trap for debug purpose",	/* PTL1_BAD_DEBUG */
#else
		"unknown trap",			/* PTL1_BAD_DEBUG */
#endif
		"register window trap",		/* PTL1_BAD_WTRAP */
		"kernel MMU miss",		/* PTL1_BAD_KMISS */
		"kernel protection fault",	/* PTL1_BAD_KPROT_FAULT */
		"ISM MMU miss",			/* PTL1_BAD_ISM */
		"kernel MMU trap",		/* PTL1_BAD_MMUTRAP */
		"kernel trap handler state",	/* PTL1_BAD_TRAP */
		"floating point trap",		/* PTL1_BAD_FPTRAP */
#ifdef DEBUG
		"pointer to intr_req",		/* PTL1_BAD_INTR_REQ */
#else
		"unknown trap",			/* PTL1_BAD_INTR_REQ */
#endif
#ifdef TRAPTRACE
		"TRACE_PTR state",		/* PTL1_BAD_TRACE_PTR */
#else
		"unknown trap",			/* PTL1_BAD_TRACE_PTR */
#endif
		"stack overflow",		/* PTL1_BAD_STACK */
		"DTrace flags",			/* PTL1_BAD_DTRACE_FLAGS */
		"attempt to steal locked ctx",	/* PTL1_BAD_CTX_STEAL */
		"CPU ECC error loop",		/* PTL1_BAD_ECC */
		"unexpected error from hypervisor call", /* PTL1_BAD_HCALL */
		"unexpected global level(%gl)",	/* PTL1_BAD_GL */
		"Watchdog Reset",		/* PTL1_BAD_WATCHDOG */
		"unexpected RED mode trap",	/* PTL1_BAD_RED */
		"return value EINVAL from hcall: "
		    "UNMAP_PERM_ADDR",	/* PTL1_BAD_HCALL_UNMAP_PERM_EINVAL */
		"return value ENOMAP from hcall: "
		    "UNMAP_PERM_ADDR",	/* PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP */
	};

	uint_t reason = pstate->ptl1_regs.ptl1_gregs[0].ptl1_g1;
	uint_t tl = pstate->ptl1_regs.ptl1_trap_regs[0].ptl1_tl;
	struct trap_info ti = { 0 };

	/*
	 * Use trap_info as a place holder to call panic_savetrap() and
	 * panic_showtrap() to save and print out ptl1_panic information.
	 */
	if (curthread->t_panic_trap == NULL)
		curthread->t_panic_trap = &ti;

	if (reason < sizeof (ptl1_reasons) / sizeof (ptl1_reasons[0]))
		panic("bad %s at TL %u", ptl1_reasons[reason], tl);
	else
		panic("ptl1_panic reason 0x%x at TL %u", reason, tl);
}

void
clear_watchdog_on_exit(void)
{
	prom_printf("Debugging requested; hardware watchdog suspended.\n");
	(void) watchdog_suspend();
}

/*
 * Restore the watchdog timer when returning from a debugger
 * after a panic or L1-A and resume watchdog pat.
 */
void
restore_watchdog_on_entry()
{
	watchdog_resume();
}

int
kdi_watchdog_disable(void)
{
	watchdog_suspend();

	return (0);
}

void
kdi_watchdog_restore(void)
{
	watchdog_resume();
}

void
mach_dump_buffer_init(void)
{
	uint64_t ret, minsize = 0;

	if (hvdump_buf_sz > HVDUMP_SIZE_MAX)
		hvdump_buf_sz = HVDUMP_SIZE_MAX;

	hvdump_buf_va = contig_mem_alloc_align(hvdump_buf_sz, PAGESIZE);
	if (hvdump_buf_va == NULL)
		return;

	hvdump_buf_pa = va_to_pa(hvdump_buf_va);

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &minsize);

	if (ret != H_EOK) {
		contig_mem_free(hvdump_buf_va, hvdump_buf_sz);
		hvdump_buf_va = NULL;
		cmn_err(CE_NOTE, "!Error in setting up hvstate "
		    "dump buffer. Error = 0x%lx, size = 0x%lx, "
		    "buf_pa = 0x%lx", ret, hvdump_buf_sz,
		    hvdump_buf_pa);

		if (ret == H_EINVAL) {
			cmn_err(CE_NOTE, "!Buffer size too small. "
			    "Available buffer size = 0x%lx, "
			    "Minimum buffer size required = 0x%lx",
			    hvdump_buf_sz, minsize);
		}
	}
}
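
/*
 * Since hvdump_buf_sz is an ordinary kernel global, it can be tuned at
 * boot time in the usual way, for example with an /etc/system entry
 * along the lines of (the size shown is purely illustrative):
 *
 *	set hvdump_buf_sz = 0x10000
 *
 * Any value larger than HVDUMP_SIZE_MAX is clamped by
 * mach_dump_buffer_init() above before the buffer is registered with
 * the hypervisor via hv_dump_buf_update().
 */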

static void
update_hvdump_buffer(void)
{
	uint64_t ret, dummy_val;

	if (hvdump_buf_va == NULL)
		return;

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &dummy_val);
	if (ret != H_EOK) {
		cmn_err(CE_NOTE, "!Cannot update hvstate dump "
		    "buffer. Error = 0x%lx", ret);
	}
}


static int
getintprop(pnode_t node, char *name, int deflt)
{
	int value;

	switch (prom_getproplen(node, name)) {
	case 0:
		value = 1;	/* boolean properties */
		break;

	case sizeof (int):
		(void) prom_getprop(node, name, (caddr_t)&value);
		break;

	default:
		value = deflt;
		break;
	}

	return (value);
}

/*
 * Called by setcpudelay
 */
void
cpu_init_tick_freq(void)
{
	md_t *mdp;
	mde_cookie_t rootnode;
	int listsz;
	mde_cookie_t *listp = NULL;
	int num_nodes;
	uint64_t stick_prop;

	if (broken_md_flag) {
		sys_tick_freq = cpunodes[CPU->cpu_id].clock_freq;
		return;
	}

	if ((mdp = md_get_handle()) == NULL)
		panic("stick_frequency property not found in MD");

	rootnode = md_root_node(mdp);
	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);

	num_nodes = md_node_count(mdp);

	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)prom_alloc((caddr_t)0, listsz, 0);

	if (listp == NULL)
		panic("cannot allocate list for MD properties");

	num_nodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "platform"),
	    md_find_name(mdp, "fwd"), listp);

	ASSERT(num_nodes == 1);

	if (md_get_prop_val(mdp, *listp, "stick-frequency", &stick_prop) != 0)
		panic("stick_frequency property not found in MD");

	sys_tick_freq = stick_prop;

	prom_free((caddr_t)listp, listsz);
	(void) md_fini_handle(mdp);
}

int shipit(int n, uint64_t cpu_list_ra);
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;

#ifdef DEBUG
#define	SEND_MONDO_STATS	1
#endif

#ifdef SEND_MONDO_STATS
uint32_t x_one_stimes[64];
uint32_t x_one_ltimes[16];
uint32_t x_set_stimes[64];
uint32_t x_set_ltimes[16];
uint32_t x_set_cpus[NCPU];
#endif
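
/*
 * The *_stimes[] arrays histogram short mondo-delivery waits in 128-tick
 * buckets (n >> 7, for waits below 8192 ticks), while the *_ltimes[]
 * arrays capture longer waits in 8192-tick buckets (n >> 13), with
 * anything at or beyond 15 * 8192 ticks folded into the final bucket.
 * x_set_cpus[] counts how many CPUs each send_mondo_set() call
 * ultimately shipped mondos to.
 */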

void
send_one_mondo(int cpuid)
{
	int retries, stat;
	uint64_t starttick, endtick, tick, lasttick;
	struct machcpu *mcpup = &(CPU->cpu_m);

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
	starttick = lasttick = gettick();
	mcpup->cpu_list[0] = (uint16_t)cpuid;
	stat = shipit(1, mcpup->cpu_list_ra);
	endtick = starttick + xc_tick_limit;
	retries = 0;
	while (stat != H_EOK) {
		if (stat != H_EWOULDBLOCK) {
			if (panic_quiesce)
				return;
			if (stat == H_ECPUERROR)
				cmn_err(CE_PANIC, "send_one_mondo: "
				    "cpuid: 0x%x has been marked in "
				    "error", cpuid);
			else
				cmn_err(CE_PANIC, "send_one_mondo: "
				    "unexpected hypervisor error 0x%x "
				    "while sending a mondo to cpuid: "
				    "0x%x", stat, cpuid);
		}
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_PANIC, "send mondo timeout "
			    "(target 0x%x) [retries: 0x%x hvstat: 0x%x]",
			    cpuid, retries, stat);
		}
		drv_usecwait(1);
		stat = shipit(1, mcpup->cpu_list_ra);
		retries++;
	}
#ifdef SEND_MONDO_STATS
	{
		uint64_t n = gettick() - starttick;
		if (n < 8192)
			x_one_stimes[n >> 7]++;
		else if (n < 15 * 8192)
			x_one_ltimes[n >> 13]++;
		else
			x_one_ltimes[0xf]++;
	}
#endif
}

void
send_mondo_set(cpuset_t set)
{
	uint64_t starttick, endtick, tick, lasttick;
	int shipped = 0;
	int retries = 0;
	struct machcpu *mcpup = &(CPU->cpu_m);

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	do {
		int ncpuids = 0;
		int i, stat;

		/* assemble CPU list for HV argument */
		for (i = 0; i < NCPU; i++) {
			if (CPU_IN_SET(set, i)) {
				mcpup->cpu_list[ncpuids] = (uint16_t)i;
				ncpuids++;
			}
		}

		stat = shipit(ncpuids, mcpup->cpu_list_ra);
		if (stat == H_EOK) {
			shipped += ncpuids;
			break;
		}

		/*
		 * Either not all CPU mondos were sent, or an
		 * error occurred. CPUs that were sent mondos
		 * have their CPU IDs overwritten in cpu_list.
		 * Reset the cpuset so that its only members
		 * are those CPU IDs that still need to be sent.
		 */
		CPUSET_ZERO(set);
		for (i = 0; i < ncpuids; i++) {
			if (mcpup->cpu_list[i] == HV_SEND_MONDO_ENTRYDONE) {
				shipped++;
			} else {
				CPUSET_ADD(set, mcpup->cpu_list[i]);
			}
		}

		/*
		 * Now handle possible errors returned
		 * from hypervisor.
		 */
		if (stat == H_ECPUERROR) {
			cpuset_t error_set;

			/*
			 * One or more of the CPUs passed to HV is
			 * in the error state. Remove those CPUs from
			 * set and record them in error_set.
			 */
			CPUSET_ZERO(error_set);
			for (i = 0; i < NCPU; i++) {
				if (CPU_IN_SET(set, i)) {
					uint64_t state = CPU_STATE_INVALID;
					(void) hv_cpu_state(i, &state);
					if (state == CPU_STATE_ERROR) {
						CPUSET_ADD(error_set, i);
						CPUSET_DEL(set, i);
					}
				}
			}

			if (!panic_quiesce) {
				if (CPUSET_ISNULL(error_set)) {
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "hypervisor returned "
					    "H_ECPUERROR but no CPU in "
					    "cpu_list in error state");
				}

				cmn_err(CE_CONT, "send_mondo_set: cpuid(s) ");
				for (i = 0; i < NCPU; i++) {
					if (CPU_IN_SET(error_set, i)) {
						cmn_err(CE_CONT, "0x%x ", i);
					}
				}
				cmn_err(CE_CONT, "have been marked in "
				    "error\n");
				cmn_err(CE_PANIC, "send_mondo_set: CPU(s) "
				    "in error state");
			}
		} else if (stat != H_EWOULDBLOCK) {
			if (panic_quiesce)
				return;
			/*
			 * For all other errors, panic.
			 */
			cmn_err(CE_CONT, "send_mondo_set: unexpected "
			    "hypervisor error 0x%x while sending a "
			    "mondo to cpuid(s):", stat);
			for (i = 0; i < NCPU; i++) {
				if (CPU_IN_SET(set, i)) {
					cmn_err(CE_CONT, " 0x%x", i);
				}
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: unexpected "
			    "hypervisor error");
		}

		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point. Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
			cmn_err(CE_CONT, "send mondo timeout "
			    "[retries: 0x%x] cpuids: ", retries);
			for (i = 0; i < NCPU; i++)
				if (CPU_IN_SET(set, i))
					cmn_err(CE_CONT, " 0x%x", i);
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: timeout");
		}

		/*
		 * Back off for roughly a microsecond before retrying;
		 * sys_clock_mhz ticks correspond to about 1us.
		 */
		while (gettick() < (tick + sys_clock_mhz))
			;
		retries++;
	} while (!CPUSET_ISNULL(set));

	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);

#ifdef SEND_MONDO_STATS
	{
		uint64_t n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else if (n < 15 * 8192)
			x_set_ltimes[n >> 13]++;
		else
			x_set_ltimes[0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

void
syncfpu(void)
{
}

void
cpu_flush_ecache(void)
{
}

void
sticksync_slave(void)
{}

void
sticksync_master(void)
{}

void
cpu_init_cache_scrub(void)
{}
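
/*
 * dtrace_blksuword32() below attempts a plain suword32() store; if that
 * fails (for example because a watchpoint covers the target word) and
 * the caller allowed a retry, dtrace_blksuword32_err() temporarily
 * disables the watched range around a second attempt. The retry is made
 * with tryagain == 0, so the pair cannot recurse more than once.
 */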

int
dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
{
	int ret, watched;

	watched = watch_disable_addr((void *)addr, 4, S_WRITE);
	ret = dtrace_blksuword32(addr, data, 0);
	if (watched)
		watch_enable_addr((void *)addr, 4, S_WRITE);

	return (ret);
}

int
dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
{
	if (suword32((void *)addr, *data) == -1)
		return (tryagain ? dtrace_blksuword32_err(addr, data) : -1);
	dtrace_flush_sec(addr);

	return (0);
}

/*ARGSUSED*/
void
cpu_faulted_enter(struct cpu *cp)
{
}

/*ARGSUSED*/
void
cpu_faulted_exit(struct cpu *cp)
{
}

static int
kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg)
{
	int rc, i;

	for (rc = 0, i = 0; i < NCPU; i++) {
		if (CPU_IN_SET(cpu_ready_set, i))
			rc += cb(i, arg);
	}

	return (rc);
}

/*
 * Sends a cross-call to a specified processor. The caller assumes
 * responsibility for repetition of cross-calls, as appropriate (MARSA for
 * debugging).
 */
static int
kdi_xc_one(int cpuid, void (*func)(uintptr_t, uintptr_t), uintptr_t arg1,
    uintptr_t arg2)
{
	int stat;
	struct machcpu *mcpup;
	uint64_t cpuaddr_reg = 0, cpuaddr_scr = 0;

	mcpup = &(((cpu_t *)get_cpuaddr(cpuaddr_reg, cpuaddr_scr))->cpu_m);

	/*
	 * if (idsr_busy())
	 *	return (KDI_XC_RES_ERR);
	 */

	init_mondo_nocheck((xcfunc_t *)func, arg1, arg2);

	mcpup->cpu_list[0] = (uint16_t)cpuid;
	stat = shipit(1, mcpup->cpu_list_ra);

	if (stat == 0)
		return (KDI_XC_RES_OK);
	else
		return (KDI_XC_RES_NACK);
}

static void
kdi_tickwait(clock_t nticks)
{
	clock_t endtick = gettick() + nticks;

	while (gettick() < endtick)
		;
}

static void
kdi_cpu_init(int dcache_size, int dcache_linesize, int icache_size,
    int icache_linesize)
{
	kdi_dcache_size = dcache_size;
	kdi_dcache_linesize = dcache_linesize;
	kdi_icache_size = icache_size;
	kdi_icache_linesize = icache_linesize;
}

/* used directly by kdi_read/write_phys */
void
kdi_flush_caches(void)
{
	/* Not required on sun4v architecture. */
}

/*ARGSUSED*/
int
kdi_get_stick(uint64_t *stickp)
{
	return (-1);
}

void
cpu_kdi_init(kdi_t *kdi)
{
	kdi->kdi_flush_caches = kdi_flush_caches;
	kdi->mkdi_cpu_init = kdi_cpu_init;
	kdi->mkdi_cpu_ready_iter = kdi_cpu_ready_iter;
	kdi->mkdi_xc_one = kdi_xc_one;
	kdi->mkdi_tickwait = kdi_tickwait;
	kdi->mkdi_get_stick = kdi_get_stick;
}

static void
sun4v_system_claim(void)
{
	watchdog_suspend();
}

static void
sun4v_system_release(void)
{
	watchdog_resume();
}

void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = sun4v_system_claim;
	kdi->pkdi_system_release = sun4v_system_release;
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
 */
/* ARGSUSED */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	return (ENOTSUP);
}

/*
 * This routine returns the size of the kernel's FRU name buffer.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum(),
 * that may be used by other modules (e.g. mm).
 */
/* ARGSUSED */
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	return (ENOTSUP);
}

/*
 * xt_sync - wait for previous x-traps to finish
 */
void
xt_sync(cpuset_t cpuset)
{
	union {
		uint8_t volatile byte[NCPU];
		uint64_t volatile xword[NCPU / 8];
	} cpu_sync;
	uint64_t starttick, endtick, tick, lasttick;
	int i;

	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);
	CPUSET_AND(cpuset, cpu_ready_set);

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * We use an array of bytes to let each cpu signal back
	 * to the cross trap sender that the cross trap has been
	 * executed. Set the byte to 1 before sending the cross trap
	 * and wait until other cpus reset it to 0.
	 */
	bzero((void *)&cpu_sync, NCPU);
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(cpuset, i))
			cpu_sync.byte[i] = 1;

	xt_some(cpuset, (xcfunc_t *)xt_sync_tl1,
	    (uint64_t)cpu_sync.byte, 0);

	starttick = lasttick = gettick();
	endtick = starttick + xc_tick_limit;

	for (i = 0; i < (NCPU / 8); i++) {
		while (cpu_sync.xword[i] != 0) {
			tick = gettick();
			/*
			 * If there is a big jump between the current tick
			 * count and lasttick, we have probably hit a break
			 * point. Adjust endtick accordingly to avoid panic.
			 */
			if (tick > (lasttick + xc_tick_jump_limit)) {
				endtick += (tick - lasttick);
			}
			lasttick = tick;
			if (tick > endtick) {
				if (panic_quiesce)
					goto out;
				cmn_err(CE_CONT, "Cross trap sync timeout "
				    "at cpu_sync.xword[%d]: 0x%lx\n",
				    i, cpu_sync.xword[i]);
				cmn_err(CE_PANIC, "xt_sync: timeout");
			}
		}
	}

out:
	kpreempt_enable();
}