17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 51ae08745Sheppo * Common Development and Distribution License (the "License"). 61ae08745Sheppo * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22d07db889SSree Vemuri * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 237c478bd9Sstevel@tonic-gate */ 247c478bd9Sstevel@tonic-gate 257c478bd9Sstevel@tonic-gate #include <sys/types.h> 267c478bd9Sstevel@tonic-gate #include <sys/systm.h> 277c478bd9Sstevel@tonic-gate #include <sys/archsystm.h> 287c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 297c478bd9Sstevel@tonic-gate #include <sys/uadmin.h> 307c478bd9Sstevel@tonic-gate #include <sys/panic.h> 317c478bd9Sstevel@tonic-gate #include <sys/reboot.h> 327c478bd9Sstevel@tonic-gate #include <sys/autoconf.h> 337c478bd9Sstevel@tonic-gate #include <sys/machsystm.h> 347c478bd9Sstevel@tonic-gate #include <sys/promif.h> 357c478bd9Sstevel@tonic-gate #include <sys/membar.h> 367c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h> 377c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h> 387c478bd9Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h> 397c478bd9Sstevel@tonic-gate #include <sys/intreg.h> 407c478bd9Sstevel@tonic-gate #include <sys/consdev.h> 417c478bd9Sstevel@tonic-gate #include <sys/kdi_impl.h> 42db6d2ee3Ssvemuri #include <sys/traptrace.h> 437c478bd9Sstevel@tonic-gate #include <sys/hypervisor_api.h> 447c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h> 457c478bd9Sstevel@tonic-gate #include <sys/dtrace.h> 467c478bd9Sstevel@tonic-gate #include <sys/xc_impl.h> 47edc40228Sachartre #include <sys/callb.h> 481ae08745Sheppo #include <sys/mdesc.h> 491ae08745Sheppo #include <sys/mach_descrip.h> 503c431bb5Swentaoy #include <sys/wdt.h> 513b890a5bSjb145095 #include <sys/soft_state.h> 523b890a5bSjb145095 #include <sys/promimpl.h> 533b890a5bSjb145095 #include <sys/hsvc.h> 5422e19ac1Sjm22469 #include <sys/ldoms.h> 555699897cSHaik Aftandilian #include <sys/kldc.h> 56d3d50737SRafael Vanoni #include <sys/clock_impl.h> 57023e71deSHaik Aftandilian #include <sys/suspend.h> 58ca3e8d88SDave Plauger #include <sys/dumphdr.h> 597c478bd9Sstevel@tonic-gate 607c478bd9Sstevel@tonic-gate /* 617c478bd9Sstevel@tonic-gate * hvdump_buf_va is a pointer to the currently-configured hvdump_buf. 627c478bd9Sstevel@tonic-gate * A value of NULL indicates that this area is not configured. 
 * hvdump_buf_sz is tunable but will be clamped to HVDUMP_SIZE_MAX.
 */

caddr_t hvdump_buf_va;
uint64_t hvdump_buf_sz = HVDUMP_SIZE_DEFAULT;
static uint64_t hvdump_buf_pa;

u_longlong_t panic_tick;

extern u_longlong_t gettick();
static void reboot_machine(char *);
static void update_hvdump_buffer(void);

/*
 * For xt_sync synchronization.
 */
extern uint64_t xc_tick_limit;
extern uint64_t xc_tick_jump_limit;
extern uint64_t xc_sync_tick_limit;

/*
 * Bring in the cpc PIL_15 handler for panic_enter_hw.
 */
extern uint64_t cpc_level15_inum;

/*
 * We keep our own copies, used for cache flushing, because we can be called
 * before cpu_fiximpl().
 */
static int kdi_dcache_size;
static int kdi_dcache_linesize;
static int kdi_icache_size;
static int kdi_icache_linesize;

/*
 * Assembly support for generic modules in sun4v/ml/mach_xc.s
 */
extern void init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
extern void kdi_flush_idcache(int, int, int, int);
extern uint64_t get_cpuaddr(uint64_t, uint64_t);


#define	BOOT_CMD_MAX_LEN	256	/* power of 2 & 16-byte aligned */
#define	BOOT_CMD_BASE		"boot "

/*
 * In an LDoms system we do not save the user's boot args in NVRAM
 * as is done on legacy systems. Instead, we format and send a
 * 'reboot-command' variable to the variable service. The contents
 * of the variable are retrieved by OBP and used verbatim for
 * the next boot.
 */
static void
store_boot_cmd(char *args, boolean_t add_boot_str, boolean_t invoke_cb)
{
	static char	*cmd_buf;
	size_t		len = 1;
	pnode_t		node;
	size_t		base_len = 0;
	size_t		args_len;
	size_t		args_max;
	uint64_t	majornum;
	uint64_t	minornum;
	uint64_t	buf_pa;
	uint64_t	status;

	status = hsvc_version(HSVC_GROUP_REBOOT_DATA, &majornum, &minornum);

	/*
	 * invoke_cb is set to true when we are in a normal shutdown sequence
	 * (interrupts are not blocked, the system is not panicking or being
	 * suspended). In that case, we can use any method to store the boot
	 * command. Otherwise storing the boot command cannot be done using
	 * a domain service because it cannot be safely used in that context.
	 */
	if ((status != H_EOK) && (invoke_cb == B_FALSE))
		return;

	cmd_buf = contig_mem_alloc(BOOT_CMD_MAX_LEN);
	if (cmd_buf == NULL)
		return;

	if (add_boot_str) {
		(void) strcpy(cmd_buf, BOOT_CMD_BASE);

		base_len = strlen(BOOT_CMD_BASE);
		len = base_len + 1;
	}

	if (args != NULL) {
		args_len = strlen(args);
		args_max = BOOT_CMD_MAX_LEN - len;

		if (args_len > args_max) {
			cmn_err(CE_WARN, "Reboot command too long (%ld), "
			    "truncating command arguments", len + args_len);

			args_len = args_max;
		}

		len += args_len;
		(void) strncpy(&cmd_buf[base_len], args, args_len);
	}

	/*
	 * Save the reboot-command with HV, if reboot data group is
	 * negotiated. Else save the reboot-command via vars-config domain
	 * services on the SP.
	 */
	if (status == H_EOK) {
		buf_pa = va_to_pa(cmd_buf);
		status = hv_reboot_data_set(buf_pa, len);
		if (status != H_EOK) {
			cmn_err(CE_WARN, "Unable to store boot command for "
			    "use on reboot with HV: error = 0x%lx", status);
		}
	} else {
		node = prom_optionsnode();
		if ((node == OBP_NONODE) || (node == OBP_BADNODE) ||
		    prom_setprop(node, "reboot-command", cmd_buf, len) == -1)
			cmn_err(CE_WARN, "Unable to store boot command for "
			    "use on reboot");
	}
}


/*
 * Machine dependent code to reboot.
19122e19ac1Sjm22469 * 19222e19ac1Sjm22469 * "bootstr", when non-null, points to a string to be used as the 19322e19ac1Sjm22469 * argument string when rebooting. 194edc40228Sachartre * 195edc40228Sachartre * "invoke_cb" is a boolean. It is set to true when mdboot() can safely 196edc40228Sachartre * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when 197edc40228Sachartre * we are in a normal shutdown sequence (interrupts are not blocked, the 198edc40228Sachartre * system is not panic'ing or being suspended). 1997c478bd9Sstevel@tonic-gate */ 2007c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 2017c478bd9Sstevel@tonic-gate void 202edc40228Sachartre mdboot(int cmd, int fcn, char *bootstr, boolean_t invoke_cb) 2037c478bd9Sstevel@tonic-gate { 2047c478bd9Sstevel@tonic-gate extern void pm_cfb_check_and_powerup(void); 2057c478bd9Sstevel@tonic-gate 206281888b3Sjbeck /* 207281888b3Sjbeck * XXX - rconsvp is set to NULL to ensure that output messages 208281888b3Sjbeck * are sent to the underlying "hardware" device using the 209281888b3Sjbeck * monitor's printf routine since we are in the process of 210281888b3Sjbeck * either rebooting or halting the machine. 211281888b3Sjbeck */ 212281888b3Sjbeck rconsvp = NULL; 213281888b3Sjbeck 21422e19ac1Sjm22469 switch (fcn) { 21522e19ac1Sjm22469 case AD_HALT: 2161b83305cSjm22469 /* 2171b83305cSjm22469 * LDoms: By storing a no-op command 2181b83305cSjm22469 * in the 'reboot-command' variable we cause OBP 2191b83305cSjm22469 * to ignore the setting of 'auto-boot?' after 2201b83305cSjm22469 * it completes the reset. This causes the system 2211b83305cSjm22469 * to stop at the ok prompt. 2221b83305cSjm22469 */ 2234df55fdeSJanie Lu if (domaining_enabled()) 2244df55fdeSJanie Lu store_boot_cmd("noop", B_FALSE, invoke_cb); 2251b83305cSjm22469 break; 2261b83305cSjm22469 22722e19ac1Sjm22469 case AD_POWEROFF: 22822e19ac1Sjm22469 break; 2291b83305cSjm22469 23022e19ac1Sjm22469 default: 23122e19ac1Sjm22469 if (bootstr == NULL) { 23222e19ac1Sjm22469 switch (fcn) { 23322e19ac1Sjm22469 234e557d412SChristopher Kiick case AD_FASTREBOOT: 23522e19ac1Sjm22469 case AD_BOOT: 23622e19ac1Sjm22469 bootstr = ""; 23722e19ac1Sjm22469 break; 23822e19ac1Sjm22469 23922e19ac1Sjm22469 case AD_IBOOT: 24022e19ac1Sjm22469 bootstr = "-a"; 24122e19ac1Sjm22469 break; 24222e19ac1Sjm22469 24322e19ac1Sjm22469 case AD_SBOOT: 24422e19ac1Sjm22469 bootstr = "-s"; 24522e19ac1Sjm22469 break; 24622e19ac1Sjm22469 24722e19ac1Sjm22469 case AD_SIBOOT: 24822e19ac1Sjm22469 bootstr = "-sa"; 24922e19ac1Sjm22469 break; 25022e19ac1Sjm22469 default: 25122e19ac1Sjm22469 cmn_err(CE_WARN, 25222e19ac1Sjm22469 "mdboot: invalid function %d", fcn); 25322e19ac1Sjm22469 bootstr = ""; 25422e19ac1Sjm22469 break; 25522e19ac1Sjm22469 } 25622e19ac1Sjm22469 } 25722e19ac1Sjm22469 25822e19ac1Sjm22469 /* 259cdf9f8c9Sjm22469 * If LDoms is running, we must save the boot string 260cdf9f8c9Sjm22469 * before we enter restricted mode. This is possible 261cdf9f8c9Sjm22469 * only if we are not being called from panic. 
26222e19ac1Sjm22469 */ 2634df55fdeSJanie Lu if (domaining_enabled()) 2644df55fdeSJanie Lu store_boot_cmd(bootstr, B_TRUE, invoke_cb); 265cdf9f8c9Sjm22469 } 266cdf9f8c9Sjm22469 2677c478bd9Sstevel@tonic-gate /* 2687c478bd9Sstevel@tonic-gate * At a high interrupt level we can't: 2697c478bd9Sstevel@tonic-gate * 1) bring up the console 2707c478bd9Sstevel@tonic-gate * or 2717c478bd9Sstevel@tonic-gate * 2) wait for pending interrupts prior to redistribution 2727c478bd9Sstevel@tonic-gate * to the current CPU 2737c478bd9Sstevel@tonic-gate * 2747c478bd9Sstevel@tonic-gate * so we do them now. 2757c478bd9Sstevel@tonic-gate */ 2767c478bd9Sstevel@tonic-gate pm_cfb_check_and_powerup(); 2777c478bd9Sstevel@tonic-gate 2787c478bd9Sstevel@tonic-gate /* make sure there are no more changes to the device tree */ 2797c478bd9Sstevel@tonic-gate devtree_freeze(); 2807c478bd9Sstevel@tonic-gate 281edc40228Sachartre if (invoke_cb) 282edc40228Sachartre (void) callb_execute_class(CB_CL_MDBOOT, NULL); 283edc40228Sachartre 2847c478bd9Sstevel@tonic-gate /* 285db874c57Selowe * Clear any unresolved UEs from memory. 286db874c57Selowe */ 2878b464eb8Smec page_retire_mdboot(); 288db874c57Selowe 289db874c57Selowe /* 2907c478bd9Sstevel@tonic-gate * stop other cpus which also raise our priority. since there is only 2917c478bd9Sstevel@tonic-gate * one active cpu after this, and our priority will be too high 2927c478bd9Sstevel@tonic-gate * for us to be preempted, we're essentially single threaded 2937c478bd9Sstevel@tonic-gate * from here on out. 2947c478bd9Sstevel@tonic-gate */ 2957c478bd9Sstevel@tonic-gate stop_other_cpus(); 2967c478bd9Sstevel@tonic-gate 2977c478bd9Sstevel@tonic-gate /* 2987c478bd9Sstevel@tonic-gate * try and reset leaf devices. reset_leaves() should only 2997c478bd9Sstevel@tonic-gate * be called when there are no other threads that could be 3007c478bd9Sstevel@tonic-gate * accessing devices 3017c478bd9Sstevel@tonic-gate */ 3027c478bd9Sstevel@tonic-gate reset_leaves(); 3037c478bd9Sstevel@tonic-gate 3043c431bb5Swentaoy watchdog_clear(); 3053c431bb5Swentaoy 3067c478bd9Sstevel@tonic-gate if (fcn == AD_HALT) { 3073b890a5bSjb145095 mach_set_soft_state(SIS_TRANSITION, 3083b890a5bSjb145095 &SOLARIS_SOFT_STATE_HALT_MSG); 3097c478bd9Sstevel@tonic-gate halt((char *)NULL); 3107c478bd9Sstevel@tonic-gate } else if (fcn == AD_POWEROFF) { 3113b890a5bSjb145095 mach_set_soft_state(SIS_TRANSITION, 3123b890a5bSjb145095 &SOLARIS_SOFT_STATE_POWER_MSG); 3137c478bd9Sstevel@tonic-gate power_down(NULL); 3147c478bd9Sstevel@tonic-gate } else { 3153b890a5bSjb145095 mach_set_soft_state(SIS_TRANSITION, 3163b890a5bSjb145095 &SOLARIS_SOFT_STATE_REBOOT_MSG); 3177c478bd9Sstevel@tonic-gate reboot_machine(bootstr); 3187c478bd9Sstevel@tonic-gate } 3197c478bd9Sstevel@tonic-gate /* MAYBE REACHED */ 3207c478bd9Sstevel@tonic-gate } 3217c478bd9Sstevel@tonic-gate 3227c478bd9Sstevel@tonic-gate /* mdpreboot - may be called prior to mdboot while root fs still mounted */ 3237c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 3247c478bd9Sstevel@tonic-gate void 3257c478bd9Sstevel@tonic-gate mdpreboot(int cmd, int fcn, char *bootstr) 3267c478bd9Sstevel@tonic-gate { 3277c478bd9Sstevel@tonic-gate } 3287c478bd9Sstevel@tonic-gate 3297c478bd9Sstevel@tonic-gate /* 3307c478bd9Sstevel@tonic-gate * Halt the machine and then reboot with the device 3317c478bd9Sstevel@tonic-gate * and arguments specified in bootstr. 
3327c478bd9Sstevel@tonic-gate */ 3337c478bd9Sstevel@tonic-gate static void 3347c478bd9Sstevel@tonic-gate reboot_machine(char *bootstr) 3357c478bd9Sstevel@tonic-gate { 3367c478bd9Sstevel@tonic-gate flush_windows(); 3377c478bd9Sstevel@tonic-gate stop_other_cpus(); /* send stop signal to other CPUs */ 3387c478bd9Sstevel@tonic-gate prom_printf("rebooting...\n"); 3397c478bd9Sstevel@tonic-gate /* 3407c478bd9Sstevel@tonic-gate * For platforms that use CPU signatures, we 3417c478bd9Sstevel@tonic-gate * need to set the signature block to OS and 3427c478bd9Sstevel@tonic-gate * the state to exiting for all the processors. 3437c478bd9Sstevel@tonic-gate */ 3447c478bd9Sstevel@tonic-gate CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_REBOOT, -1); 3457c478bd9Sstevel@tonic-gate prom_reboot(bootstr); 3467c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 3477c478bd9Sstevel@tonic-gate } 3487c478bd9Sstevel@tonic-gate 3497c478bd9Sstevel@tonic-gate /* 3507c478bd9Sstevel@tonic-gate * We use the x-trap mechanism and idle_stop_xcall() to stop the other CPUs. 3517c478bd9Sstevel@tonic-gate * Once in panic_idle() they raise spl, record their location, and spin. 3527c478bd9Sstevel@tonic-gate */ 3537c478bd9Sstevel@tonic-gate static void 3547c478bd9Sstevel@tonic-gate panic_idle(void) 3557c478bd9Sstevel@tonic-gate { 3567c478bd9Sstevel@tonic-gate (void) spl7(); 3577c478bd9Sstevel@tonic-gate 3587c478bd9Sstevel@tonic-gate debug_flush_windows(); 3597c478bd9Sstevel@tonic-gate (void) setjmp(&curthread->t_pcb); 3607c478bd9Sstevel@tonic-gate 3617c478bd9Sstevel@tonic-gate CPU->cpu_m.in_prom = 1; 3627c478bd9Sstevel@tonic-gate membar_stld(); 3637c478bd9Sstevel@tonic-gate 364ca3e8d88SDave Plauger dumpsys_helper(); 365ca3e8d88SDave Plauger 36622e19ac1Sjm22469 for (;;) 3671b83305cSjm22469 ; 3687c478bd9Sstevel@tonic-gate } 3697c478bd9Sstevel@tonic-gate 3707c478bd9Sstevel@tonic-gate /* 3717c478bd9Sstevel@tonic-gate * Force the other CPUs to trap into panic_idle(), and then remove them 3727c478bd9Sstevel@tonic-gate * from the cpu_ready_set so they will no longer receive cross-calls. 
3737c478bd9Sstevel@tonic-gate */ 3747c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 3757c478bd9Sstevel@tonic-gate void 3767c478bd9Sstevel@tonic-gate panic_stopcpus(cpu_t *cp, kthread_t *t, int spl) 3777c478bd9Sstevel@tonic-gate { 3787c478bd9Sstevel@tonic-gate cpuset_t cps; 3797c478bd9Sstevel@tonic-gate int i; 3807c478bd9Sstevel@tonic-gate 3817c478bd9Sstevel@tonic-gate (void) splzs(); 3827c478bd9Sstevel@tonic-gate CPUSET_ALL_BUT(cps, cp->cpu_id); 3837c478bd9Sstevel@tonic-gate xt_some(cps, (xcfunc_t *)idle_stop_xcall, (uint64_t)&panic_idle, NULL); 3847c478bd9Sstevel@tonic-gate 3857c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) { 3867c478bd9Sstevel@tonic-gate if (i != cp->cpu_id && CPU_XCALL_READY(i)) { 3877c478bd9Sstevel@tonic-gate int ntries = 0x10000; 3887c478bd9Sstevel@tonic-gate 3897c478bd9Sstevel@tonic-gate while (!cpu[i]->cpu_m.in_prom && ntries) { 3907c478bd9Sstevel@tonic-gate DELAY(50); 3917c478bd9Sstevel@tonic-gate ntries--; 3927c478bd9Sstevel@tonic-gate } 3937c478bd9Sstevel@tonic-gate 3947c478bd9Sstevel@tonic-gate if (!cpu[i]->cpu_m.in_prom) 3957c478bd9Sstevel@tonic-gate printf("panic: failed to stop cpu%d\n", i); 3967c478bd9Sstevel@tonic-gate 3977c478bd9Sstevel@tonic-gate cpu[i]->cpu_flags &= ~CPU_READY; 3987c478bd9Sstevel@tonic-gate cpu[i]->cpu_flags |= CPU_QUIESCED; 3997c478bd9Sstevel@tonic-gate CPUSET_DEL(cpu_ready_set, cpu[i]->cpu_id); 4007c478bd9Sstevel@tonic-gate } 4017c478bd9Sstevel@tonic-gate } 4027c478bd9Sstevel@tonic-gate } 4037c478bd9Sstevel@tonic-gate 4047c478bd9Sstevel@tonic-gate /* 4057c478bd9Sstevel@tonic-gate * Platform callback following each entry to panicsys(). If we've panicked at 4067c478bd9Sstevel@tonic-gate * level 14, we examine t_panic_trap to see if a fatal trap occurred. If so, 4077c478bd9Sstevel@tonic-gate * we disable further %tick_cmpr interrupts. If not, an explicit call to panic 4087c478bd9Sstevel@tonic-gate * was made and so we re-enqueue an interrupt request structure to allow 4097c478bd9Sstevel@tonic-gate * further level 14 interrupts to be processed once we lower PIL. This allows 4107c478bd9Sstevel@tonic-gate * us to handle panics from the deadman() CY_HIGH_LEVEL cyclic. 411*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States * 412*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States * In case we panic at level 15, ensure that the cpc handler has been 413*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States * reinstalled otherwise we could run the risk of hitting a missing interrupt 414*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States * handler when this thread drops PIL and the cpc counter overflows. 
4157c478bd9Sstevel@tonic-gate */ 4167c478bd9Sstevel@tonic-gate void 4177c478bd9Sstevel@tonic-gate panic_enter_hw(int spl) 4187c478bd9Sstevel@tonic-gate { 419*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States uint_t opstate; 420*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States 421efaef81fSarao if (!panic_tick) { 422db6d2ee3Ssvemuri panic_tick = gettick(); 423db6d2ee3Ssvemuri if (mach_htraptrace_enable) { 424efaef81fSarao uint64_t prev_freeze; 425efaef81fSarao 426efaef81fSarao /* there are no possible error codes for this hcall */ 427efaef81fSarao (void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL, 428efaef81fSarao &prev_freeze); 429efaef81fSarao } 430db6d2ee3Ssvemuri #ifdef TRAPTRACE 431db6d2ee3Ssvemuri TRAPTRACE_FREEZE; 432efaef81fSarao #endif 433db6d2ee3Ssvemuri } 4343b890a5bSjb145095 4353b890a5bSjb145095 mach_set_soft_state(SIS_TRANSITION, &SOLARIS_SOFT_STATE_PANIC_MSG); 4363b890a5bSjb145095 4377c478bd9Sstevel@tonic-gate if (spl == ipltospl(PIL_14)) { 438*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States opstate = disable_vec_intr(); 4397c478bd9Sstevel@tonic-gate 4407c478bd9Sstevel@tonic-gate if (curthread->t_panic_trap != NULL) { 4417c478bd9Sstevel@tonic-gate tickcmpr_disable(); 4427c478bd9Sstevel@tonic-gate intr_dequeue_req(PIL_14, cbe_level14_inum); 4437c478bd9Sstevel@tonic-gate } else { 4447c478bd9Sstevel@tonic-gate if (!tickcmpr_disabled()) 4457c478bd9Sstevel@tonic-gate intr_enqueue_req(PIL_14, cbe_level14_inum); 4467c478bd9Sstevel@tonic-gate /* 4477c478bd9Sstevel@tonic-gate * Clear SOFTINT<14>, SOFTINT<0> (TICK_INT) 4487c478bd9Sstevel@tonic-gate * and SOFTINT<16> (STICK_INT) to indicate 4497c478bd9Sstevel@tonic-gate * that the current level 14 has been serviced. 4507c478bd9Sstevel@tonic-gate */ 4517c478bd9Sstevel@tonic-gate wr_clr_softint((1 << PIL_14) | 4527c478bd9Sstevel@tonic-gate TICK_INT_MASK | STICK_INT_MASK); 4537c478bd9Sstevel@tonic-gate } 4547c478bd9Sstevel@tonic-gate 4557c478bd9Sstevel@tonic-gate enable_vec_intr(opstate); 456*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States } else if (spl == ipltospl(PIL_15)) { 457*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States opstate = disable_vec_intr(); 458*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States intr_enqueue_req(PIL_15, cpc_level15_inum); 459*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States wr_clr_softint(1 << PIL_15); 460*a1bf6e2eSChristopher Baumbauer - Oracle America - San Diego United States enable_vec_intr(opstate); 4617c478bd9Sstevel@tonic-gate } 4627c478bd9Sstevel@tonic-gate } 4637c478bd9Sstevel@tonic-gate 4647c478bd9Sstevel@tonic-gate /* 4657c478bd9Sstevel@tonic-gate * Miscellaneous hardware-specific code to execute after panicstr is set 4667c478bd9Sstevel@tonic-gate * by the panic code: we also print and record PTL1 panic information here. 4677c478bd9Sstevel@tonic-gate */ 4687c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 4697c478bd9Sstevel@tonic-gate void 4707c478bd9Sstevel@tonic-gate panic_quiesce_hw(panic_data_t *pdp) 4717c478bd9Sstevel@tonic-gate { 4727c478bd9Sstevel@tonic-gate extern uint_t getpstate(void); 4737c478bd9Sstevel@tonic-gate extern void setpstate(uint_t); 4747c478bd9Sstevel@tonic-gate 4757c478bd9Sstevel@tonic-gate /* 4767c478bd9Sstevel@tonic-gate * Turn off TRAPTRACE and save the current %tick value in panic_tick. 
4777c478bd9Sstevel@tonic-gate */ 478db6d2ee3Ssvemuri if (!panic_tick) { 4797c478bd9Sstevel@tonic-gate panic_tick = gettick(); 480db6d2ee3Ssvemuri if (mach_htraptrace_enable) { 481db6d2ee3Ssvemuri uint64_t prev_freeze; 482db6d2ee3Ssvemuri 4837c478bd9Sstevel@tonic-gate /* there are no possible error codes for this hcall */ 484db6d2ee3Ssvemuri (void) hv_ttrace_freeze((uint64_t)TRAP_TFREEZE_ALL, 485db6d2ee3Ssvemuri &prev_freeze); 486db6d2ee3Ssvemuri } 487db6d2ee3Ssvemuri #ifdef TRAPTRACE 4887c478bd9Sstevel@tonic-gate TRAPTRACE_FREEZE; 4897c478bd9Sstevel@tonic-gate #endif 490db6d2ee3Ssvemuri } 4917c478bd9Sstevel@tonic-gate /* 4927c478bd9Sstevel@tonic-gate * For Platforms that use CPU signatures, we 4937c478bd9Sstevel@tonic-gate * need to set the signature block to OS, the state to 4947c478bd9Sstevel@tonic-gate * exiting, and the substate to panic for all the processors. 4957c478bd9Sstevel@tonic-gate */ 4967c478bd9Sstevel@tonic-gate CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_PANIC, -1); 4977c478bd9Sstevel@tonic-gate 4987c478bd9Sstevel@tonic-gate update_hvdump_buffer(); 4997c478bd9Sstevel@tonic-gate 5007c478bd9Sstevel@tonic-gate /* 5017c478bd9Sstevel@tonic-gate * Disable further ECC errors from the bus nexus. 5027c478bd9Sstevel@tonic-gate */ 5037c478bd9Sstevel@tonic-gate (void) bus_func_invoke(BF_TYPE_ERRDIS); 5047c478bd9Sstevel@tonic-gate 5057c478bd9Sstevel@tonic-gate /* 5067c478bd9Sstevel@tonic-gate * Redirect all interrupts to the current CPU. 5077c478bd9Sstevel@tonic-gate */ 5087c478bd9Sstevel@tonic-gate intr_redist_all_cpus_shutdown(); 5097c478bd9Sstevel@tonic-gate 5107c478bd9Sstevel@tonic-gate /* 5117c478bd9Sstevel@tonic-gate * This call exists solely to support dumps to network 5127c478bd9Sstevel@tonic-gate * devices after sync from OBP. 5137c478bd9Sstevel@tonic-gate * 5147c478bd9Sstevel@tonic-gate * If we came here via the sync callback, then on some 5157c478bd9Sstevel@tonic-gate * platforms, interrupts may have arrived while we were 5167c478bd9Sstevel@tonic-gate * stopped in OBP. OBP will arrange for those interrupts to 5177c478bd9Sstevel@tonic-gate * be redelivered if you say "go", but not if you invoke a 5187c478bd9Sstevel@tonic-gate * client callback like 'sync'. For some dump devices 5197c478bd9Sstevel@tonic-gate * (network swap devices), we need interrupts to be 5207c478bd9Sstevel@tonic-gate * delivered in order to dump, so we have to call the bus 5217c478bd9Sstevel@tonic-gate * nexus driver to reset the interrupt state machines. 5227c478bd9Sstevel@tonic-gate */ 5237c478bd9Sstevel@tonic-gate (void) bus_func_invoke(BF_TYPE_RESINTR); 5247c478bd9Sstevel@tonic-gate 5257c478bd9Sstevel@tonic-gate setpstate(getpstate() | PSTATE_IE); 5267c478bd9Sstevel@tonic-gate } 5277c478bd9Sstevel@tonic-gate 5287c478bd9Sstevel@tonic-gate /* 5297c478bd9Sstevel@tonic-gate * Platforms that use CPU signatures need to set the signature block to OS and 5307c478bd9Sstevel@tonic-gate * the state to exiting for all CPUs. PANIC_CONT indicates that we're about to 5317c478bd9Sstevel@tonic-gate * write the crash dump, which tells the SSP/SMS to begin a timeout routine to 5327c478bd9Sstevel@tonic-gate * reboot the machine if the dump never completes. 
5337c478bd9Sstevel@tonic-gate */ 5347c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 5357c478bd9Sstevel@tonic-gate void 5367c478bd9Sstevel@tonic-gate panic_dump_hw(int spl) 5377c478bd9Sstevel@tonic-gate { 5387c478bd9Sstevel@tonic-gate CPU_SIGNATURE(OS_SIG, SIGST_EXIT, SIGSUBST_DUMP, -1); 5397c478bd9Sstevel@tonic-gate } 5407c478bd9Sstevel@tonic-gate 5417c478bd9Sstevel@tonic-gate /* 5427c478bd9Sstevel@tonic-gate * for ptl1_panic 5437c478bd9Sstevel@tonic-gate */ 5447c478bd9Sstevel@tonic-gate void 5457c478bd9Sstevel@tonic-gate ptl1_init_cpu(struct cpu *cpu) 5467c478bd9Sstevel@tonic-gate { 5477c478bd9Sstevel@tonic-gate ptl1_state_t *pstate = &cpu->cpu_m.ptl1_state; 5487c478bd9Sstevel@tonic-gate 5497c478bd9Sstevel@tonic-gate /*CONSTCOND*/ 5507c478bd9Sstevel@tonic-gate if (sizeof (struct cpu) + PTL1_SSIZE > CPU_ALLOC_SIZE) { 5517c478bd9Sstevel@tonic-gate panic("ptl1_init_cpu: not enough space left for ptl1_panic " 5520bd5614cSiskreen "stack, sizeof (struct cpu) = %lu", 5530bd5614cSiskreen (unsigned long)sizeof (struct cpu)); 5547c478bd9Sstevel@tonic-gate } 5557c478bd9Sstevel@tonic-gate 5567c478bd9Sstevel@tonic-gate pstate->ptl1_stktop = (uintptr_t)cpu + CPU_ALLOC_SIZE; 5577c478bd9Sstevel@tonic-gate cpu_pa[cpu->cpu_id] = va_to_pa(cpu); 5587c478bd9Sstevel@tonic-gate } 5597c478bd9Sstevel@tonic-gate 5607c478bd9Sstevel@tonic-gate void 5617c478bd9Sstevel@tonic-gate ptl1_panic_handler(ptl1_state_t *pstate) 5627c478bd9Sstevel@tonic-gate { 5637c478bd9Sstevel@tonic-gate static const char *ptl1_reasons[] = { 5647c478bd9Sstevel@tonic-gate #ifdef PTL1_PANIC_DEBUG 5657c478bd9Sstevel@tonic-gate "trap for debug purpose", /* PTL1_BAD_DEBUG */ 5667c478bd9Sstevel@tonic-gate #else 5677c478bd9Sstevel@tonic-gate "unknown trap", /* PTL1_BAD_DEBUG */ 5687c478bd9Sstevel@tonic-gate #endif 5697c478bd9Sstevel@tonic-gate "register window trap", /* PTL1_BAD_WTRAP */ 5707c478bd9Sstevel@tonic-gate "kernel MMU miss", /* PTL1_BAD_KMISS */ 5717c478bd9Sstevel@tonic-gate "kernel protection fault", /* PTL1_BAD_KPROT_FAULT */ 5727c478bd9Sstevel@tonic-gate "ISM MMU miss", /* PTL1_BAD_ISM */ 5737c478bd9Sstevel@tonic-gate "kernel MMU trap", /* PTL1_BAD_MMUTRAP */ 5747c478bd9Sstevel@tonic-gate "kernel trap handler state", /* PTL1_BAD_TRAP */ 5757c478bd9Sstevel@tonic-gate "floating point trap", /* PTL1_BAD_FPTRAP */ 5767c478bd9Sstevel@tonic-gate #ifdef DEBUG 577b0fc0e77Sgovinda "pointer to intr_vec", /* PTL1_BAD_INTR_VEC */ 5787c478bd9Sstevel@tonic-gate #else 579b0fc0e77Sgovinda "unknown trap", /* PTL1_BAD_INTR_VEC */ 5807c478bd9Sstevel@tonic-gate #endif 5817c478bd9Sstevel@tonic-gate #ifdef TRAPTRACE 5827c478bd9Sstevel@tonic-gate "TRACE_PTR state", /* PTL1_BAD_TRACE_PTR */ 5837c478bd9Sstevel@tonic-gate #else 5847c478bd9Sstevel@tonic-gate "unknown trap", /* PTL1_BAD_TRACE_PTR */ 5857c478bd9Sstevel@tonic-gate #endif 5867c478bd9Sstevel@tonic-gate "stack overflow", /* PTL1_BAD_STACK */ 5877c478bd9Sstevel@tonic-gate "DTrace flags", /* PTL1_BAD_DTRACE_FLAGS */ 5887c478bd9Sstevel@tonic-gate "attempt to steal locked ctx", /* PTL1_BAD_CTX_STEAL */ 5897c478bd9Sstevel@tonic-gate "CPU ECC error loop", /* PTL1_BAD_ECC */ 5907c478bd9Sstevel@tonic-gate "unexpected error from hypervisor call", /* PTL1_BAD_HCALL */ 591efaef81fSarao "unexpected global level(%gl)", /* PTL1_BAD_GL */ 5921ae08745Sheppo "Watchdog Reset", /* PTL1_BAD_WATCHDOG */ 5931ae08745Sheppo "unexpected RED mode trap", /* PTL1_BAD_RED */ 5941ae08745Sheppo "return value EINVAL from hcall: "\ 5951ae08745Sheppo "UNMAP_PERM_ADDR", /* PTL1_BAD_HCALL_UNMAP_PERM_EINVAL */ 5961ae08745Sheppo "return value 
ENOMAP from hcall: "\ 5971ae08745Sheppo "UNMAP_PERM_ADDR", /* PTL1_BAD_HCALL_UNMAP_PERM_ENOMAP */ 59834528892Spaulsan "error raising a TSB exception", /* PTL1_BAD_RAISE_TSBEXCP */ 59934528892Spaulsan "missing shared TSB" /* PTL1_NO_SCDTSB8K */ 6007c478bd9Sstevel@tonic-gate }; 6017c478bd9Sstevel@tonic-gate 60249a230e1Ssvemuri uint_t reason = pstate->ptl1_regs.ptl1_gregs[0].ptl1_g1; 6037c478bd9Sstevel@tonic-gate uint_t tl = pstate->ptl1_regs.ptl1_trap_regs[0].ptl1_tl; 604843e1988Sjohnlev struct panic_trap_info ti = { 0 }; 6057c478bd9Sstevel@tonic-gate 6067c478bd9Sstevel@tonic-gate /* 6077c478bd9Sstevel@tonic-gate * Use trap_info for a place holder to call panic_savetrap() and 6087c478bd9Sstevel@tonic-gate * panic_showtrap() to save and print out ptl1_panic information. 6097c478bd9Sstevel@tonic-gate */ 6107c478bd9Sstevel@tonic-gate if (curthread->t_panic_trap == NULL) 6117c478bd9Sstevel@tonic-gate curthread->t_panic_trap = &ti; 6127c478bd9Sstevel@tonic-gate 6137c478bd9Sstevel@tonic-gate if (reason < sizeof (ptl1_reasons) / sizeof (ptl1_reasons[0])) 6147c478bd9Sstevel@tonic-gate panic("bad %s at TL %u", ptl1_reasons[reason], tl); 6157c478bd9Sstevel@tonic-gate else 6167c478bd9Sstevel@tonic-gate panic("ptl1_panic reason 0x%x at TL %u", reason, tl); 6177c478bd9Sstevel@tonic-gate } 6187c478bd9Sstevel@tonic-gate 6197c478bd9Sstevel@tonic-gate void 6207c478bd9Sstevel@tonic-gate clear_watchdog_on_exit(void) 6217c478bd9Sstevel@tonic-gate { 622927a453eSwentaoy if (watchdog_enabled && watchdog_activated) { 623927a453eSwentaoy prom_printf("Debugging requested; hardware watchdog " 624927a453eSwentaoy "suspended.\n"); 6253c431bb5Swentaoy (void) watchdog_suspend(); 6267c478bd9Sstevel@tonic-gate } 627927a453eSwentaoy } 6287c478bd9Sstevel@tonic-gate 6293c431bb5Swentaoy /* 6303c431bb5Swentaoy * Restore the watchdog timer when returning from a debugger 6313c431bb5Swentaoy * after a panic or L1-A and resume watchdog pat. 
 */
void
restore_watchdog_on_entry()
{
	watchdog_resume();
}

int
kdi_watchdog_disable(void)
{
	watchdog_suspend();

	return (0);
}

void
kdi_watchdog_restore(void)
{
	watchdog_resume();
}

void
mach_dump_buffer_init(void)
{
	uint64_t ret, minsize = 0;

	if (hvdump_buf_sz > HVDUMP_SIZE_MAX)
		hvdump_buf_sz = HVDUMP_SIZE_MAX;

	hvdump_buf_va = contig_mem_alloc_align(hvdump_buf_sz, PAGESIZE);
	if (hvdump_buf_va == NULL)
		return;

	hvdump_buf_pa = va_to_pa(hvdump_buf_va);

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &minsize);

	if (ret != H_EOK) {
		contig_mem_free(hvdump_buf_va, hvdump_buf_sz);
		hvdump_buf_va = NULL;
		cmn_err(CE_NOTE, "!Error in setting up hvstate "
		    "dump buffer. Error = 0x%lx, size = 0x%lx, "
		    "buf_pa = 0x%lx", ret, hvdump_buf_sz,
		    hvdump_buf_pa);

		if (ret == H_EINVAL) {
			cmn_err(CE_NOTE, "!Buffer size too small. "
			    "Available buffer size = 0x%lx, "
			    "Minimum buffer size required = 0x%lx",
			    hvdump_buf_sz, minsize);
		}
	}
}


static void
update_hvdump_buffer(void)
{
	uint64_t ret, dummy_val;

	if (hvdump_buf_va == NULL)
		return;

	ret = hv_dump_buf_update(hvdump_buf_pa, hvdump_buf_sz,
	    &dummy_val);
	if (ret != H_EOK) {
		cmn_err(CE_NOTE, "!Cannot update hvstate dump "
		    "buffer. 
Error = 0x%lx", ret); 7017c478bd9Sstevel@tonic-gate } 7027c478bd9Sstevel@tonic-gate } 7037c478bd9Sstevel@tonic-gate 7047c478bd9Sstevel@tonic-gate 7057c478bd9Sstevel@tonic-gate static int 706fa9e4066Sahrens getintprop(pnode_t node, char *name, int deflt) 7077c478bd9Sstevel@tonic-gate { 7087c478bd9Sstevel@tonic-gate int value; 7097c478bd9Sstevel@tonic-gate 7107c478bd9Sstevel@tonic-gate switch (prom_getproplen(node, name)) { 7117c478bd9Sstevel@tonic-gate case 0: 7127c478bd9Sstevel@tonic-gate value = 1; /* boolean properties */ 7137c478bd9Sstevel@tonic-gate break; 7147c478bd9Sstevel@tonic-gate 7157c478bd9Sstevel@tonic-gate case sizeof (int): 7167c478bd9Sstevel@tonic-gate (void) prom_getprop(node, name, (caddr_t)&value); 7177c478bd9Sstevel@tonic-gate break; 7187c478bd9Sstevel@tonic-gate 7197c478bd9Sstevel@tonic-gate default: 7207c478bd9Sstevel@tonic-gate value = deflt; 7217c478bd9Sstevel@tonic-gate break; 7227c478bd9Sstevel@tonic-gate } 7237c478bd9Sstevel@tonic-gate 7247c478bd9Sstevel@tonic-gate return (value); 7257c478bd9Sstevel@tonic-gate } 7267c478bd9Sstevel@tonic-gate 7277c478bd9Sstevel@tonic-gate /* 7287c478bd9Sstevel@tonic-gate * Called by setcpudelay 7297c478bd9Sstevel@tonic-gate */ 7307c478bd9Sstevel@tonic-gate void 7317c478bd9Sstevel@tonic-gate cpu_init_tick_freq(void) 7327c478bd9Sstevel@tonic-gate { 7331ae08745Sheppo md_t *mdp; 7341ae08745Sheppo mde_cookie_t rootnode; 7351ae08745Sheppo int listsz; 7361ae08745Sheppo mde_cookie_t *listp = NULL; 7371ae08745Sheppo int num_nodes; 7381ae08745Sheppo uint64_t stick_prop; 7391ae08745Sheppo 7401ae08745Sheppo if (broken_md_flag) { 7417c478bd9Sstevel@tonic-gate sys_tick_freq = cpunodes[CPU->cpu_id].clock_freq; 7421ae08745Sheppo return; 7431ae08745Sheppo } 7441ae08745Sheppo 7451ae08745Sheppo if ((mdp = md_get_handle()) == NULL) 7461ae08745Sheppo panic("stick_frequency property not found in MD"); 7471ae08745Sheppo 7481ae08745Sheppo rootnode = md_root_node(mdp); 7491ae08745Sheppo ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE); 7501ae08745Sheppo 7511ae08745Sheppo num_nodes = md_node_count(mdp); 7521ae08745Sheppo 7531ae08745Sheppo ASSERT(num_nodes > 0); 7541ae08745Sheppo listsz = num_nodes * sizeof (mde_cookie_t); 7551ae08745Sheppo listp = (mde_cookie_t *)prom_alloc((caddr_t)0, listsz, 0); 7561ae08745Sheppo 7571ae08745Sheppo if (listp == NULL) 7581ae08745Sheppo panic("cannot allocate list for MD properties"); 7591ae08745Sheppo 7601ae08745Sheppo num_nodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "platform"), 7611ae08745Sheppo md_find_name(mdp, "fwd"), listp); 7621ae08745Sheppo 7631ae08745Sheppo ASSERT(num_nodes == 1); 7641ae08745Sheppo 7651ae08745Sheppo if (md_get_prop_val(mdp, *listp, "stick-frequency", &stick_prop) != 0) 7661ae08745Sheppo panic("stick_frequency property not found in MD"); 7671ae08745Sheppo 7681ae08745Sheppo sys_tick_freq = stick_prop; 7691ae08745Sheppo 7701ae08745Sheppo prom_free((caddr_t)listp, listsz); 7711ae08745Sheppo (void) md_fini_handle(mdp); 7727c478bd9Sstevel@tonic-gate } 7737c478bd9Sstevel@tonic-gate 7747c478bd9Sstevel@tonic-gate int shipit(int n, uint64_t cpu_list_ra); 7757c478bd9Sstevel@tonic-gate 7767c478bd9Sstevel@tonic-gate #ifdef DEBUG 7777c478bd9Sstevel@tonic-gate #define SEND_MONDO_STATS 1 7787c478bd9Sstevel@tonic-gate #endif 7797c478bd9Sstevel@tonic-gate 7807c478bd9Sstevel@tonic-gate #ifdef SEND_MONDO_STATS 7817c478bd9Sstevel@tonic-gate uint32_t x_one_stimes[64]; 7827c478bd9Sstevel@tonic-gate uint32_t x_one_ltimes[16]; 7837c478bd9Sstevel@tonic-gate uint32_t x_set_stimes[64]; 7847c478bd9Sstevel@tonic-gate uint32_t 
x_set_ltimes[16]; 7857c478bd9Sstevel@tonic-gate uint32_t x_set_cpus[NCPU]; 7867c478bd9Sstevel@tonic-gate #endif 7877c478bd9Sstevel@tonic-gate 7887c478bd9Sstevel@tonic-gate void 7897c478bd9Sstevel@tonic-gate send_one_mondo(int cpuid) 7907c478bd9Sstevel@tonic-gate { 7917c478bd9Sstevel@tonic-gate int retries, stat; 7927c478bd9Sstevel@tonic-gate uint64_t starttick, endtick, tick, lasttick; 7937c478bd9Sstevel@tonic-gate struct machcpu *mcpup = &(CPU->cpu_m); 7947c478bd9Sstevel@tonic-gate 7957c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(CPU, sys, xcalls, 1); 7967c478bd9Sstevel@tonic-gate starttick = lasttick = gettick(); 7977c478bd9Sstevel@tonic-gate mcpup->cpu_list[0] = (uint16_t)cpuid; 7987c478bd9Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 7997c478bd9Sstevel@tonic-gate endtick = starttick + xc_tick_limit; 8007c478bd9Sstevel@tonic-gate retries = 0; 801e5900f74Sha137994 while (stat != H_EOK) { 802e5900f74Sha137994 if (stat != H_EWOULDBLOCK) { 803e5900f74Sha137994 if (panic_quiesce) 804e5900f74Sha137994 return; 805e5900f74Sha137994 if (stat == H_ECPUERROR) 806e5900f74Sha137994 cmn_err(CE_PANIC, "send_one_mondo: " 807e5900f74Sha137994 "cpuid: 0x%x has been marked in " 808e5900f74Sha137994 "error", cpuid); 809e5900f74Sha137994 else 810e5900f74Sha137994 cmn_err(CE_PANIC, "send_one_mondo: " 811e5900f74Sha137994 "unexpected hypervisor error 0x%x " 812e5900f74Sha137994 "while sending a mondo to cpuid: " 813e5900f74Sha137994 "0x%x", stat, cpuid); 814e5900f74Sha137994 } 8157c478bd9Sstevel@tonic-gate tick = gettick(); 8167c478bd9Sstevel@tonic-gate /* 8177c478bd9Sstevel@tonic-gate * If there is a big jump between the current tick 8187c478bd9Sstevel@tonic-gate * count and lasttick, we have probably hit a break 8197c478bd9Sstevel@tonic-gate * point. Adjust endtick accordingly to avoid panic. 
8207c478bd9Sstevel@tonic-gate */ 8217c478bd9Sstevel@tonic-gate if (tick > (lasttick + xc_tick_jump_limit)) 8227c478bd9Sstevel@tonic-gate endtick += (tick - lasttick); 8237c478bd9Sstevel@tonic-gate lasttick = tick; 8247c478bd9Sstevel@tonic-gate if (tick > endtick) { 8257c478bd9Sstevel@tonic-gate if (panic_quiesce) 8267c478bd9Sstevel@tonic-gate return; 8277c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "send mondo timeout " 8287c478bd9Sstevel@tonic-gate "(target 0x%x) [retries: 0x%x hvstat: 0x%x]", 8297c478bd9Sstevel@tonic-gate cpuid, retries, stat); 8307c478bd9Sstevel@tonic-gate } 8317c478bd9Sstevel@tonic-gate drv_usecwait(1); 8327c478bd9Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 8337c478bd9Sstevel@tonic-gate retries++; 8347c478bd9Sstevel@tonic-gate } 8357c478bd9Sstevel@tonic-gate #ifdef SEND_MONDO_STATS 8367c478bd9Sstevel@tonic-gate { 837e5900f74Sha137994 uint64_t n = gettick() - starttick; 8387c478bd9Sstevel@tonic-gate if (n < 8192) 8397c478bd9Sstevel@tonic-gate x_one_stimes[n >> 7]++; 840e5900f74Sha137994 else if (n < 15*8192) 841e5900f74Sha137994 x_one_ltimes[n >> 13]++; 8427c478bd9Sstevel@tonic-gate else 8437c478bd9Sstevel@tonic-gate x_one_ltimes[0xf]++; 8447c478bd9Sstevel@tonic-gate } 8457c478bd9Sstevel@tonic-gate #endif 8467c478bd9Sstevel@tonic-gate } 8477c478bd9Sstevel@tonic-gate 8487c478bd9Sstevel@tonic-gate void 8497c478bd9Sstevel@tonic-gate send_mondo_set(cpuset_t set) 8507c478bd9Sstevel@tonic-gate { 8517c478bd9Sstevel@tonic-gate uint64_t starttick, endtick, tick, lasttick; 85200423197Sha137994 uint_t largestid, smallestid; 85300423197Sha137994 int i, j; 85400423197Sha137994 int ncpuids = 0; 8557c478bd9Sstevel@tonic-gate int shipped = 0; 856e5900f74Sha137994 int retries = 0; 8577c478bd9Sstevel@tonic-gate struct machcpu *mcpup = &(CPU->cpu_m); 8587c478bd9Sstevel@tonic-gate 8597c478bd9Sstevel@tonic-gate ASSERT(!CPUSET_ISNULL(set)); 86000423197Sha137994 CPUSET_BOUNDS(set, smallestid, largestid); 86100423197Sha137994 if (smallestid == CPUSET_NOTINSET) { 86200423197Sha137994 return; 86300423197Sha137994 } 86400423197Sha137994 8657c478bd9Sstevel@tonic-gate starttick = lasttick = gettick(); 8667c478bd9Sstevel@tonic-gate endtick = starttick + xc_tick_limit; 8677c478bd9Sstevel@tonic-gate 86800423197Sha137994 /* 86900423197Sha137994 * Assemble CPU list for HV argument. We already know 87000423197Sha137994 * smallestid and largestid are members of set. 87100423197Sha137994 */ 87200423197Sha137994 mcpup->cpu_list[ncpuids++] = (uint16_t)smallestid; 87300423197Sha137994 if (largestid != smallestid) { 87400423197Sha137994 for (i = smallestid+1; i <= largestid-1; i++) { 8757c478bd9Sstevel@tonic-gate if (CPU_IN_SET(set, i)) { 87600423197Sha137994 mcpup->cpu_list[ncpuids++] = (uint16_t)i; 8777c478bd9Sstevel@tonic-gate } 8787c478bd9Sstevel@tonic-gate } 87900423197Sha137994 mcpup->cpu_list[ncpuids++] = (uint16_t)largestid; 88000423197Sha137994 } 88100423197Sha137994 88200423197Sha137994 do { 88300423197Sha137994 int stat; 8847c478bd9Sstevel@tonic-gate 885e5900f74Sha137994 stat = shipit(ncpuids, mcpup->cpu_list_ra); 886e5900f74Sha137994 if (stat == H_EOK) { 887e5900f74Sha137994 shipped += ncpuids; 888e5900f74Sha137994 break; 889e5900f74Sha137994 } 890e5900f74Sha137994 891e5900f74Sha137994 /* 892e5900f74Sha137994 * Either not all CPU mondos were sent, or an 893e5900f74Sha137994 * error occurred. CPUs that were sent mondos 894e5900f74Sha137994 * have their CPU IDs overwritten in cpu_list. 
		 * Reset cpu_list so that it only holds those
		 * CPU IDs that still need to be sent.
		 */
		for (i = 0, j = 0; i < ncpuids; i++) {
			if (mcpup->cpu_list[i] == HV_SEND_MONDO_ENTRYDONE) {
				shipped++;
			} else {
				mcpup->cpu_list[j++] = mcpup->cpu_list[i];
			}
		}
		ncpuids = j;

		/*
		 * Now handle possible errors returned
		 * from hypervisor.
		 */
		if (stat == H_ECPUERROR) {
			int errorcpus;

			if (!panic_quiesce)
				cmn_err(CE_CONT, "send_mondo_set: cpuid(s) ");

			/*
			 * Remove any CPUs in the error state from
			 * cpu_list. At this point cpu_list only
			 * contains the CPU IDs for mondos not
			 * successfully sent.
			 */
			for (i = 0, errorcpus = 0; i < ncpuids; i++) {
				uint64_t state = CPU_STATE_INVALID;
				uint16_t id = mcpup->cpu_list[i];

				(void) hv_cpu_state(id, &state);
				if (state == CPU_STATE_ERROR) {
					if (!panic_quiesce)
						cmn_err(CE_CONT, "0x%x ", id);
					errorcpus++;
				} else if (errorcpus > 0) {
					mcpup->cpu_list[i - errorcpus] =
					    mcpup->cpu_list[i];
				}
			}
			ncpuids -= errorcpus;

			if (!panic_quiesce) {
				if (errorcpus == 0) {
					cmn_err(CE_CONT, "<none> have been "
					    "marked in error\n");
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "hypervisor returned "
					    "H_ECPUERROR but no CPU in "
					    "cpu_list in error state");
				} else {
					cmn_err(CE_CONT, "have been marked in "
					    "error\n");
					cmn_err(CE_PANIC, "send_mondo_set: "
					    "CPU(s) in error state");
				}
			}
		} else if (stat != H_EWOULDBLOCK) {
			if (panic_quiesce)
				return;
			/*
			 * For all other errors, panic.
			 */
			cmn_err(CE_CONT, "send_mondo_set: unexpected "
			    "hypervisor error 0x%x while sending a "
			    "mondo to cpuid(s):", stat);
			for (i = 0; i < ncpuids; i++) {
				cmn_err(CE_CONT, " 0x%x", mcpup->cpu_list[i]);
			}
			cmn_err(CE_CONT, "\n");
			cmn_err(CE_PANIC, "send_mondo_set: unexpected "
			    "hypervisor error");
		}

		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a break
		 * point.
Adjust endtick accordingly to avoid panic. 9767c478bd9Sstevel@tonic-gate */ 9777c478bd9Sstevel@tonic-gate if (tick > (lasttick + xc_tick_jump_limit)) 9787c478bd9Sstevel@tonic-gate endtick += (tick - lasttick); 9797c478bd9Sstevel@tonic-gate lasttick = tick; 9807c478bd9Sstevel@tonic-gate if (tick > endtick) { 9817c478bd9Sstevel@tonic-gate if (panic_quiesce) 9827c478bd9Sstevel@tonic-gate return; 9837c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "send mondo timeout " 9847c478bd9Sstevel@tonic-gate "[retries: 0x%x] cpuids: ", retries); 98500423197Sha137994 for (i = 0; i < ncpuids; i++) 98600423197Sha137994 cmn_err(CE_CONT, " 0x%x", mcpup->cpu_list[i]); 9877c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "\n"); 9887c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "send_mondo_set: timeout"); 9897c478bd9Sstevel@tonic-gate } 9907c478bd9Sstevel@tonic-gate 9917c478bd9Sstevel@tonic-gate while (gettick() < (tick + sys_clock_mhz)) 9927c478bd9Sstevel@tonic-gate ; 9937c478bd9Sstevel@tonic-gate retries++; 99400423197Sha137994 } while (ncpuids > 0); 995e5900f74Sha137994 996e5900f74Sha137994 CPU_STATS_ADDQ(CPU, sys, xcalls, shipped); 9977c478bd9Sstevel@tonic-gate 9987c478bd9Sstevel@tonic-gate #ifdef SEND_MONDO_STATS 9997c478bd9Sstevel@tonic-gate { 1000e5900f74Sha137994 uint64_t n = gettick() - starttick; 10017c478bd9Sstevel@tonic-gate if (n < 8192) 10027c478bd9Sstevel@tonic-gate x_set_stimes[n >> 7]++; 1003e5900f74Sha137994 else if (n < 15*8192) 1004e5900f74Sha137994 x_set_ltimes[n >> 13]++; 10057c478bd9Sstevel@tonic-gate else 10067c478bd9Sstevel@tonic-gate x_set_ltimes[0xf]++; 10077c478bd9Sstevel@tonic-gate } 10087c478bd9Sstevel@tonic-gate x_set_cpus[shipped]++; 10097c478bd9Sstevel@tonic-gate #endif 10107c478bd9Sstevel@tonic-gate } 10117c478bd9Sstevel@tonic-gate 10127c478bd9Sstevel@tonic-gate void 10137c478bd9Sstevel@tonic-gate syncfpu(void) 10147c478bd9Sstevel@tonic-gate { 10157c478bd9Sstevel@tonic-gate } 10167c478bd9Sstevel@tonic-gate 10177c478bd9Sstevel@tonic-gate void 10187c478bd9Sstevel@tonic-gate sticksync_slave(void) 1019023e71deSHaik Aftandilian { 1020023e71deSHaik Aftandilian suspend_sync_tick_stick_npt(); 1021023e71deSHaik Aftandilian } 10227c478bd9Sstevel@tonic-gate 10237c478bd9Sstevel@tonic-gate void 10247c478bd9Sstevel@tonic-gate sticksync_master(void) 10257c478bd9Sstevel@tonic-gate {} 10267c478bd9Sstevel@tonic-gate 10277c478bd9Sstevel@tonic-gate void 10287c478bd9Sstevel@tonic-gate cpu_init_cache_scrub(void) 10293b890a5bSjb145095 { 10303b890a5bSjb145095 mach_set_soft_state(SIS_NORMAL, &SOLARIS_SOFT_STATE_RUN_MSG); 10313b890a5bSjb145095 } 10327c478bd9Sstevel@tonic-gate 10337c478bd9Sstevel@tonic-gate int 10347c478bd9Sstevel@tonic-gate dtrace_blksuword32_err(uintptr_t addr, uint32_t *data) 10357c478bd9Sstevel@tonic-gate { 10367c478bd9Sstevel@tonic-gate int ret, watched; 10377c478bd9Sstevel@tonic-gate 10387c478bd9Sstevel@tonic-gate watched = watch_disable_addr((void *)addr, 4, S_WRITE); 10397c478bd9Sstevel@tonic-gate ret = dtrace_blksuword32(addr, data, 0); 10407c478bd9Sstevel@tonic-gate if (watched) 10417c478bd9Sstevel@tonic-gate watch_enable_addr((void *)addr, 4, S_WRITE); 10427c478bd9Sstevel@tonic-gate 10437c478bd9Sstevel@tonic-gate return (ret); 10447c478bd9Sstevel@tonic-gate } 10457c478bd9Sstevel@tonic-gate 10467c478bd9Sstevel@tonic-gate int 10477c478bd9Sstevel@tonic-gate dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain) 10487c478bd9Sstevel@tonic-gate { 10497c478bd9Sstevel@tonic-gate if (suword32((void *)addr, *data) == -1) 10507c478bd9Sstevel@tonic-gate return (tryagain ? 
dtrace_blksuword32_err(addr, data) : -1); 10517c478bd9Sstevel@tonic-gate dtrace_flush_sec(addr); 10527c478bd9Sstevel@tonic-gate 10537c478bd9Sstevel@tonic-gate return (0); 10547c478bd9Sstevel@tonic-gate } 10557c478bd9Sstevel@tonic-gate 10567c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 10577c478bd9Sstevel@tonic-gate void 10587c478bd9Sstevel@tonic-gate cpu_faulted_enter(struct cpu *cp) 10597c478bd9Sstevel@tonic-gate { 10607c478bd9Sstevel@tonic-gate } 10617c478bd9Sstevel@tonic-gate 10627c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 10637c478bd9Sstevel@tonic-gate void 10647c478bd9Sstevel@tonic-gate cpu_faulted_exit(struct cpu *cp) 10657c478bd9Sstevel@tonic-gate { 10667c478bd9Sstevel@tonic-gate } 10677c478bd9Sstevel@tonic-gate 10687c478bd9Sstevel@tonic-gate static int 10697c478bd9Sstevel@tonic-gate kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg) 10707c478bd9Sstevel@tonic-gate { 10717c478bd9Sstevel@tonic-gate int rc, i; 10727c478bd9Sstevel@tonic-gate 10737c478bd9Sstevel@tonic-gate for (rc = 0, i = 0; i < NCPU; i++) { 10747c478bd9Sstevel@tonic-gate if (CPU_IN_SET(cpu_ready_set, i)) 10757c478bd9Sstevel@tonic-gate rc += cb(i, arg); 10767c478bd9Sstevel@tonic-gate } 10777c478bd9Sstevel@tonic-gate 10787c478bd9Sstevel@tonic-gate return (rc); 10797c478bd9Sstevel@tonic-gate } 10807c478bd9Sstevel@tonic-gate 10817c478bd9Sstevel@tonic-gate /* 10827c478bd9Sstevel@tonic-gate * Sends a cross-call to a specified processor. The caller assumes 10837c478bd9Sstevel@tonic-gate * responsibility for repetition of cross-calls, as appropriate (MARSA for 10847c478bd9Sstevel@tonic-gate * debugging). 10857c478bd9Sstevel@tonic-gate */ 10867c478bd9Sstevel@tonic-gate static int 10877c478bd9Sstevel@tonic-gate kdi_xc_one(int cpuid, void (*func)(uintptr_t, uintptr_t), uintptr_t arg1, 10887c478bd9Sstevel@tonic-gate uintptr_t arg2) 10897c478bd9Sstevel@tonic-gate { 10907c478bd9Sstevel@tonic-gate int stat; 10917c478bd9Sstevel@tonic-gate struct machcpu *mcpup; 10927c478bd9Sstevel@tonic-gate uint64_t cpuaddr_reg = 0, cpuaddr_scr = 0; 10937c478bd9Sstevel@tonic-gate 10947c478bd9Sstevel@tonic-gate mcpup = &(((cpu_t *)get_cpuaddr(cpuaddr_reg, cpuaddr_scr))->cpu_m); 10957c478bd9Sstevel@tonic-gate 10967c478bd9Sstevel@tonic-gate /* 10977c478bd9Sstevel@tonic-gate * if (idsr_busy()) 10987c478bd9Sstevel@tonic-gate * return (KDI_XC_RES_ERR); 10997c478bd9Sstevel@tonic-gate */ 11007c478bd9Sstevel@tonic-gate 11017c478bd9Sstevel@tonic-gate init_mondo_nocheck((xcfunc_t *)func, arg1, arg2); 11027c478bd9Sstevel@tonic-gate 11037c478bd9Sstevel@tonic-gate mcpup->cpu_list[0] = (uint16_t)cpuid; 11047c478bd9Sstevel@tonic-gate stat = shipit(1, mcpup->cpu_list_ra); 11057c478bd9Sstevel@tonic-gate 11067c478bd9Sstevel@tonic-gate if (stat == 0) 11077c478bd9Sstevel@tonic-gate return (KDI_XC_RES_OK); 11087c478bd9Sstevel@tonic-gate else 11097c478bd9Sstevel@tonic-gate return (KDI_XC_RES_NACK); 11107c478bd9Sstevel@tonic-gate } 11117c478bd9Sstevel@tonic-gate 11127c478bd9Sstevel@tonic-gate static void 11137c478bd9Sstevel@tonic-gate kdi_tickwait(clock_t nticks) 11147c478bd9Sstevel@tonic-gate { 11157c478bd9Sstevel@tonic-gate clock_t endtick = gettick() + nticks; 11167c478bd9Sstevel@tonic-gate 111722e19ac1Sjm22469 while (gettick() < endtick) 11181b83305cSjm22469 ; 11197c478bd9Sstevel@tonic-gate } 11207c478bd9Sstevel@tonic-gate 11217c478bd9Sstevel@tonic-gate static void 11227c478bd9Sstevel@tonic-gate kdi_cpu_init(int dcache_size, int dcache_linesize, int icache_size, 11237c478bd9Sstevel@tonic-gate int icache_linesize) 11247c478bd9Sstevel@tonic-gate { 11257c478bd9Sstevel@tonic-gate 
kdi_dcache_size = dcache_size; 11267c478bd9Sstevel@tonic-gate kdi_dcache_linesize = dcache_linesize; 11277c478bd9Sstevel@tonic-gate kdi_icache_size = icache_size; 11287c478bd9Sstevel@tonic-gate kdi_icache_linesize = icache_linesize; 11297c478bd9Sstevel@tonic-gate } 11307c478bd9Sstevel@tonic-gate 11317c478bd9Sstevel@tonic-gate /* used directly by kdi_read/write_phys */ 11327c478bd9Sstevel@tonic-gate void 11337c478bd9Sstevel@tonic-gate kdi_flush_caches(void) 11347c478bd9Sstevel@tonic-gate { 11359d0d62adSJason Beloro /* Not required on sun4v architecture. */ 11367c478bd9Sstevel@tonic-gate } 11377c478bd9Sstevel@tonic-gate 11387c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 11397c478bd9Sstevel@tonic-gate int 11407c478bd9Sstevel@tonic-gate kdi_get_stick(uint64_t *stickp) 11417c478bd9Sstevel@tonic-gate { 11427c478bd9Sstevel@tonic-gate return (-1); 11437c478bd9Sstevel@tonic-gate } 11447c478bd9Sstevel@tonic-gate 11457c478bd9Sstevel@tonic-gate void 11467c478bd9Sstevel@tonic-gate cpu_kdi_init(kdi_t *kdi) 11477c478bd9Sstevel@tonic-gate { 11487c478bd9Sstevel@tonic-gate kdi->kdi_flush_caches = kdi_flush_caches; 11497c478bd9Sstevel@tonic-gate kdi->mkdi_cpu_init = kdi_cpu_init; 11507c478bd9Sstevel@tonic-gate kdi->mkdi_cpu_ready_iter = kdi_cpu_ready_iter; 11517c478bd9Sstevel@tonic-gate kdi->mkdi_xc_one = kdi_xc_one; 11527c478bd9Sstevel@tonic-gate kdi->mkdi_tickwait = kdi_tickwait; 11537c478bd9Sstevel@tonic-gate kdi->mkdi_get_stick = kdi_get_stick; 11547c478bd9Sstevel@tonic-gate } 11557c478bd9Sstevel@tonic-gate 11563b890a5bSjb145095 uint64_t soft_state_message_ra[SOLARIS_SOFT_STATE_MSG_CNT]; 11573b890a5bSjb145095 static uint64_t soft_state_saved_state = (uint64_t)-1; 11583b890a5bSjb145095 static int soft_state_initialized = 0; 11593b890a5bSjb145095 static uint64_t soft_state_sup_minor; /* Supported minor number */ 11603b890a5bSjb145095 static hsvc_info_t soft_state_hsvc = { 11613b890a5bSjb145095 HSVC_REV_1, NULL, HSVC_GROUP_SOFT_STATE, 1, 0, NULL }; 11623b890a5bSjb145095 11633b890a5bSjb145095 11643c431bb5Swentaoy static void 11653c431bb5Swentaoy sun4v_system_claim(void) 11663c431bb5Swentaoy { 1167d3d50737SRafael Vanoni lbolt_debug_entry(); 1168d3d50737SRafael Vanoni 11693c431bb5Swentaoy watchdog_suspend(); 11705699897cSHaik Aftandilian kldc_debug_enter(); 11713b890a5bSjb145095 /* 11723b890a5bSjb145095 * For "mdb -K", set soft state to debugging 11733b890a5bSjb145095 */ 11743b890a5bSjb145095 if (soft_state_saved_state == -1) { 11753b890a5bSjb145095 mach_get_soft_state(&soft_state_saved_state, 11763b890a5bSjb145095 &SOLARIS_SOFT_STATE_SAVED_MSG); 11773b890a5bSjb145095 } 11783b890a5bSjb145095 /* 11793b890a5bSjb145095 * check again as the read above may or may not have worked and if 11803b890a5bSjb145095 * it didn't then soft state will still be -1 11813b890a5bSjb145095 */ 11823b890a5bSjb145095 if (soft_state_saved_state != -1) { 11833b890a5bSjb145095 mach_set_soft_state(SIS_TRANSITION, 11843b890a5bSjb145095 &SOLARIS_SOFT_STATE_DEBUG_MSG); 11853b890a5bSjb145095 } 11863c431bb5Swentaoy } 11873c431bb5Swentaoy 11883c431bb5Swentaoy static void 11893c431bb5Swentaoy sun4v_system_release(void) 11903c431bb5Swentaoy { 11913c431bb5Swentaoy watchdog_resume(); 11923b890a5bSjb145095 /* 11933b890a5bSjb145095 * For "mdb -K", set soft_state state back to original state on exit 11943b890a5bSjb145095 */ 11953b890a5bSjb145095 if (soft_state_saved_state != -1) { 11963b890a5bSjb145095 mach_set_soft_state(soft_state_saved_state, 11973b890a5bSjb145095 &SOLARIS_SOFT_STATE_SAVED_MSG); 11983b890a5bSjb145095 soft_state_saved_state = -1; 
	}

	lbolt_debug_return();
}

void
plat_kdi_init(kdi_t *kdi)
{
	kdi->pkdi_system_claim = sun4v_system_claim;
	kdi->pkdi_system_release = sun4v_system_release;
}

/*
 * Routine to return memory information associated
 * with a physical address and syndrome.
 */
/* ARGSUSED */
int
cpu_get_mem_info(uint64_t synd, uint64_t afar,
    uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
    int *segsp, int *banksp, int *mcidp)
{
	return (ENOTSUP);
}

/*
 * This routine returns the size of the kernel's FRU name buffer.
 */
size_t
cpu_get_name_bufsize()
{
	return (UNUM_NAMLEN);
}

/*
 * This routine is a more generic interface to cpu_get_mem_unum()
 * that may be used by other modules (e.g. mm).
 */
/* ARGSUSED */
int
cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
    char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
{
	return (ENOTSUP);
}

/* ARGSUSED */
int
cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
{
	return (ENOTSUP);
}

/*
 * xt_sync - wait for previous x-traps to finish
 */
void
xt_sync(cpuset_t cpuset)
{
	union {
		uint8_t volatile byte[NCPU];
		uint64_t volatile xword[NCPU / 8];
	} cpu_sync;
	uint64_t starttick, endtick, tick, lasttick, traptrace_id;
	uint_t largestid, smallestid;
	int i, j;

	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);
	CPUSET_AND(cpuset, cpu_ready_set);

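	/*
	 * Bound the set of target CPUs by its smallest and largest IDs;
	 * if no target remains after excluding ourselves and the
	 * not-yet-ready CPUs, there is nothing to wait for.
	 */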
	CPUSET_BOUNDS(cpuset, smallestid, largestid);
	if (smallestid == CPUSET_NOTINSET)
		goto out;

	/*
	 * Sun4v uses a queue for receiving mondos. Successful
	 * transmission of a mondo only indicates that the mondo
	 * has been written into the queue.
	 *
	 * We use an array of bytes to let each cpu signal back
	 * to the cross trap sender that the cross trap has been
	 * executed. Set the byte to 1 before sending the cross trap
	 * and wait until the other cpus reset it to 0.
	 */
	bzero((void *)&cpu_sync, NCPU);
	cpu_sync.byte[smallestid] = 1;
	if (largestid != smallestid) {
		for (i = (smallestid + 1); i <= (largestid - 1); i++)
			if (CPU_IN_SET(cpuset, i))
				cpu_sync.byte[i] = 1;
		cpu_sync.byte[largestid] = 1;
	}

	/*
	 * To help debug xt_sync panics, each mondo is uniquely identified
	 * by the tick value (traptrace_id) passed as the second mondo
	 * argument to xt_some; it is logged in the CPU's mondo queue,
	 * the traptrace buffer and the panic message.
	 */
	traptrace_id = gettick();
	xt_some(cpuset, (xcfunc_t *)xt_sync_tl1,
	    (uint64_t)cpu_sync.byte, traptrace_id);

	starttick = lasttick = gettick();
	endtick = starttick + xc_sync_tick_limit;

	for (i = (smallestid / 8); i <= (largestid / 8); i++) {
		while (cpu_sync.xword[i] != 0) {
			tick = gettick();
			/*
			 * If there is a big jump between the current tick
			 * count and lasttick, we have probably hit a break
			 * point. Adjust endtick accordingly to avoid panic.
			 */
			if (tick > (lasttick + xc_tick_jump_limit)) {
				endtick += (tick - lasttick);
			}
			lasttick = tick;
			if (tick > endtick) {
				if (panic_quiesce)
					goto out;
				cmn_err(CE_CONT, "Cross trap sync timeout: "
				    "at cpu_sync.xword[%d]: 0x%lx "
				    "cpu_sync.byte: 0x%lx "
				    "starttick: 0x%lx endtick: 0x%lx "
				    "traptrace_id = 0x%lx\n",
				    i, cpu_sync.xword[i],
				    (uint64_t)cpu_sync.byte,
				    starttick, endtick, traptrace_id);
				cmn_err(CE_CONT, "CPUIDs:");
				for (j = (i * 8); j <= largestid; j++) {
					if (cpu_sync.byte[j] != 0)
						cmn_err(CE_CONT, " 0x%x", j);
				}
				cmn_err(CE_PANIC, "xt_sync: timeout");
			}
		}
	}

out:
	kpreempt_enable();
}

#define	QFACTOR		200

/*
 * Recalculate the values of the cross-call timeout variables based
 * on the value of the 'inter-cpu-latency' property of the platform node.
 * The property gives the number of nanoseconds to wait for a cross-call
 * to be acknowledged. Other timeout variables are derived from it.
 *
 * N.B. This implementation is aware of the internals of xc_init()
 * and updates many of the same variables.
 */
void
recalc_xc_timeouts(void)
{
	typedef union {
		uint64_t whole;
		struct {
			uint_t high;
			uint_t low;
		} half;
	} u_number;

	/* See x_call.c for descriptions of these extern variables. */
	extern uint64_t xc_tick_limit_scale;
	extern uint64_t xc_mondo_time_limit;
	extern uint64_t xc_func_time_limit;
	extern uint64_t xc_scale;
	extern uint64_t xc_mondo_multiplier;
	extern uint_t	nsec_shift;

	/* Temp versions of the target variables */
	uint64_t tick_limit;
	uint64_t tick_jump_limit;
	uint64_t mondo_time_limit;
	uint64_t func_time_limit;
	uint64_t scale;

	uint64_t latency;	/* nanoseconds */
	uint64_t maxfreq;
	uint64_t tick_limit_save = xc_tick_limit;
	uint64_t sync_tick_limit_save = xc_sync_tick_limit;
	uint_t	tick_scale;
	uint64_t top;
	uint64_t bottom;
	u_number tk;

	md_t *mdp;
	int nrnode;
	mde_cookie_t *platlist;

	/*
	 * Look up the 'inter-cpu-latency' (optional) property in the
	 * platform node of the MD. The units are nanoseconds.
	 */
	if ((mdp = md_get_handle()) == NULL) {
		cmn_err(CE_WARN, "recalc_xc_timeouts: "
		    "Unable to initialize machine description");
		return;
	}

	nrnode = md_alloc_scan_dag(mdp,
	    md_root_node(mdp), "platform", "fwd", &platlist);

	ASSERT(nrnode == 1);
	if (nrnode < 1) {
		cmn_err(CE_WARN, "recalc_xc_timeouts: platform node missing");
		goto done;
	}
	if (md_get_prop_val(mdp, platlist[0],
	    "inter-cpu-latency", &latency) == -1)
		goto done;

	/*
	 * clock.h defines an assembly-language macro
	 * (NATIVE_TIME_TO_NSEC_SCALE) to convert from %stick
	 * units to nanoseconds. Since the inter-cpu-latency
	 * units are nanoseconds and the xc_* variables require
	 * %stick units, we need the inverse of that function.
	 * The trick is to perform the calculation without
	 * floating point, but also without integer truncation
	 * or overflow. To understand the calculation below,
	 * please read the discussion of the macro in clock.h.
	 * Since this new code will be invoked infrequently,
	 * we can afford to implement it in C.
	 *
	 * tick_scale is the reciprocal of nsec_scale which is
	 * calculated at startup in setcpudelay(). The calc
	 * of tick_limit parallels that of NATIVE_TIME_TO_NSEC_SCALE
	 * except we use tick_scale instead of nsec_scale and
	 * C instead of assembler.
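	 *
	 * A purely illustrative example (assumed numbers, not taken from
	 * any particular platform): with sys_tick_freq = 1 GHz and
	 * nsec_shift = 4, tick_scale = (10^9 << 28) / 10^9 = 2^28, so an
	 * inter-cpu-latency of 2000 ns yields
	 * tick_limit = ((2000 << 4) * 2^28) >> 32 = 2000 %stick ticks;
	 * nanoseconds and %stick ticks coincide at a 1 GHz stick rate.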
	 */
	tick_scale = (uint_t)(((u_longlong_t)sys_tick_freq
	    << (32 - nsec_shift)) / NANOSEC);

	tk.whole = latency;
	top = ((uint64_t)tk.half.high << 4) * tick_scale;
	bottom = (((uint64_t)tk.half.low << 4) * (uint64_t)tick_scale) >> 32;
	tick_limit = top + bottom;

	/*
	 * xc_init() calculated 'maxfreq' by looking at all the cpus,
	 * and used it to derive some of the timeout variables that we
	 * recalculate below. We can back into the original value by
	 * using the inverse of one of those calculations.
	 */
	maxfreq = xc_mondo_time_limit / xc_scale;

	/*
	 * Don't allow the new timeout (xc_tick_limit) to fall below
	 * the system tick frequency (stick). Allowing the timeout
	 * to be set more tightly than this empirically determined
	 * value may cause panics.
	 */
	tick_limit = tick_limit < sys_tick_freq ? sys_tick_freq : tick_limit;

	tick_jump_limit = tick_limit / 32;
	tick_limit *= xc_tick_limit_scale;

	/*
	 * Recalculate xc_scale since it is used in a callback function
	 * (xc_func_timeout_adj) to adjust two of the timeouts dynamically.
	 * Make the change in xc_scale proportional to the change in
	 * xc_tick_limit.
	 */
	scale = (xc_scale * tick_limit + sys_tick_freq / 2) / tick_limit_save;
	if (scale == 0)
		scale = 1;

	mondo_time_limit = maxfreq * scale;
	func_time_limit = mondo_time_limit * xc_mondo_multiplier;

	/*
	 * Don't modify the timeouts if nothing has changed. Else,
	 * stuff the variables with the freshly calculated (temp)
	 * variables. This minimizes the window where the set of
	 * values could be inconsistent.
	 */
	if (tick_limit != xc_tick_limit) {
		xc_tick_limit = tick_limit;
		xc_tick_jump_limit = tick_jump_limit;
		xc_scale = scale;
		xc_mondo_time_limit = mondo_time_limit;
		xc_func_time_limit = func_time_limit;
	}

done:
	/*
	 * Increase the timeout limit for xt_sync() cross calls.
	 */
	xc_sync_tick_limit = xc_tick_limit * (cpu_q_entries / QFACTOR);
	xc_sync_tick_limit = xc_sync_tick_limit < xc_tick_limit ?
	    xc_tick_limit : xc_sync_tick_limit;

	/*
	 * Force the new values to be used for future cross calls.
	 * This is necessary only when we increase the timeouts.
	 */
	if ((xc_tick_limit > tick_limit_save) || (xc_sync_tick_limit >
	    sync_tick_limit_save)) {
		cpuset_t cpuset = cpu_ready_set;
		xt_sync(cpuset);
	}

	if (nrnode > 0)
		md_free_scan_dag(mdp, &platlist);
	(void) md_fini_handle(mdp);
}

void
mach_soft_state_init(void)
{
	int i;
	uint64_t ra;

	/*
	 * Try to register the soft_state API. If this fails, the soft_state
	 * API has not been implemented in the firmware, so do not bother
	 * to set up soft_state in the kernel.
	 */
	if ((i = hsvc_register(&soft_state_hsvc, &soft_state_sup_minor)) != 0) {
		return;
	}
	for (i = 0; i < SOLARIS_SOFT_STATE_MSG_CNT; i++) {
		ASSERT(strlen((const char *)(void *)
		    soft_state_message_strings + i) < SSM_SIZE);
		if ((ra = va_to_pa(
		    (void *)(soft_state_message_strings + i))) == -1ll) {
			return;
		}
		soft_state_message_ra[i] = ra;
	}
	/*
	 * Tell OBP that we are supporting Guest State.
	 */
	prom_sun4v_soft_state_supported();
	soft_state_initialized = 1;
}

void
mach_set_soft_state(uint64_t state, uint64_t *string_ra)
{
	uint64_t rc;

	if (soft_state_initialized && *string_ra) {
		rc = hv_soft_state_set(state, *string_ra);
		if (rc != H_EOK) {
			cmn_err(CE_WARN,
			    "hv_soft_state_set returned %ld\n", rc);
		}
	}
}

void
mach_get_soft_state(uint64_t *state, uint64_t *string_ra)
{
	uint64_t rc;

	if (soft_state_initialized && *string_ra) {
		rc = hv_soft_state_get(*string_ra, state);
		if (rc != H_EOK) {
			cmn_err(CE_WARN,
			    "hv_soft_state_get returned %ld\n", rc);
			*state = -1;
		}
	}
}