/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/segments.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <sys/vm.h>

#include <sys/disp.h>
#include <sys/class.h>

#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/kmem.h>

#include <sys/reboot.h>
#include <sys/uadmin.h>
#include <sys/callb.h>

#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/file.h>

#include <sys/procfs.h>
#include <sys/acct.h>

#include <sys/vfs.h>
#include <sys/dnlc.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/utsname.h>
#include <sys/debug.h>

#include <sys/dumphdr.h>
#include <sys/bootconf.h>
#include <sys/varargs.h>
#include <sys/promif.h>
#include <sys/modctl.h>

#include <sys/consdev.h>
#include <sys/frame.h>

#include <sys/sunddi.h>
#include <sys/ddidmareq.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/clock.h>
#include <sys/tss.h>
#include <sys/cpu.h>
#include <sys/stack.h>
#include <sys/trap.h>
#include <sys/pic.h>
#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kp.h>
#include <vm/hat_i86.h>
#include <sys/swap.h>
#include <sys/thread.h>
#include <sys/sysconf.h>
#include <sys/vm_machparam.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/machlock.h>
#include <sys/x_call.h>
#include <sys/instance.h>

#include <sys/time.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_types.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/cpuvar.h>
#include <sys/dtrace.h>
#include <sys/bl.h>
#include <sys/nvpair.h>
#include <sys/x86_archext.h>
#include <sys/pool_pset.h>
#include <sys/autoconf.h>
#include <sys/mem.h>
#include <sys/dumphdr.h>
#include <sys/compress.h>
#include <sys/cpu_module.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#include <sys/xpv_panic.h>
#endif

#include <sys/fastboot.h>
#include <sys/machelf.h>
#include <sys/kobj.h>
#include <sys/multiboot.h>

#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif	/* TRAPTRACE */

#include <c2/audit.h>
#include <sys/clock_impl.h>

extern void audit_enterprom(int);
extern void audit_exitprom(int);

/*
 * Tunable to enable apix PSM; if set to 0, pcplusmp PSM will be used.
 */
int apix_enable = 1;

int apic_nvidia_io_max = 0;	/* no. of NVIDIA i/o apics */

/*
 * Occasionally the kernel knows better whether to power-off or reboot.
 */
int force_shutdown_method = AD_UNKNOWN;

/*
 * The panicbuf array is used to record messages and state:
 */
char panicbuf[PANICBUFSIZE];

/*
 * Flags to control Dynamic Reconfiguration features.
 */
uint64_t plat_dr_options;

/*
 * Maximum physical address for memory DR operations.
 */
uint64_t plat_dr_physmax;

/*
 * maxphys - used during physio
 * klustsize - used for klustering by swapfs and specfs
 */
int maxphys = 56 * 1024;    /* XXX See vm_subr.c - max b_count in physio */
int klustsize = 56 * 1024;

caddr_t p0_va;		/* Virtual address for accessing physical page 0 */

/*
 * defined here, though unused on x86,
 * to make kstat_fr.c happy.
 */
int vac;

void debug_enter(char *);

extern void pm_cfb_check_and_powerup(void);
extern void pm_cfb_rele(void);

extern fastboot_info_t newkernel;

/*
 * Machine dependent code to reboot.
 * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
 * to a string to be used as the argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panicking or being suspended).
 */
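/*
 * Illustration (not part of the reboot path itself): a subsystem that needs
 * to run cleanup before a normal reboot can register a CB_CL_MDBOOT callback,
 * which mdboot() invokes below when invoke_cb is true.  A minimal sketch,
 * with the callback and its argument purely hypothetical:
 *
 *	static boolean_t
 *	mydrv_mdboot_cb(void *arg, int code)
 *	{
 *		mydrv_flush_state(arg);		(hypothetical helper)
 *		return (B_TRUE);
 *	}
 *
 *	(void) callb_add(mydrv_mdboot_cb, mydrv_state, CB_CL_MDBOOT, "mydrv");
 */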
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	processorid_t bootcpuid = 0;
	static int is_first_quiesce = 1;
	static int is_first_reset = 1;
	int reset_status = 0;
	static char fallback_str[] = "Falling back to regular reboot.\n";

	if (fcn == AD_FASTREBOOT && !newkernel.fi_valid)
		fcn = AD_BOOT;

	if (!panicstr) {
		kpreempt_disable();
		if (fcn == AD_FASTREBOOT) {
			mutex_enter(&cpu_lock);
			if (CPU_ACTIVE(cpu_get(bootcpuid))) {
				affinity_set(bootcpuid);
			}
			mutex_exit(&cpu_lock);
		} else {
			affinity_set(CPU_CURRENT);
		}
	}

	if (force_shutdown_method != AD_UNKNOWN)
		fcn = force_shutdown_method;

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	if (IN_XPV_PANIC())
		reset();

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	page_retire_mdboot();

#if defined(__xpv)
	/*
	 * XXPV	Should probably think some more about how we deal
	 * with panicking before it's really safe to panic.
	 * On hypervisors, we reboot very quickly..  Perhaps panic
	 * should only attempt to recover by rebooting if,
	 * say, we were able to mount the root filesystem,
	 * or if we successfully launched init(1m).
	 */
	if (panicstr && proc_init == NULL)
		(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif
	/*
	 * stop other cpus and raise our priority.  since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * If the system is panicking, the preloaded kernel is valid, and
	 * fastreboot_onpanic has been set, and the system has been up for
	 * longer than fastreboot_onpanic_uptime (default to 10 minutes),
	 * choose Fast Reboot.
	 */
	if (fcn == AD_BOOT && panicstr && newkernel.fi_valid &&
	    fastreboot_onpanic &&
	    (panic_lbolt - lbolt_at_boot) > fastreboot_onpanic_uptime) {
		fcn = AD_FASTREBOOT;
	}

	/*
	 * Try to quiesce devices.
	 */
	if (is_first_quiesce) {
		/*
		 * Clear is_first_quiesce before calling quiesce_devices()
		 * so that if quiesce_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_quiesce = 0;

		quiesce_active = 1;
		quiesce_devices(ddi_root_node(), &reset_status);
		if (reset_status == -1) {
			if (fcn == AD_FASTREBOOT && !force_fastreboot) {
				prom_printf("Driver(s) not capable of fast "
				    "reboot.\n");
				prom_printf(fallback_str);
				fastreboot_capable = 0;
				fcn = AD_BOOT;
			} else if (fcn != AD_FASTREBOOT)
				fastreboot_capable = 0;
		}
		quiesce_active = 0;
	}

	/*
	 * Try to reset devices. reset_leaves() should only be called
	 * a) when there are no other threads that could be accessing devices,
	 *    and
	 * b) on a system that's not capable of fast reboot (fastreboot_capable
	 *    being 0), or on a system where quiesce_devices() failed to
	 *    complete (quiesce_active being 1).
	 */
	if (is_first_reset && (!fastreboot_capable || quiesce_active)) {
		/*
		 * Clear is_first_reset before calling reset_devices()
		 * so that if reset_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_reset = 0;
		reset_leaves();
	}

	/* Verify newkernel checksum */
	if (fastreboot_capable && fcn == AD_FASTREBOOT &&
	    fastboot_cksum_verify(&newkernel) != 0) {
		fastreboot_capable = 0;
		prom_printf("Fast reboot: checksum failed for the new "
		    "kernel.\n");
		prom_printf(fallback_str);
	}

	(void) spl8();

	if (fastreboot_capable && fcn == AD_FASTREBOOT) {
		/*
		 * psm_shutdown is called within fast_reboot()
		 */
		fast_reboot();
	} else {
		(*psm_shutdownf)(cmd, fcn);

		if (fcn == AD_HALT || fcn == AD_POWEROFF)
			halt((char *)NULL);
		else
			prom_reboot("");
	}
	/*NOTREACHED*/
}

/* mdpreboot - may be called prior to mdboot while root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *mdep)
{
	if (fcn == AD_FASTREBOOT && !fastreboot_capable) {
		fcn = AD_BOOT;
#ifdef	__xpv
		cmn_err(CE_WARN, "Fast reboot is not supported on xVM");
#else
		cmn_err(CE_WARN,
		    "Fast reboot is not supported on this platform%s",
		    fastreboot_nosup_message());
#endif
	}

	if (fcn == AD_FASTREBOOT) {
		fastboot_load_kernel(mdep);
		if (!newkernel.fi_valid)
			fcn = AD_BOOT;
	}

	(*psm_preshutdownf)(cmd, fcn);
}

static void
stop_other_cpus(void)
{
	ulong_t s = clear_int_flag(); /* fast way to keep CPU from changing */
	cpuset_t xcset;

	CPUSET_ALL_BUT(xcset, CPU->cpu_id);
	xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)mach_cpu_halt);
	restore_int_flag(s);
}

/*
 * Machine dependent abort sequence handling
 */
void
abort_sequence_enter(char *msg)
{
	if (abort_enable == 0) {
		if (AU_ZONE_AUDITING(GET_KCTX_GZ))
			audit_enterprom(0);
		return;
	}
	if (AU_ZONE_AUDITING(GET_KCTX_GZ))
		audit_enterprom(1);
	debug_enter(msg);
	if (AU_ZONE_AUDITING(GET_KCTX_GZ))
		audit_exitprom(1);
}

/*
 * Enter debugger.  Called when the user types ctrl-alt-d or whenever
 * code wants to enter the debugger and possibly resume later.
 */
void
debug_enter(
	char	*msg)		/* message to print, possibly NULL */
{
	if (dtrace_debugger_init != NULL)
		(*dtrace_debugger_init)();

	if (msg)
		prom_printf("%s\n", msg);

	if (boothowto & RB_DEBUG)
		kmdb_enter();

	if (dtrace_debugger_fini != NULL)
		(*dtrace_debugger_fini)();
}

void
reset(void)
{
	extern	void acpi_reset_system();
#if !defined(__xpv)
	ushort_t *bios_memchk;

	/*
	 * Can't use psm_map_phys or acpi_reset_system before the hat is
	 * initialized.
	 */
	if (khat_running) {
		bios_memchk = (ushort_t *)psm_map_phys(0x472,
		    sizeof (ushort_t), PROT_READ | PROT_WRITE);
		if (bios_memchk)
			*bios_memchk = 0x1234;	/* bios memory check disable */

		if (options_dip != NULL &&
		    ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), 0,
		    "efi-systab")) {
			efi_reset();
		}

		/*
		 * The problem with using stubs is that we can call
		 * acpi_reset_system only after the kernel is up and running.
		 *
		 * We should create a global state to keep track of how far
		 * up the kernel is but for the time being we will depend on
		 * bootops. bootops cleared in startup_end().
		 */
		if (bootops == NULL)
			acpi_reset_system();
	}

	pc_reset();
#else
	if (IN_XPV_PANIC()) {
		if (khat_running && bootops == NULL) {
			acpi_reset_system();
		}

		pc_reset();
	}

	(void) HYPERVISOR_shutdown(SHUTDOWN_reboot);
	panic("HYPERVISOR_shutdown() failed");
#endif
	/*NOTREACHED*/
}

/*
 * Halt the machine and return to the monitor
 */
void
halt(char *s)
{
	stop_other_cpus();	/* send stop signal to other CPUs */
	if (s)
		prom_printf("(%s) \n", s);
	prom_exit_to_mon();
	/*NOTREACHED*/
}

/*
 * Initiate interrupt redistribution.
 */
void
i_ddi_intr_redist_all_cpus()
{
}

/*
 * XXX These probably ought to live somewhere else
 * XXX They are called from mem.c
 */

/*
 * Convert page frame number to an OBMEM page frame number
 * (i.e. put in the type bits -- zero for this implementation)
 */
pfn_t
impl_obmem_pfnum(pfn_t pf)
{
	return (pf);
}

#ifdef	NM_DEBUG
int nmi_test = 0;	/* checked in intentry.s during clock int */
int	nmtest = -1;
nmfunc1(arg, rp)
int	arg;
struct regs *rp;
{
	printf("nmi called with arg = %x, regs = %x\n", arg, rp);
	nmtest += 50;
	if (arg == nmtest) {
		printf("ip = %x\n", rp->r_pc);
		return (1);
	}
	return (0);
}

#endif

#include <sys/bootsvcs.h>

/* Hacked up initialization for initial kernel check out is HERE. */
/* The basic steps are: */
/* kernel bootfuncs definition/initialization for KADB */
/* kadb bootfuncs pointer initialization */
/* putchar/getchar (interrupts disabled) */

/* kadb bootfuncs pointer initialization */

int
sysp_getchar()
{
	int i;
	ulong_t s;

	if (cons_polledio == NULL) {
		/* Uh oh */
		prom_printf("getchar called with no console\n");
		for (;;)
			/* LOOP FOREVER */;
	}

	s = clear_int_flag();
	i = cons_polledio->cons_polledio_getchar(
	    cons_polledio->cons_polledio_argument);
	restore_int_flag(s);
	return (i);
}

void
sysp_putchar(int c)
{
	ulong_t s;

	/*
	 * We have no alternative but to drop the output on the floor.
	 */
	if (cons_polledio == NULL ||
	    cons_polledio->cons_polledio_putchar == NULL)
		return;

	s = clear_int_flag();
	cons_polledio->cons_polledio_putchar(
	    cons_polledio->cons_polledio_argument, c);
	restore_int_flag(s);
}

int
sysp_ischar()
{
	int i;
	ulong_t s;

	if (cons_polledio == NULL ||
	    cons_polledio->cons_polledio_ischar == NULL)
		return (0);

	s = clear_int_flag();
	i = cons_polledio->cons_polledio_ischar(
	    cons_polledio->cons_polledio_argument);
	restore_int_flag(s);
	return (i);
}

int
goany(void)
{
	prom_printf("Type any key to continue ");
	(void) prom_getchar();
	prom_printf("\n");
	return (1);
}

static struct boot_syscalls kern_sysp = {
	sysp_getchar,	/* unchar	(*getchar)();	7  */
	sysp_putchar,	/* int		(*putchar)();	8  */
	sysp_ischar,	/* int		(*ischar)();	9  */
};

#if defined(__xpv)
int using_kern_polledio;
#endif

void
kadb_uses_kernel()
{
	/*
	 * This routine is now totally misnamed, since it does not in fact
	 * control kadb's I/O; it only controls the kernel's prom_* I/O.
	 */
	sysp = &kern_sysp;
#if defined(__xpv)
	using_kern_polledio = 1;
#endif
}

/*
 *	the interface to the outside world
 */

/*
 * poll_port -- wait for a register to achieve a
 *		specific state.  Arguments are a mask of bits we care about,
 *		and two sub-masks.  To return normally, all the bits in the
 *		first sub-mask must be ON, all the bits in the second sub-
 *		mask must be OFF.  If about five seconds pass without the
 *		register achieving the desired bit configuration, we return
 *		1, else 0.
 */
int
poll_port(ushort_t port, ushort_t mask, ushort_t onbits, ushort_t offbits)
{
	int i;
	ushort_t maskval;

	for (i = 500000; i; i--) {
		maskval = inb(port) & mask;
		if (((maskval & onbits) == onbits) &&
		    ((maskval & offbits) == 0))
			return (0);
		drv_usecwait(10);
	}
	return (1);
}
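
/*
 * For illustration only: a hypothetical caller waiting for a controller
 * whose status port must show READY set and BUSY clear before issuing a
 * command (the port and bit names below are made up):
 *
 *	if (poll_port(MYDEV_STATUS_PORT, MYDEV_READY | MYDEV_BUSY,
 *	    MYDEV_READY, MYDEV_BUSY) != 0) {
 *		cmn_err(CE_WARN, "mydev: controller did not become ready");
 *		return (ETIMEDOUT);
 *	}
 */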

/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 */
/*LINTED: static unused */
static uint_t last_idle_cpu;

/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
	last_idle_cpu = cpun;
	(*psm_set_idle_cpuf)(cpun);
}

/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
	(*psm_unset_idle_cpuf)(cpun);
}

/*
 * This routine is almost correct now, but not quite.  It still needs the
 * equivalent concept of "hres_last_tick", just like on the sparc side.
 * The idea is to take a snapshot of the hi-res timer while doing the
 * hrestime_adj updates under hres_lock in locore, so that the small
 * interval between interrupt assertion and interrupt processing is
 * accounted for correctly.  Once we have this, the code below should
 * be modified to subtract off hres_last_tick rather than hrtime_base.
 *
 * I'd have done this myself, but I don't have source to all of the
 * vendor-specific hi-res timer routines (grrr...).  The generic hook I
 * need is something like "gethrtime_unlocked()", which would be just like
 * gethrtime() but would assume that you're already holding CLOCK_LOCK().
 * This is what the GET_HRTIME() macro is for on sparc (although it also
 * serves the function of making time available without a function call
 * so you don't take a register window overflow while traps are disabled).
 */
void
pc_gethrestime(timestruc_t *tp)
{
	int lock_prev;
	timestruc_t now;
	int nslt;		/* nsec since last tick */
	int adj;		/* amount of adjustment to apply */

loop:
	lock_prev = hres_lock;
	now = hrestime;
	nslt = (int)(gethrtime() - hres_last_tick);
	if (nslt < 0) {
		/*
		 * nslt < 0 means a tick came between sampling
		 * gethrtime() and hres_last_tick; restart the loop
		 */

		goto loop;
	}
	now.tv_nsec += nslt;
	if (hrestime_adj != 0) {
		if (hrestime_adj > 0) {
			adj = (nslt >> ADJ_SHIFT);
			if (adj > hrestime_adj)
				adj = (int)hrestime_adj;
		} else {
			adj = -(nslt >> ADJ_SHIFT);
			if (adj < hrestime_adj)
				adj = (int)hrestime_adj;
		}
		now.tv_nsec += adj;
	}
	while ((unsigned long)now.tv_nsec >= NANOSEC) {

		/*
		 * We might have a large adjustment or have been in the
		 * debugger for a long time; take care of (at most) four
		 * of those missed seconds (tv_nsec is 32 bits, so
		 * anything >4s will be wrapping around).  However,
		 * anything more than 2 seconds out of sync will trigger
		 * timedelta from clock() to go correct the time anyway,
		 * so do what we can, and let the big crowbar do the
		 * rest.  A similar correction while loop exists inside
		 * hres_tick(); in all cases we'd like tv_nsec to
		 * satisfy 0 <= tv_nsec < NANOSEC to avoid confusing
		 * user processes, but if tv_sec's a little behind for a
		 * little while, that's OK; time still monotonically
		 * increases.
		 */

		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}
	if ((hres_lock & ~1) != lock_prev)
		goto loop;

	*tp = now;
}

void
gethrestime_lasttick(timespec_t *tp)
{
	int s;

	s = hr_clock_lock();
	*tp = hrestime;
	hr_clock_unlock(s);
}

time_t
gethrestime_sec(void)
{
	timestruc_t now;

	gethrestime(&now);
	return (now.tv_sec);
}

/*
 * Initialize a kernel thread's stack
 */

caddr_t
thread_stk_init(caddr_t stk)
{
	ASSERT(((uintptr_t)stk & (STACK_ALIGN - 1)) == 0);
	return (stk - SA(MINFRAME));
}

/*
 * Initialize lwp's kernel stack.
 */

#ifdef TRAPTRACE
/*
 * There's a tricky interdependency here between use of sysenter and
 * TRAPTRACE which needs recording to avoid future confusion (this is
 * about the third time I've re-figured this out ..)
 *
 * Here's how debugging lcall works with TRAPTRACE.
 *
 * 1 We're in userland with a breakpoint on the lcall instruction.
 * 2 We execute the instruction - the instruction pushes the userland
 *   %ss, %esp, %efl, %cs, %eip on the stack and zips into the kernel
 *   via the call gate.
 * 3 The hardware raises a debug trap in kernel mode, the hardware
 *   pushes %efl, %cs, %eip and gets to dbgtrap via the idt.
 * 4 dbgtrap pushes the error code and trapno and calls cmntrap
 * 5 cmntrap finishes building a trap frame
 * 6 The TRACE_REGS macros in cmntrap copy a REGSIZE worth chunk
 *   off the stack into the traptrace buffer.
 *
 * This means that the traptrace buffer contains the wrong values in
 * %esp and %ss, but everything else in there is correct.
 *
 * Here's how debugging sysenter works with TRAPTRACE.
 *
 * a We're in userland with a breakpoint on the sysenter instruction.
 * b We execute the instruction - the instruction pushes -nothing-
 *   on the stack, but sets %cs, %eip, %ss, %esp to prearranged
 *   values to take us to sys_sysenter, at the top of the lwp's
 *   stack.
 * c goto 3
 *
 * At this point, because we got into the kernel without the requisite
 * five pushes on the stack, if we didn't make extra room, we'd
 * end up with the TRACE_REGS macro fetching the saved %ss and %esp
 * values from negative (unmapped) stack addresses -- which really bites.
 * That's why we do the '-= 8' below.
 *
 * XXX	Note that reading "up" lwp0's stack works because t0 is declared
 *	right next to t0stack in locore.s
 */
#endif

caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	caddr_t oldstk;
	struct pcb *pcb = &lwp->lwp_pcb;

	oldstk = stk;
	stk -= SA(sizeof (struct regs) + SA(MINFRAME));
#ifdef TRAPTRACE
	stk -= 2 * sizeof (greg_t); /* space for phony %ss:%sp (see above) */
#endif
	stk = (caddr_t)((uintptr_t)stk & ~(STACK_ALIGN - 1ul));
	bzero(stk, oldstk - stk);
	lwp->lwp_regs = (void *)(stk + SA(MINFRAME));

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
#if defined(__amd64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE)
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
	else
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;
#elif defined(__i386)
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
#endif	/* __i386 */
	lwp_installctx(lwp);
	return (stk);
}

/*ARGSUSED*/
void
lwp_stk_fini(klwp_t *lwp)
{}

/*
 * If we're not the panic CPU, we wait in panic_idle for reboot.
 */
void
panic_idle(void)
{
	splx(ipltospl(CLOCK_LEVEL));
	(void) setjmp(&curthread->t_pcb);

	dumpsys_helper();

#ifndef __xpv
	for (;;)
		i86_halt();
#else
	for (;;)
		;
#endif
}

/*
 * Stop the other CPUs by cross-calling them and forcing them to enter
 * the panic_idle() loop above.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	processorid_t i;
	cpuset_t xcset;

	/*
	 * In the case of a Xen panic, the hypervisor has already stopped
	 * all of the CPUs.
	 */
	if (!IN_XPV_PANIC()) {
		(void) splzs();

		CPUSET_ALL_BUT(xcset, cp->cpu_id);
		xc_priority(0, 0, 0, CPUSET2BV(xcset), (xc_func_t)panic_idle);
	}

	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && cpu[i] != NULL &&
		    (cpu[i]->cpu_flags & CPU_EXISTS))
			cpu[i]->cpu_flags |= CPU_QUIESCED;
	}
}

/*
 * Platform callback following each entry to panicsys().
 */
/*ARGSUSED*/
void
panic_enter_hw(int spl)
{
	/* Nothing to do here */
}

/*
 * Platform-specific code to execute after panicstr is set: we invoke
 * the PSM entry point to indicate that a panic has occurred.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	psm_notifyf(PSM_PANIC_ENTER);

	cmi_panic_callback();

#ifdef	TRAPTRACE
	/*
	 * Turn off TRAPTRACE
	 */
	TRAPTRACE_FREEZE;
#endif	/* TRAPTRACE */
}

/*
 * Platform callback prior to writing crash dump.
 */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	/* Nothing to do here */
}

void *
plat_traceback(void *fpreg)
{
#ifdef __xpv
	if (IN_XPV_PANIC())
		return (xpv_traceback(fpreg));
#endif
	return (fpreg);
}

/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{}

/*ARGSUSED*/
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	return (ENOTSUP);
}

/*
 * The underlying console output routines are protected by raising IPL in case
 * we are still calling into the early boot services.  Once we start calling
 * the kernel console emulator, it will disable interrupts completely during
 * character rendering (see sysp_putchar, for example).  Refer to the comments
 * and code in common/os/console.c for more information on these callbacks.
 */
/*ARGSUSED*/
int
console_enter(int busy)
{
	return (splzs());
}

/*ARGSUSED*/
void
console_exit(int busy, int spl)
{
	splx(spl);
}

/*
 * Allocate a region of virtual address space, unmapped.
 * Stubbed out except on sparc, at least for now.
 */
/*ARGSUSED*/
void *
boot_virt_alloc(void *addr, size_t size)
{
	return (addr);
}

volatile unsigned long	tenmicrodata;

void
tenmicrosec(void)
{
	extern int gethrtime_hires;

	if (gethrtime_hires) {
		hrtime_t start, end;
		start = end = gethrtime();
		while ((end - start) < (10 * (NANOSEC / MICROSEC))) {
			SMT_PAUSE();
			end = gethrtime();
		}
	} else {
#if defined(__xpv)
		hrtime_t newtime;

		newtime = xpv_gethrtime() + 10000; /* now + 10 us */
		while (xpv_gethrtime() < newtime)
			SMT_PAUSE();
#else	/* __xpv */
		int i;

		/*
		 * Artificial loop to induce delay.
		 */
		for (i = 0; i < microdata; i++)
			tenmicrodata = microdata;
#endif	/* __xpv */
	}
}

/*
 * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
 * long, and it fills in the array with the time spent on cpu in
 * each of the mstates, where time is returned in nsec.
 *
 * No guarantee is made that the returned values in times[] will
 * monotonically increase on sequential calls, although this will
 * be true in the long run. Any such guarantee must be handled by
 * the caller, if needed. This can happen if we fail to account
 * for elapsed time due to a generation counter conflict, yet we
 * did account for it on a prior call (see below).
 *
 * The complication is that the cpu in question may be updating
 * its microstate at the same time that we are reading it.
 * Because the microstate is only updated when the CPU's state
 * changes, the values in cpu_intracct[] can be indefinitely out
 * of date. To determine true current values, it is necessary to
 * compare the current time with cpu_mstate_start, and add the
 * difference to times[cpu_mstate].
 *
 * This can be a problem if those values are changing out from
 * under us. Because the code path in new_cpu_mstate() is
 * performance critical, we have not added a lock to it. Instead,
 * we have added a generation counter. Before beginning
 * modifications, the counter is set to 0. After modifications,
 * it is set to the old value plus one.
 *
 * get_cpu_mstate() will not consider the values of cpu_mstate
 * and cpu_mstate_start to be usable unless the value of
 * cpu_mstate_gen is both non-zero and unchanged, both before and
 * after reading the mstate information. Note that we must
 * protect against out-of-order loads around accesses to the
 * generation counter. Also, this is a best effort approach in
 * that we do not retry should the counter be found to have
 * changed.
 *
 * cpu_intracct[] is used to identify time spent in each CPU
 * mstate while handling interrupts. Such time should be reported
 * against system time, and so is subtracted out from its
 * corresponding cpu_acct[] time and added to
 * cpu_acct[CMS_SYSTEM].
 */
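
/*
 * For reference, a minimal sketch of the writer-side protocol described
 * above.  The actual update lives in new_cpu_mstate(), not in this file,
 * so the lines below are illustrative only:
 *
 *	gen = cpu->cpu_mstate_gen;
 *	cpu->cpu_mstate_gen = 0;
 *	membar_producer();	publish the "update in progress" marker
 *	... update cpu_mstate, cpu_mstate_start, cpu_acct[], cpu_intracct[]
 *	membar_producer();	publish the updates before the new generation
 *	cpu->cpu_mstate_gen = gen + 1;	(kept non-zero, per the rule above)
 */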

void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
        int i;
        hrtime_t now, start;
        uint16_t gen;
        uint16_t state;
        hrtime_t intracct[NCMSTATES];

        /*
         * Load all volatile state under the protection of membar.
         * cpu_acct[cpu_mstate] must be loaded to avoid double counting
         * of (now - cpu_mstate_start) by a change in CPU mstate that
         * arrives after we make our last check of cpu_mstate_gen.
         */

        now = gethrtime_unscaled();
        gen = cpu->cpu_mstate_gen;

        membar_consumer();      /* guarantee load ordering */
        start = cpu->cpu_mstate_start;
        state = cpu->cpu_mstate;
        for (i = 0; i < NCMSTATES; i++) {
                intracct[i] = cpu->cpu_intracct[i];
                times[i] = cpu->cpu_acct[i];
        }
        membar_consumer();      /* guarantee load ordering */

        if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
                times[state] += now - start;

        for (i = 0; i < NCMSTATES; i++) {
                if (i == CMS_SYSTEM)
                        continue;
                times[i] -= intracct[i];
                if (times[i] < 0) {
                        intracct[i] += times[i];
                        times[i] = 0;
                }
                times[CMS_SYSTEM] += intracct[i];
                scalehrtime(&times[i]);
        }
        scalehrtime(&times[CMS_SYSTEM]);
}

/*
 * This is a version of the rdmsr instruction that allows
 * an error code to be returned in the case of failure.
 */
int
checked_rdmsr(uint_t msr, uint64_t *value)
{
        if (!is_x86_feature(x86_featureset, X86FSET_MSR))
                return (ENOTSUP);
        *value = rdmsr(msr);
        return (0);
}

/*
 * This is a version of the wrmsr instruction that allows
 * an error code to be returned in the case of failure.
 */
int
checked_wrmsr(uint_t msr, uint64_t value)
{
        if (!is_x86_feature(x86_featureset, X86FSET_MSR))
                return (ENOTSUP);
        wrmsr(msr, value);
        return (0);
}

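/*
 * Illustrative sketch (not compiled): a caller using the checked MSR
 * accessors above.  The wrapper below and the bit mask it applies are
 * hypothetical; the point is only that callers must handle the ENOTSUP
 * return on hardware without MSR support rather than assuming the access
 * succeeded.
 */
#if 0
static int
example_set_msr_bits(uint_t msr, uint64_t bits)
{
        uint64_t val;
        int err;

        if ((err = checked_rdmsr(msr, &val)) != 0)
                return (err);           /* e.g. ENOTSUP: no MSR support */
        return (checked_wrmsr(msr, val | bits));
}
#endif
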
/*
 * The mem driver's usual method of using hat_devload() to establish a
 * temporary mapping will not work for foreign pages mapped into this
 * domain or for the special hypervisor-provided pages.  For the foreign
 * pages, we often don't know which domain owns them, so we can't ask the
 * hypervisor to set up a new mapping.  For the other pages, we don't have
 * a pfn, so we can't create a new PTE.  For these special cases, we do a
 * direct uiomove() from the existing kernel virtual address.
 */
/*ARGSUSED*/
int
plat_mem_do_mmio(struct uio *uio, enum uio_rw rw)
{
#if defined(__xpv)
        void *va = (void *)(uintptr_t)uio->uio_loffset;
        off_t pageoff = uio->uio_loffset & PAGEOFFSET;
        size_t nbytes = MIN((size_t)(PAGESIZE - pageoff),
            (size_t)uio->uio_iov->iov_len);

        if ((rw == UIO_READ &&
            (va == HYPERVISOR_shared_info || va == xen_info)) ||
            (pfn_is_foreign(hat_getpfnum(kas.a_hat, va))))
                return (uiomove(va, nbytes, rw, uio));
#endif
        return (ENOTSUP);
}

pgcnt_t
num_phys_pages()
{
        pgcnt_t npages = 0;
        struct memlist *mp;

#if defined(__xpv)
        if (DOMAIN_IS_INITDOMAIN(xen_info))
                return (xpv_nr_phys_pages());
#endif  /* __xpv */

        for (mp = phys_install; mp != NULL; mp = mp->ml_next)
                npages += mp->ml_size >> PAGESHIFT;

        return (npages);
}

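/*
 * Illustrative sketch (not compiled): converting the page count returned by
 * num_phys_pages() into an approximate size in megabytes.  The helper is
 * hypothetical and exists only to make the units explicit (pages of
 * PAGESIZE bytes).
 */
#if 0
static uint64_t
example_phys_mem_mb(void)
{
        return (((uint64_t)num_phys_pages() * PAGESIZE) >> 20);
}
#endif
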
/* cpu threshold for compressed dumps */
#ifdef _LP64
uint_t dump_plat_mincpu_default = DUMP_PLAT_X86_64_MINCPU;
#else
uint_t dump_plat_mincpu_default = DUMP_PLAT_X86_32_MINCPU;
#endif

int
dump_plat_addr()
{
#ifdef __xpv
        pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;
        mem_vtop_t mem_vtop;
        int cnt;

        /*
         * On the hypervisor, we want to dump the page with shared_info on it.
         */
        if (!IN_XPV_PANIC()) {
                mem_vtop.m_as = &kas;
                mem_vtop.m_va = HYPERVISOR_shared_info;
                mem_vtop.m_pfn = pfn;
                dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
                cnt = 1;
        } else {
                cnt = dump_xpv_addr();
        }
        return (cnt);
#else
        return (0);
#endif
}

void
dump_plat_pfn()
{
#ifdef __xpv
        pfn_t pfn = mmu_btop(xen_info->shared_info) | PFN_IS_FOREIGN_MFN;

        if (!IN_XPV_PANIC())
                dumpvp_write(&pfn, sizeof (pfn));
        else
                dump_xpv_pfn();
#endif
}

/*ARGSUSED*/
int
dump_plat_data(void *dump_cbuf)
{
#ifdef __xpv
        uint32_t csize;
        int cnt;

        if (!IN_XPV_PANIC()) {
                csize = (uint32_t)compress(HYPERVISOR_shared_info, dump_cbuf,
                    PAGESIZE);
                dumpvp_write(&csize, sizeof (uint32_t));
                dumpvp_write(dump_cbuf, csize);
                cnt = 1;
        } else {
                cnt = dump_xpv_data(dump_cbuf);
        }
        return (cnt);
#else
        return (0);
#endif
}

/*
 * Calculates a linear address, given the CS selector and PC values,
 * by looking up the %cs selector in the process's LDT or the CPU's GDT.
 * proc->p_ldtlock must be held across this call.
 */
int
linear_pc(struct regs *rp, proc_t *p, caddr_t *linearp)
{
        user_desc_t *descrp;
        caddr_t baseaddr;
        uint16_t idx = SELTOIDX(rp->r_cs);

        ASSERT(rp->r_cs <= 0xFFFF);
        ASSERT(MUTEX_HELD(&p->p_ldtlock));

        if (SELISLDT(rp->r_cs)) {
                /*
                 * Currently 64 bit processes cannot have private LDTs.
                 */
                ASSERT(p->p_model != DATAMODEL_LP64);

                if (p->p_ldt == NULL)
                        return (-1);

                descrp = &p->p_ldt[idx];
                baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);

                /*
                 * Calculate the linear address (wraparound is not only ok,
                 * it's expected behavior). The cast to uint32_t is because
                 * LDT selectors are only allowed in 32-bit processes.
                 */
                *linearp = (caddr_t)(uintptr_t)(uint32_t)((uintptr_t)baseaddr +
                    rp->r_pc);
        } else {
#ifdef DEBUG
                descrp = &CPU->cpu_gdt[idx];
                baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
                /* GDT-based descriptors' base addresses should always be 0 */
                ASSERT(baseaddr == 0);
#endif
                *linearp = (caddr_t)(uintptr_t)rp->r_pc;
        }

        return (0);
}

/*
 * The implementation of dtrace_linear_pc is similar to that of
 * linear_pc, above, but here we acquire p_ldtlock before accessing
 * p_ldt.  This implementation is used by the pid provider; we prefix
 * it with "dtrace_" to avoid inducing spurious tracing events.
 */
int
dtrace_linear_pc(struct regs *rp, proc_t *p, caddr_t *linearp)
{
        user_desc_t *descrp;
        caddr_t baseaddr;
        uint16_t idx = SELTOIDX(rp->r_cs);

        ASSERT(rp->r_cs <= 0xFFFF);

        if (SELISLDT(rp->r_cs)) {
                /*
                 * Currently 64 bit processes cannot have private LDTs.
                 */
                ASSERT(p->p_model != DATAMODEL_LP64);

                mutex_enter(&p->p_ldtlock);
                if (p->p_ldt == NULL) {
                        mutex_exit(&p->p_ldtlock);
                        return (-1);
                }
                descrp = &p->p_ldt[idx];
                baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
                mutex_exit(&p->p_ldtlock);

                /*
                 * Calculate the linear address (wraparound is not only ok,
                 * it's expected behavior). The cast to uint32_t is because
                 * LDT selectors are only allowed in 32-bit processes.
                 */
                *linearp = (caddr_t)(uintptr_t)(uint32_t)((uintptr_t)baseaddr +
                    rp->r_pc);
        } else {
#ifdef DEBUG
                descrp = &CPU->cpu_gdt[idx];
                baseaddr = (caddr_t)(uintptr_t)USEGD_GETBASE(descrp);
                /* GDT-based descriptors' base addresses should always be 0 */
                ASSERT(baseaddr == 0);
#endif
                *linearp = (caddr_t)(uintptr_t)rp->r_pc;
        }

        return (0);
}

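/*
 * Illustrative sketch (not compiled): calling linear_pc() with the locking
 * its comment requires.  The wrapper below and the way its caller obtains
 * the regs pointer are hypothetical; the points of interest are holding
 * p_ldtlock across the call and checking for the -1 failure return.
 */
#if 0
static int
example_get_linear_pc(proc_t *p, struct regs *rp, caddr_t *pcp)
{
        int err;

        mutex_enter(&p->p_ldtlock);
        err = linear_pc(rp, p, pcp);
        mutex_exit(&p->p_ldtlock);

        return (err);
}
#endif
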
/*
 * We need to post a soft interrupt to reprogram the lbolt cyclic when
 * switching from event to cyclic driven lbolt. The following code adds
 * and posts the softint for x86.
 */
static ddi_softint_hdl_impl_t lbolt_softint_hdl =
        {0, NULL, NULL, NULL, 0, NULL, NULL, NULL};

void
lbolt_softint_add(void)
{
        (void) add_avsoftintr((void *)&lbolt_softint_hdl, LOCK_LEVEL,
            (avfunc)lbolt_ev_to_cyclic, "lbolt_ev_to_cyclic", NULL, NULL);
}

void
lbolt_softint_post(void)
{
        (*setsoftint)(CBE_LOCK_PIL, lbolt_softint_hdl.ih_pending);
}

boolean_t
plat_dr_check_capability(uint64_t features)
{
        return ((plat_dr_options & features) == features);
}

boolean_t
plat_dr_support_cpu(void)
{
        return (plat_dr_options & PLAT_DR_FEATURE_CPU);
}

boolean_t
plat_dr_support_memory(void)
{
        return (plat_dr_options & PLAT_DR_FEATURE_MEMORY);
}

void
plat_dr_enable_capability(uint64_t features)
{
        atomic_or_64(&plat_dr_options, features);
}

void
plat_dr_disable_capability(uint64_t features)
{
        atomic_and_64(&plat_dr_options, ~features);
}

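/*
 * Illustrative sketch (not compiled): how platform code might advertise and
 * query dynamic reconfiguration (DR) capabilities using the helpers above.
 * The function below is hypothetical; the feature flags are the ones already
 * referenced in this file.
 */
#if 0
static void
example_advertise_dr(void)
{
        /* The platform detected hot-pluggable CPUs and memory. */
        plat_dr_enable_capability(PLAT_DR_FEATURE_CPU |
            PLAT_DR_FEATURE_MEMORY);

        if (plat_dr_support_cpu())
                cmn_err(CE_CONT, "?CPU DR is supported\n");
        if (plat_dr_check_capability(PLAT_DR_FEATURE_MEMORY))
                cmn_err(CE_CONT, "?memory DR is supported\n");
}
#endif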