12df1fe9cSrandyf /*
22df1fe9cSrandyf  * CDDL HEADER START
32df1fe9cSrandyf  *
42df1fe9cSrandyf  * The contents of this file are subject to the terms of the
52df1fe9cSrandyf  * Common Development and Distribution License (the "License").
62df1fe9cSrandyf  * You may not use this file except in compliance with the License.
72df1fe9cSrandyf  *
82df1fe9cSrandyf  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
92df1fe9cSrandyf  * or http://www.opensolaris.org/os/licensing.
102df1fe9cSrandyf  * See the License for the specific language governing permissions
112df1fe9cSrandyf  * and limitations under the License.
122df1fe9cSrandyf  *
132df1fe9cSrandyf  * When distributing Covered Code, include this CDDL HEADER in each
142df1fe9cSrandyf  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
152df1fe9cSrandyf  * If applicable, add the following below this CDDL HEADER, with the
162df1fe9cSrandyf  * fields enclosed by brackets "[]" replaced with your own identifying
172df1fe9cSrandyf  * information: Portions Copyright [yyyy] [name of copyright owner]
182df1fe9cSrandyf  *
192df1fe9cSrandyf  * CDDL HEADER END
202df1fe9cSrandyf  */
212df1fe9cSrandyf /*
227417cfdeSKuriakose Kuruvilla  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
232df1fe9cSrandyf  */
242df1fe9cSrandyf 
/*
 * Platform-specific implementation code.
 * Currently only suspend to RAM (ACPI S3) is supported.
 */
292df1fe9cSrandyf 
302df1fe9cSrandyf #define	SUNDDI_IMPL
312df1fe9cSrandyf 
322df1fe9cSrandyf #include <sys/types.h>
332df1fe9cSrandyf #include <sys/promif.h>
342df1fe9cSrandyf #include <sys/prom_isa.h>
352df1fe9cSrandyf #include <sys/prom_plat.h>
362df1fe9cSrandyf #include <sys/cpuvar.h>
372df1fe9cSrandyf #include <sys/pte.h>
382df1fe9cSrandyf #include <vm/hat.h>
392df1fe9cSrandyf #include <vm/page.h>
402df1fe9cSrandyf #include <vm/as.h>
412df1fe9cSrandyf #include <sys/cpr.h>
422df1fe9cSrandyf #include <sys/kmem.h>
432df1fe9cSrandyf #include <sys/clock.h>
452df1fe9cSrandyf #include <sys/panic.h>
462df1fe9cSrandyf #include <vm/seg_kmem.h>
472df1fe9cSrandyf #include <sys/cpu_module.h>
482df1fe9cSrandyf #include <sys/callb.h>
492df1fe9cSrandyf #include <sys/machsystm.h>
502df1fe9cSrandyf #include <sys/vmsystm.h>
512df1fe9cSrandyf #include <sys/systm.h>
522df1fe9cSrandyf #include <sys/archsystm.h>
532df1fe9cSrandyf #include <sys/stack.h>
542df1fe9cSrandyf #include <sys/fs/ufs_fs.h>
552df1fe9cSrandyf #include <sys/memlist.h>
562df1fe9cSrandyf #include <sys/bootconf.h>
572df1fe9cSrandyf #include <sys/thread.h>
582df1fe9cSrandyf #include <sys/x_call.h>
592df1fe9cSrandyf #include <sys/smp_impldefs.h>
602df1fe9cSrandyf #include <vm/vm_dep.h>
612df1fe9cSrandyf #include <sys/psm.h>
622df1fe9cSrandyf #include <sys/epm.h>
632df1fe9cSrandyf #include <sys/cpr_wakecode.h>
642df1fe9cSrandyf #include <sys/x86_archext.h>
652df1fe9cSrandyf #include <sys/reboot.h>
662df1fe9cSrandyf #include <sys/acpi/acpi.h>
672df1fe9cSrandyf #include <sys/acpica.h>
687af88ac7SKuriakose Kuruvilla #include <sys/fp.h>
697008f154SRichard Lowe #include <sys/sysmacros.h>
702df1fe9cSrandyf 
712df1fe9cSrandyf #define	AFMT	"%lx"
722df1fe9cSrandyf 
732df1fe9cSrandyf extern int	flushes_require_xcalls;
742df1fe9cSrandyf extern cpuset_t	cpu_ready_set;
752df1fe9cSrandyf 
762df1fe9cSrandyf #if defined(__amd64)
772df1fe9cSrandyf extern void	*wc_long_mode_64(void);
782df1fe9cSrandyf #endif	/* __amd64 */
792df1fe9cSrandyf extern int	tsc_gethrtime_enable;
802df1fe9cSrandyf extern	void	i_cpr_start_cpu(void);
812df1fe9cSrandyf 
822df1fe9cSrandyf ushort_t	cpr_mach_type = CPR_MACHTYPE_X86;
832df1fe9cSrandyf void		(*cpr_start_cpu_func)(void) = i_cpr_start_cpu;
842df1fe9cSrandyf 
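/*
 * Per-cpu wakeup context, allocated lazily by i_cpr_alloc_cpus(), and
 * the set of cpus that have checked in on the resume path.
 */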
852df1fe9cSrandyf static wc_cpu_t	*wc_other_cpus = NULL;
86a563a037Sbholler static cpuset_t procset;
872df1fe9cSrandyf 
882df1fe9cSrandyf static void
892df1fe9cSrandyf init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt);
902df1fe9cSrandyf 
912df1fe9cSrandyf static int i_cpr_platform_alloc(psm_state_request_t *req);
922df1fe9cSrandyf static void i_cpr_platform_free(psm_state_request_t *req);
932df1fe9cSrandyf static int i_cpr_save_apic(psm_state_request_t *req);
942df1fe9cSrandyf static int i_cpr_restore_apic(psm_state_request_t *req);
954716fd88Sjan static int wait_for_set(cpuset_t *set, int who);
962df1fe9cSrandyf 
973d995820SJoseph A Townsend static	void i_cpr_save_stack(kthread_t *t, wc_cpu_t *wc_cpu);
983d995820SJoseph A Townsend void i_cpr_restore_stack(kthread_t *t, greg_t *save_stack);
993d995820SJoseph A Townsend 
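/*
 * Lowest and highest addresses of a thread's kernel stack, accounting
 * for the direction in which the stack grows.
 */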
1003d995820SJoseph A Townsend #ifdef STACK_GROWTH_DOWN
1013d995820SJoseph A Townsend #define	CPR_GET_STACK_START(t) ((t)->t_stkbase)
1023d995820SJoseph A Townsend #define	CPR_GET_STACK_END(t) ((t)->t_stk)
1033d995820SJoseph A Townsend #else
1043d995820SJoseph A Townsend #define	CPR_GET_STACK_START(t) ((t)->t_stk)
1053d995820SJoseph A Townsend #define	CPR_GET_STACK_END(t) ((t)->t_stkbase)
1063d995820SJoseph A Townsend #endif	/* STACK_GROWTH_DOWN */
1073d995820SJoseph A Townsend 
1082df1fe9cSrandyf /*
1092df1fe9cSrandyf  * restart paused slave cpus
1102df1fe9cSrandyf  */
1112df1fe9cSrandyf void
1122df1fe9cSrandyf i_cpr_machdep_setup(void)
1132df1fe9cSrandyf {
1142df1fe9cSrandyf 	if (ncpus > 1) {
1152df1fe9cSrandyf 		CPR_DEBUG(CPR_DEBUG1, ("MP restarted...\n"));
1162df1fe9cSrandyf 		mutex_enter(&cpu_lock);
1172df1fe9cSrandyf 		start_cpus();
1182df1fe9cSrandyf 		mutex_exit(&cpu_lock);
1192df1fe9cSrandyf 	}
1202df1fe9cSrandyf }
1212df1fe9cSrandyf 
1222df1fe9cSrandyf 
1232df1fe9cSrandyf /*
1242df1fe9cSrandyf  * Stop all interrupt activities in the system
1252df1fe9cSrandyf  */
1262df1fe9cSrandyf void
1272df1fe9cSrandyf i_cpr_stop_intr(void)
1282df1fe9cSrandyf {
1292df1fe9cSrandyf 	(void) spl7();
1302df1fe9cSrandyf }
1312df1fe9cSrandyf 
1322df1fe9cSrandyf /*
1332df1fe9cSrandyf  * Set machine up to take interrupts
1342df1fe9cSrandyf  */
1352df1fe9cSrandyf void
1362df1fe9cSrandyf i_cpr_enable_intr(void)
1372df1fe9cSrandyf {
1382df1fe9cSrandyf 	(void) spl0();
1392df1fe9cSrandyf }
1402df1fe9cSrandyf 
/*
 * Save miscellaneous information which needs to be written to the
 * state file.  This is needed to re-initialize kernel/prom handshaking
 * on SPARC; it is not used for ACPI S3, so this routine must never be
 * called on x86 (hence the ASSERT below).
 */
1462df1fe9cSrandyf void
1472df1fe9cSrandyf i_cpr_save_machdep_info(void)
1482df1fe9cSrandyf {
1492df1fe9cSrandyf 	int notcalled = 0;
1502df1fe9cSrandyf 	ASSERT(notcalled);
1512df1fe9cSrandyf }
1522df1fe9cSrandyf 
1532df1fe9cSrandyf 
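/*
 * There is no trap base register on x86; this no-op stub is kept for
 * the common cpr interface.
 */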
1542df1fe9cSrandyf void
1552df1fe9cSrandyf i_cpr_set_tbr(void)
1562df1fe9cSrandyf {
1572df1fe9cSrandyf }
1582df1fe9cSrandyf 
1592df1fe9cSrandyf 
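/*
 * The boot cpu is always cpu 0 for suspend/resume purposes.
 */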
1602df1fe9cSrandyf processorid_t
1612df1fe9cSrandyf i_cpr_bootcpuid(void)
1622df1fe9cSrandyf {
1632df1fe9cSrandyf 	return (0);
1642df1fe9cSrandyf }
1652df1fe9cSrandyf 
1662df1fe9cSrandyf /*
1672df1fe9cSrandyf  * cpu0 should contain bootcpu info
1682df1fe9cSrandyf  */
1692df1fe9cSrandyf cpu_t *
1702df1fe9cSrandyf i_cpr_bootcpu(void)
1712df1fe9cSrandyf {
1722df1fe9cSrandyf 	ASSERT(MUTEX_HELD(&cpu_lock));
1732df1fe9cSrandyf 
1742df1fe9cSrandyf 	return (cpu_get(i_cpr_bootcpuid()));
1752df1fe9cSrandyf }
1762df1fe9cSrandyf 
1772df1fe9cSrandyf /*
1782df1fe9cSrandyf  *	Save context for the specified CPU
1792df1fe9cSrandyf  */
1802df1fe9cSrandyf void *
1812df1fe9cSrandyf i_cpr_save_context(void *arg)
1822df1fe9cSrandyf {
1832df1fe9cSrandyf 	long	index = (long)arg;
1842df1fe9cSrandyf 	psm_state_request_t *papic_state;
1852df1fe9cSrandyf 	int resuming;
1862df1fe9cSrandyf 	int	ret;
1873d995820SJoseph A Townsend 	wc_cpu_t	*wc_cpu = wc_other_cpus + index;
1882df1fe9cSrandyf 
1892df1fe9cSrandyf 	PMD(PMD_SX, ("i_cpr_save_context() index = %ld\n", index))
1902df1fe9cSrandyf 
1912df1fe9cSrandyf 	ASSERT(index < NCPU);
1922df1fe9cSrandyf 
1933d995820SJoseph A Townsend 	papic_state = &(wc_cpu)->wc_apic_state;
1942df1fe9cSrandyf 
1952df1fe9cSrandyf 	ret = i_cpr_platform_alloc(papic_state);
1962df1fe9cSrandyf 	ASSERT(ret == 0);
1972df1fe9cSrandyf 
1982df1fe9cSrandyf 	ret = i_cpr_save_apic(papic_state);
1992df1fe9cSrandyf 	ASSERT(ret == 0);
2002df1fe9cSrandyf 
2013d995820SJoseph A Townsend 	i_cpr_save_stack(curthread, wc_cpu);
2023d995820SJoseph A Townsend 
	/*
	 * wc_save_context() returns twice: it returns a non-zero value
	 * when the context is first saved (suspending) and 0 when control
	 * comes back here with the restored context (resuming).
	 */
2083d995820SJoseph A Townsend 	resuming = (wc_save_context(wc_cpu) == 0);
2092df1fe9cSrandyf 
2102df1fe9cSrandyf 	/*
2112df1fe9cSrandyf 	 * do NOT call any functions after this point, because doing so
2122df1fe9cSrandyf 	 * will modify the stack that we are running on
2132df1fe9cSrandyf 	 */
2142df1fe9cSrandyf 
2152df1fe9cSrandyf 	if (resuming) {
2162df1fe9cSrandyf 
2172df1fe9cSrandyf 		ret = i_cpr_restore_apic(papic_state);
2182df1fe9cSrandyf 		ASSERT(ret == 0);
2192df1fe9cSrandyf 
2202df1fe9cSrandyf 		i_cpr_platform_free(papic_state);
2212df1fe9cSrandyf 
2222df1fe9cSrandyf 		/*
223643e2e74Sbholler 		 * Enable interrupts on this cpu.
224643e2e74Sbholler 		 * Do not bind interrupts to this CPU's local APIC until
22555d507a9SSeth Goldberg 		 * the CPU is ready to receive interrupts.
226643e2e74Sbholler 		 */
227643e2e74Sbholler 		ASSERT(CPU->cpu_id != i_cpr_bootcpuid());
228643e2e74Sbholler 		mutex_enter(&cpu_lock);
229643e2e74Sbholler 		cpu_enable_intr(CPU);
230643e2e74Sbholler 		mutex_exit(&cpu_lock);
231643e2e74Sbholler 
232643e2e74Sbholler 		/*
2332df1fe9cSrandyf 		 * Setting the bit in cpu_ready_set must be the last operation
2342df1fe9cSrandyf 		 * in processor initialization; the boot CPU will continue to
2352df1fe9cSrandyf 		 * boot once it sees this bit set for all active CPUs.
2362df1fe9cSrandyf 		 */
2372df1fe9cSrandyf 		CPUSET_ATOMIC_ADD(cpu_ready_set, CPU->cpu_id);
2382df1fe9cSrandyf 
2392df1fe9cSrandyf 		PMD(PMD_SX,
240a563a037Sbholler 		    ("i_cpr_save_context() resuming cpu %d in cpu_ready_set\n",
241a563a037Sbholler 		    CPU->cpu_id))
24255d507a9SSeth Goldberg 	} else {
24355d507a9SSeth Goldberg 		/*
24455d507a9SSeth Goldberg 		 * Disable interrupts on this CPU so that PSM knows not to bind
24555d507a9SSeth Goldberg 		 * interrupts here on resume until the CPU has executed
24655d507a9SSeth Goldberg 		 * cpu_enable_intr() (above) in the resume path.
24755d507a9SSeth Goldberg 		 * We explicitly do not grab cpu_lock here because at this point
24855d507a9SSeth Goldberg 		 * in the suspend process, the boot cpu owns cpu_lock and all
24955d507a9SSeth Goldberg 		 * other cpus are also executing in the pause thread (only
25055d507a9SSeth Goldberg 		 * modifying their respective CPU structure).
25155d507a9SSeth Goldberg 		 */
25255d507a9SSeth Goldberg 		(void) cpu_disable_intr(CPU);
2532df1fe9cSrandyf 	}
25455d507a9SSeth Goldberg 
25555d507a9SSeth Goldberg 	PMD(PMD_SX, ("i_cpr_save_context: wc_save_context returns %d\n",
25655d507a9SSeth Goldberg 	    resuming))
25755d507a9SSeth Goldberg 
2582df1fe9cSrandyf 	return (NULL);
2592df1fe9cSrandyf }
2602df1fe9cSrandyf 
2612df1fe9cSrandyf static ushort_t *warm_reset_vector = NULL;
2622df1fe9cSrandyf 
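/*
 * Map the BIOS warm reset vector and point it, as a real mode
 * segment:offset pair, at the wakeup code in the rm_platter page so
 * that a cpu coming out of reset starts executing there.
 */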
2632df1fe9cSrandyf static ushort_t *
2642df1fe9cSrandyf map_warm_reset_vector()
2652df1fe9cSrandyf {
2662df1fe9cSrandyf 	/*LINTED*/
2672df1fe9cSrandyf 	if (!(warm_reset_vector = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
2682df1fe9cSrandyf 	    sizeof (ushort_t *), PROT_READ|PROT_WRITE)))
2692df1fe9cSrandyf 		return (NULL);
2702df1fe9cSrandyf 
2712df1fe9cSrandyf 	/*
2722df1fe9cSrandyf 	 * setup secondary cpu bios boot up vector
2732df1fe9cSrandyf 	 */
2742df1fe9cSrandyf 	*warm_reset_vector = (ushort_t)((caddr_t)
2752df1fe9cSrandyf 	    /*LINTED*/
2762df1fe9cSrandyf 	    ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
2772df1fe9cSrandyf 	    + ((ulong_t)rm_platter_va & 0xf));
2782df1fe9cSrandyf 	warm_reset_vector++;
2792df1fe9cSrandyf 	*warm_reset_vector = (ushort_t)(rm_platter_pa >> 4);
2802df1fe9cSrandyf 
2812df1fe9cSrandyf 	--warm_reset_vector;
2822df1fe9cSrandyf 	return (warm_reset_vector);
2832df1fe9cSrandyf }
2842df1fe9cSrandyf 
2852df1fe9cSrandyf void
2862df1fe9cSrandyf i_cpr_pre_resume_cpus()
2872df1fe9cSrandyf {
	/*
	 * This is a cut-down version of start_other_cpus(); it does just
	 * the initialization needed to wake the other cpus.
	 */
2922df1fe9cSrandyf 	unsigned who;
2934716fd88Sjan 	int boot_cpuid = i_cpr_bootcpuid();
2942df1fe9cSrandyf 	uint32_t		code_length = 0;
2952df1fe9cSrandyf 	caddr_t			wakevirt = rm_platter_va;
2962df1fe9cSrandyf 	/*LINTED*/
2972df1fe9cSrandyf 	wakecode_t		*wp = (wakecode_t *)wakevirt;
2982df1fe9cSrandyf 	char *str = "i_cpr_pre_resume_cpus";
2992df1fe9cSrandyf 	extern int get_tsc_ready();
3002df1fe9cSrandyf 	int err;
3012df1fe9cSrandyf 
3022df1fe9cSrandyf 	/*LINTED*/
3032df1fe9cSrandyf 	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
3042df1fe9cSrandyf 
3052df1fe9cSrandyf 	/*
30655d507a9SSeth Goldberg 	 * If startup wasn't able to find a page under 1M, we cannot
30755d507a9SSeth Goldberg 	 * proceed.
30855d507a9SSeth Goldberg 	 */
30955d507a9SSeth Goldberg 	if (rm_platter_va == 0) {
31055d507a9SSeth Goldberg 		cmn_err(CE_WARN, "Cannot suspend the system because no "
31155d507a9SSeth Goldberg 		    "memory below 1M could be found for processor startup");
31255d507a9SSeth Goldberg 		return;
31355d507a9SSeth Goldberg 	}
31455d507a9SSeth Goldberg 
31555d507a9SSeth Goldberg 	/*
3162df1fe9cSrandyf 	 * Copy the real mode code at "real_mode_start" to the
3172df1fe9cSrandyf 	 * page at rm_platter_va.
3182df1fe9cSrandyf 	 */
3192df1fe9cSrandyf 	warm_reset_vector = map_warm_reset_vector();
3202df1fe9cSrandyf 	if (warm_reset_vector == NULL) {
3212df1fe9cSrandyf 		PMD(PMD_SX, ("i_cpr_pre_resume_cpus() returning #2\n"))
3222df1fe9cSrandyf 		return;
3232df1fe9cSrandyf 	}
3242df1fe9cSrandyf 
3252df1fe9cSrandyf 	flushes_require_xcalls = 1;
3262df1fe9cSrandyf 
3272df1fe9cSrandyf 	/*
3282df1fe9cSrandyf 	 * We lock our affinity to the master CPU to ensure that all slave CPUs
3292df1fe9cSrandyf 	 * do their TSC syncs with the same CPU.
3302df1fe9cSrandyf 	 */
3312df1fe9cSrandyf 
3322df1fe9cSrandyf 	affinity_set(CPU_CURRENT);
3332df1fe9cSrandyf 
3344716fd88Sjan 	/*
335a563a037Sbholler 	 * Mark the boot cpu as being ready and in the procset, since we are
336a563a037Sbholler 	 * running on that cpu.
3374716fd88Sjan 	 */
3384716fd88Sjan 	CPUSET_ONLY(cpu_ready_set, boot_cpuid);
339a563a037Sbholler 	CPUSET_ONLY(procset, boot_cpuid);
3402df1fe9cSrandyf 
341a3114836SGerry Liu 	for (who = 0; who < max_ncpus; who++) {
3422df1fe9cSrandyf 
3432df1fe9cSrandyf 		wc_cpu_t	*cpup = wc_other_cpus + who;
3442df1fe9cSrandyf 		wc_desctbr_t	gdt;
3452df1fe9cSrandyf 
3464716fd88Sjan 		if (who == boot_cpuid)
3472df1fe9cSrandyf 			continue;
3482df1fe9cSrandyf 
3492df1fe9cSrandyf 		if (!CPU_IN_SET(mp_cpus, who))
3502df1fe9cSrandyf 			continue;
3512df1fe9cSrandyf 
3522df1fe9cSrandyf 		PMD(PMD_SX, ("%s() waking up %d cpu\n", str, who))
3532df1fe9cSrandyf 
3542df1fe9cSrandyf 		bcopy(cpup, &(wp->wc_cpu), sizeof (wc_cpu_t));
3552df1fe9cSrandyf 
3562df1fe9cSrandyf 		gdt.base = cpup->wc_gdt_base;
3572df1fe9cSrandyf 		gdt.limit = cpup->wc_gdt_limit;
3582df1fe9cSrandyf 
3592df1fe9cSrandyf #if defined(__amd64)
3607008f154SRichard Lowe 		code_length = (uint32_t)((uintptr_t)wc_long_mode_64 -
3617008f154SRichard Lowe 		    (uintptr_t)wc_rm_start);
3622df1fe9cSrandyf #else
3632df1fe9cSrandyf 		code_length = 0;
3642df1fe9cSrandyf #endif
3652df1fe9cSrandyf 
3662df1fe9cSrandyf 		init_real_mode_platter(who, code_length, cpup->wc_cr4, gdt);
3672df1fe9cSrandyf 
368a3114836SGerry Liu 		mutex_enter(&cpu_lock);
369a3114836SGerry Liu 		err = mach_cpuid_start(who, rm_platter_va);
370a3114836SGerry Liu 		mutex_exit(&cpu_lock);
371a3114836SGerry Liu 		if (err != 0) {
3722df1fe9cSrandyf 			cmn_err(CE_WARN, "cpu%d: failed to start during "
3732df1fe9cSrandyf 			    "suspend/resume error %d", who, err);
3742df1fe9cSrandyf 			continue;
3752df1fe9cSrandyf 		}
3762df1fe9cSrandyf 
377a563a037Sbholler 		PMD(PMD_SX, ("%s() #1 waiting for %d in procset\n", str, who))
3782df1fe9cSrandyf 
3794716fd88Sjan 		if (!wait_for_set(&procset, who))
3804716fd88Sjan 			continue;
3812df1fe9cSrandyf 
3822df1fe9cSrandyf 		PMD(PMD_SX, ("%s() %d cpu started\n", str, who))
3832df1fe9cSrandyf 
3844716fd88Sjan 		PMD(PMD_SX, ("%s() tsc_ready = %d\n", str, get_tsc_ready()))
3852df1fe9cSrandyf 
3862df1fe9cSrandyf 		if (tsc_gethrtime_enable) {
3872df1fe9cSrandyf 			PMD(PMD_SX, ("%s() calling tsc_sync_master\n", str))
3882df1fe9cSrandyf 			tsc_sync_master(who);
3892df1fe9cSrandyf 		}
3902df1fe9cSrandyf 
391a563a037Sbholler 		PMD(PMD_SX, ("%s() waiting for %d in cpu_ready_set\n", str,
392a563a037Sbholler 		    who))
		/*
		 * Wait for the cpu to declare that it is ready.  We want the
		 * cpus to start serially instead of in parallel, so that
		 * they do not contend with each other in wc_rm_start().
		 */
3984716fd88Sjan 		if (!wait_for_set(&cpu_ready_set, who))
3994716fd88Sjan 			continue;
4002df1fe9cSrandyf 
4012df1fe9cSrandyf 		/*
4022df1fe9cSrandyf 		 * do not need to re-initialize dtrace using dtrace_cpu_init
4032df1fe9cSrandyf 		 * function
4042df1fe9cSrandyf 		 */
4052df1fe9cSrandyf 		PMD(PMD_SX, ("%s() cpu %d now ready\n", str, who))
4062df1fe9cSrandyf 	}
4072df1fe9cSrandyf 
4082df1fe9cSrandyf 	affinity_clear();
4092df1fe9cSrandyf 
4102df1fe9cSrandyf 	PMD(PMD_SX, ("%s() all cpus now ready\n", str))
4114716fd88Sjan 
4122df1fe9cSrandyf }
4132df1fe9cSrandyf 
4142df1fe9cSrandyf static void
4152df1fe9cSrandyf unmap_warm_reset_vector(ushort_t *warm_reset_vector)
4162df1fe9cSrandyf {
4172df1fe9cSrandyf 	psm_unmap_phys((caddr_t)warm_reset_vector, sizeof (ushort_t *));
4182df1fe9cSrandyf }
4192df1fe9cSrandyf 
4202df1fe9cSrandyf /*
4212df1fe9cSrandyf  * We need to setup a 1:1 (virtual to physical) mapping for the
4222df1fe9cSrandyf  * page containing the wakeup code.
4232df1fe9cSrandyf  */
4242df1fe9cSrandyf static struct as *save_as;	/* when switching to kas */
4252df1fe9cSrandyf 
4262df1fe9cSrandyf static void
4272df1fe9cSrandyf unmap_wakeaddr_1to1(uint64_t wakephys)
4282df1fe9cSrandyf {
4292df1fe9cSrandyf 	uintptr_t	wp = (uintptr_t)wakephys;
4302df1fe9cSrandyf 	hat_setup(save_as->a_hat, 0);	/* switch back from kernel hat */
4312df1fe9cSrandyf 	hat_unload(kas.a_hat, (caddr_t)wp, PAGESIZE, HAT_UNLOAD);
4322df1fe9cSrandyf }
4332df1fe9cSrandyf 
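/*
 * Clean up after resume: unmap the warm reset vector and tear down the
 * 1:1 mapping that was set up for the wakeup code page.
 */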
4342df1fe9cSrandyf void
4352df1fe9cSrandyf i_cpr_post_resume_cpus()
4362df1fe9cSrandyf {
4372df1fe9cSrandyf 	uint64_t	wakephys = rm_platter_pa;
4382df1fe9cSrandyf 
4392df1fe9cSrandyf 	if (warm_reset_vector != NULL)
4402df1fe9cSrandyf 		unmap_warm_reset_vector(warm_reset_vector);
4412df1fe9cSrandyf 
4422df1fe9cSrandyf 	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
4432df1fe9cSrandyf 	    HAT_UNLOAD);
4442df1fe9cSrandyf 
4452df1fe9cSrandyf 	/*
4462df1fe9cSrandyf 	 * cmi_post_mpstartup() is only required upon boot not upon
4472df1fe9cSrandyf 	 * resume from RAM
4482df1fe9cSrandyf 	 */
4492df1fe9cSrandyf 
4502df1fe9cSrandyf 	PT(PT_UNDO1to1);
4512df1fe9cSrandyf 	/* Tear down 1:1 mapping for wakeup code */
4522df1fe9cSrandyf 	unmap_wakeaddr_1to1(wakephys);
4532df1fe9cSrandyf }
4542df1fe9cSrandyf 
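/*
 * Cross-call handling hook for the common cpr code; nothing is needed
 * here for ACPI S3 on x86.
 */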
4552df1fe9cSrandyf /* ARGSUSED */
4562df1fe9cSrandyf void
4572df1fe9cSrandyf i_cpr_handle_xc(int flag)
4582df1fe9cSrandyf {
4592df1fe9cSrandyf }
4602df1fe9cSrandyf 
4612df1fe9cSrandyf int
4622df1fe9cSrandyf i_cpr_reusable_supported(void)
4632df1fe9cSrandyf {
4642df1fe9cSrandyf 	return (0);
4652df1fe9cSrandyf }
4662df1fe9cSrandyf static void
4672df1fe9cSrandyf map_wakeaddr_1to1(uint64_t wakephys)
4682df1fe9cSrandyf {
4692df1fe9cSrandyf 	uintptr_t	wp = (uintptr_t)wakephys;
4702df1fe9cSrandyf 	hat_devload(kas.a_hat, (caddr_t)wp, PAGESIZE, btop(wakephys),
4712df1fe9cSrandyf 	    (PROT_READ|PROT_WRITE|PROT_EXEC|HAT_STORECACHING_OK|HAT_NOSYNC),
4722df1fe9cSrandyf 	    HAT_LOAD);
4732df1fe9cSrandyf 	save_as = curthread->t_procp->p_as;
4742df1fe9cSrandyf 	hat_setup(kas.a_hat, 0);	/* switch to kernel-only hat */
4752df1fe9cSrandyf }
4762df1fe9cSrandyf 
4772df1fe9cSrandyf 
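/*
 * Debug aid: dump the saved wakeup context (descriptor tables, stack
 * pointer, etc.) for every cpu in mp_cpus.
 */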
4782df1fe9cSrandyf void
4792df1fe9cSrandyf prt_other_cpus()
4802df1fe9cSrandyf {
4812df1fe9cSrandyf 	int	who;
4822df1fe9cSrandyf 
4832df1fe9cSrandyf 	if (ncpus == 1) {
4842df1fe9cSrandyf 		PMD(PMD_SX, ("prt_other_cpus() other cpu table empty for "
4852df1fe9cSrandyf 		    "uniprocessor machine\n"))
4862df1fe9cSrandyf 		return;
4872df1fe9cSrandyf 	}
4882df1fe9cSrandyf 
489a3114836SGerry Liu 	for (who = 0; who < max_ncpus; who++) {
4902df1fe9cSrandyf 
4912df1fe9cSrandyf 		wc_cpu_t	*cpup = wc_other_cpus + who;
4922df1fe9cSrandyf 
493a3114836SGerry Liu 		if (!CPU_IN_SET(mp_cpus, who))
494a3114836SGerry Liu 			continue;
495a3114836SGerry Liu 
4962df1fe9cSrandyf 		PMD(PMD_SX, ("prt_other_cpus() who = %d, gdt=%p:%x, "
4972df1fe9cSrandyf 		    "idt=%p:%x, ldt=%lx, tr=%lx, kgsbase="
4982df1fe9cSrandyf 		    AFMT ", sp=%lx\n", who,
4992df1fe9cSrandyf 		    (void *)cpup->wc_gdt_base, cpup->wc_gdt_limit,
5002df1fe9cSrandyf 		    (void *)cpup->wc_idt_base, cpup->wc_idt_limit,
5012df1fe9cSrandyf 		    (long)cpup->wc_ldt, (long)cpup->wc_tr,
5022df1fe9cSrandyf 		    (long)cpup->wc_kgsbase, (long)cpup->wc_rsp))
5032df1fe9cSrandyf 	}
5042df1fe9cSrandyf }
5052df1fe9cSrandyf 
5062df1fe9cSrandyf /*
5072df1fe9cSrandyf  * Power down the system.
5082df1fe9cSrandyf  */
5092df1fe9cSrandyf int
5102df1fe9cSrandyf i_cpr_power_down(int sleeptype)
5112df1fe9cSrandyf {
5122df1fe9cSrandyf 	caddr_t		wakevirt = rm_platter_va;
5132df1fe9cSrandyf 	uint64_t	wakephys = rm_platter_pa;
514a563a037Sbholler 	ulong_t		saved_intr;
5152df1fe9cSrandyf 	uint32_t	code_length = 0;
5162df1fe9cSrandyf 	wc_desctbr_t	gdt;
5172df1fe9cSrandyf 	/*LINTED*/
5182df1fe9cSrandyf 	wakecode_t	*wp = (wakecode_t *)wakevirt;
5192df1fe9cSrandyf 	/*LINTED*/
5202df1fe9cSrandyf 	rm_platter_t	*wcpp = (rm_platter_t *)wakevirt;
5212df1fe9cSrandyf 	wc_cpu_t	*cpup = &(wp->wc_cpu);
5222df1fe9cSrandyf 	dev_info_t	*ppm;
5232df1fe9cSrandyf 	int		ret = 0;
5242df1fe9cSrandyf 	power_req_t	power_req;
5252df1fe9cSrandyf 	char *str =	"i_cpr_power_down";
5262df1fe9cSrandyf #if defined(__amd64)
5272df1fe9cSrandyf 	/*LINTED*/
5282df1fe9cSrandyf 	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
5292df1fe9cSrandyf #endif
5302df1fe9cSrandyf 	extern int	cpr_suspend_succeeded;
5312df1fe9cSrandyf 	extern void	kernel_wc_code();
5322df1fe9cSrandyf 
5332df1fe9cSrandyf 	ASSERT(sleeptype == CPR_TORAM);
5342df1fe9cSrandyf 	ASSERT(CPU->cpu_id == 0);
5352df1fe9cSrandyf 
5362df1fe9cSrandyf 	if ((ppm = PPM(ddi_root_node())) == NULL) {
5372df1fe9cSrandyf 		PMD(PMD_SX, ("%s: root node not claimed\n", str))
5382df1fe9cSrandyf 		return (ENOTTY);
5392df1fe9cSrandyf 	}
5402df1fe9cSrandyf 
5412df1fe9cSrandyf 	PMD(PMD_SX, ("Entering %s()\n", str))
5422df1fe9cSrandyf 
5432df1fe9cSrandyf 	PT(PT_IC);
5442df1fe9cSrandyf 	saved_intr = intr_clear();
5452df1fe9cSrandyf 
5462df1fe9cSrandyf 	PT(PT_1to1);
5472df1fe9cSrandyf 	/* Setup 1:1 mapping for wakeup code */
5482df1fe9cSrandyf 	map_wakeaddr_1to1(wakephys);
5492df1fe9cSrandyf 
5502df1fe9cSrandyf 	PMD(PMD_SX, ("ncpus=%d\n", ncpus))
5512df1fe9cSrandyf 
5522df1fe9cSrandyf 	PMD(PMD_SX, ("wc_rm_end - wc_rm_start=%lx WC_CODESIZE=%x\n",
5537008f154SRichard Lowe 	    ((size_t)((uintptr_t)wc_rm_end - (uintptr_t)wc_rm_start)),
5547008f154SRichard Lowe 	    WC_CODESIZE))
5552df1fe9cSrandyf 
5562df1fe9cSrandyf 	PMD(PMD_SX, ("wakevirt=%p, wakephys=%x\n",
5572df1fe9cSrandyf 	    (void *)wakevirt, (uint_t)wakephys))
5582df1fe9cSrandyf 
5597008f154SRichard Lowe 	ASSERT(((size_t)((uintptr_t)wc_rm_end - (uintptr_t)wc_rm_start)) <
5602df1fe9cSrandyf 	    WC_CODESIZE);
5612df1fe9cSrandyf 
5622df1fe9cSrandyf 	bzero(wakevirt, PAGESIZE);
5632df1fe9cSrandyf 
5642df1fe9cSrandyf 	/* Copy code to rm_platter */
5652df1fe9cSrandyf 	bcopy((caddr_t)wc_rm_start, wakevirt,
5667008f154SRichard Lowe 	    (size_t)((uintptr_t)wc_rm_end - (uintptr_t)wc_rm_start));
5672df1fe9cSrandyf 
5682df1fe9cSrandyf 	prt_other_cpus();
5692df1fe9cSrandyf 
5702df1fe9cSrandyf #if defined(__amd64)
5712df1fe9cSrandyf 
5722df1fe9cSrandyf 	PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n",
5732df1fe9cSrandyf 	    (ulong_t)real_mode_platter->rm_cr4, (ulong_t)getcr4()))
5747008f154SRichard Lowe 
5752df1fe9cSrandyf 	PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n",
5762df1fe9cSrandyf 	    (ulong_t)real_mode_platter->rm_pdbr, getcr3()))
5772df1fe9cSrandyf 
5782df1fe9cSrandyf 	real_mode_platter->rm_cr4 = getcr4();
5792df1fe9cSrandyf 	real_mode_platter->rm_pdbr = getcr3();
5802df1fe9cSrandyf 
5812df1fe9cSrandyf 	rmp_gdt_init(real_mode_platter);
5822df1fe9cSrandyf 
5832df1fe9cSrandyf 	/*
5842df1fe9cSrandyf 	 * Since the CPU needs to jump to protected mode using an identity
5852df1fe9cSrandyf 	 * mapped address, we need to calculate it here.
5862df1fe9cSrandyf 	 */
5872df1fe9cSrandyf 	real_mode_platter->rm_longmode64_addr = rm_platter_pa +
5887008f154SRichard Lowe 	    (uint32_t)((uintptr_t)wc_long_mode_64 - (uintptr_t)wc_rm_start);
5892df1fe9cSrandyf 
5902df1fe9cSrandyf 	PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n",
5912df1fe9cSrandyf 	    (ulong_t)real_mode_platter->rm_cr4, getcr4()))
5922df1fe9cSrandyf 	PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n",
5932df1fe9cSrandyf 	    (ulong_t)real_mode_platter->rm_pdbr, getcr3()))
5942df1fe9cSrandyf 
5952df1fe9cSrandyf 	PMD(PMD_SX, ("real_mode_platter->rm_longmode64_addr=%lx\n",
5962df1fe9cSrandyf 	    (ulong_t)real_mode_platter->rm_longmode64_addr))
5972df1fe9cSrandyf 
5982df1fe9cSrandyf #endif
5992df1fe9cSrandyf 
6002df1fe9cSrandyf 	PT(PT_SC);
6012df1fe9cSrandyf 	if (wc_save_context(cpup)) {
6022df1fe9cSrandyf 
6032df1fe9cSrandyf 		ret = i_cpr_platform_alloc(&(wc_other_cpus->wc_apic_state));
6042df1fe9cSrandyf 		if (ret != 0)
6052df1fe9cSrandyf 			return (ret);
6062df1fe9cSrandyf 
6072df1fe9cSrandyf 		ret = i_cpr_save_apic(&(wc_other_cpus->wc_apic_state));
6082df1fe9cSrandyf 		PMD(PMD_SX, ("%s: i_cpr_save_apic() returned %d\n", str, ret))
6092df1fe9cSrandyf 		if (ret != 0)
6102df1fe9cSrandyf 			return (ret);
6112df1fe9cSrandyf 
6122df1fe9cSrandyf 		PMD(PMD_SX, ("wakephys=%x, kernel_wc_code=%p\n",
6132df1fe9cSrandyf 		    (uint_t)wakephys, (void *)&kernel_wc_code))
6142df1fe9cSrandyf 		PMD(PMD_SX, ("virtaddr=%lx, retaddr=%lx\n",
6152df1fe9cSrandyf 		    (long)cpup->wc_virtaddr, (long)cpup->wc_retaddr))
6162df1fe9cSrandyf 		PMD(PMD_SX, ("ebx=%x, edi=%x, esi=%x, ebp=%x, esp=%x\n",
6172df1fe9cSrandyf 		    cpup->wc_ebx, cpup->wc_edi, cpup->wc_esi, cpup->wc_ebp,
6182df1fe9cSrandyf 		    cpup->wc_esp))
6192df1fe9cSrandyf 		PMD(PMD_SX, ("cr0=%lx, cr3=%lx, cr4=%lx\n",
6202df1fe9cSrandyf 		    (long)cpup->wc_cr0, (long)cpup->wc_cr3,
6212df1fe9cSrandyf 		    (long)cpup->wc_cr4))
6222df1fe9cSrandyf 		PMD(PMD_SX, ("cs=%x, ds=%x, es=%x, ss=%x, fs=%lx, gs=%lx, "
6232df1fe9cSrandyf 		    "flgs=%lx\n", cpup->wc_cs, cpup->wc_ds, cpup->wc_es,
6242df1fe9cSrandyf 		    cpup->wc_ss, (long)cpup->wc_fs, (long)cpup->wc_gs,
6252df1fe9cSrandyf 		    (long)cpup->wc_eflags))
6262df1fe9cSrandyf 
6272df1fe9cSrandyf 		PMD(PMD_SX, ("gdt=%p:%x, idt=%p:%x, ldt=%lx, tr=%lx, "
6282df1fe9cSrandyf 		    "kgbase=%lx\n", (void *)cpup->wc_gdt_base,
6292df1fe9cSrandyf 		    cpup->wc_gdt_limit, (void *)cpup->wc_idt_base,
6302df1fe9cSrandyf 		    cpup->wc_idt_limit, (long)cpup->wc_ldt,
6312df1fe9cSrandyf 		    (long)cpup->wc_tr, (long)cpup->wc_kgsbase))
6322df1fe9cSrandyf 
6332df1fe9cSrandyf 		gdt.base = cpup->wc_gdt_base;
6342df1fe9cSrandyf 		gdt.limit = cpup->wc_gdt_limit;
6352df1fe9cSrandyf 
6362df1fe9cSrandyf #if defined(__amd64)
6377008f154SRichard Lowe 		code_length = (uint32_t)((uintptr_t)wc_long_mode_64 -
6387008f154SRichard Lowe 		    (uintptr_t)wc_rm_start);
6392df1fe9cSrandyf #else
6402df1fe9cSrandyf 		code_length = 0;
6412df1fe9cSrandyf #endif
6422df1fe9cSrandyf 
6432df1fe9cSrandyf 		init_real_mode_platter(0, code_length, cpup->wc_cr4, gdt);
6442df1fe9cSrandyf 
6452df1fe9cSrandyf #if defined(__amd64)
6462df1fe9cSrandyf 		PMD(PMD_SX, ("real_mode_platter->rm_cr4=%lx, getcr4()=%lx\n",
6472df1fe9cSrandyf 		    (ulong_t)wcpp->rm_cr4, getcr4()))
6482df1fe9cSrandyf 
6492df1fe9cSrandyf 		PMD(PMD_SX, ("real_mode_platter->rm_pdbr=%lx, getcr3()=%lx\n",
6502df1fe9cSrandyf 		    (ulong_t)wcpp->rm_pdbr, getcr3()))
6512df1fe9cSrandyf 
6522df1fe9cSrandyf 		PMD(PMD_SX, ("real_mode_platter->rm_longmode64_addr=%lx\n",
6532df1fe9cSrandyf 		    (ulong_t)wcpp->rm_longmode64_addr))
6542df1fe9cSrandyf 
6552df1fe9cSrandyf 		PMD(PMD_SX,
6562df1fe9cSrandyf 		    ("real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64]=%lx\n",
6572df1fe9cSrandyf 		    (ulong_t)wcpp->rm_temp_gdt[TEMPGDT_KCODE64]))
6582df1fe9cSrandyf #endif
6592df1fe9cSrandyf 
6602df1fe9cSrandyf 		PMD(PMD_SX, ("gdt=%p:%x, idt=%p:%x, ldt=%lx, tr=%lx, "
6612df1fe9cSrandyf 		    "kgsbase=%lx\n", (void *)wcpp->rm_gdt_base,
6622df1fe9cSrandyf 		    wcpp->rm_gdt_lim, (void *)wcpp->rm_idt_base,
6632df1fe9cSrandyf 		    wcpp->rm_idt_lim, (long)cpup->wc_ldt, (long)cpup->wc_tr,
6642df1fe9cSrandyf 		    (long)cpup->wc_kgsbase))
6652df1fe9cSrandyf 
6662df1fe9cSrandyf 		power_req.request_type = PMR_PPM_ENTER_SX;
6672df1fe9cSrandyf 		power_req.req.ppm_power_enter_sx_req.sx_state = S3;
6682df1fe9cSrandyf 		power_req.req.ppm_power_enter_sx_req.test_point =
6692df1fe9cSrandyf 		    cpr_test_point;
6702df1fe9cSrandyf 		power_req.req.ppm_power_enter_sx_req.wakephys = wakephys;
6712df1fe9cSrandyf 
6722df1fe9cSrandyf 		PMD(PMD_SX, ("%s: pm_ctlops PMR_PPM_ENTER_SX\n", str))
6732df1fe9cSrandyf 		PT(PT_PPMCTLOP);
6742df1fe9cSrandyf 		(void) pm_ctlops(ppm, ddi_root_node(), DDI_CTLOPS_POWER,
6752df1fe9cSrandyf 		    &power_req, &ret);
6762df1fe9cSrandyf 		PMD(PMD_SX, ("%s: returns %d\n", str, ret))
6772df1fe9cSrandyf 
		/*
		 * If it works, we get control back to the else branch below.
		 * If we get control back here, it didn't work.
		 * XXX return EINVAL here?
		 */
6832df1fe9cSrandyf 
6842df1fe9cSrandyf 		unmap_wakeaddr_1to1(wakephys);
6852df1fe9cSrandyf 		intr_restore(saved_intr);
6862df1fe9cSrandyf 
6872df1fe9cSrandyf 		return (ret);
6882df1fe9cSrandyf 	} else {
6892df1fe9cSrandyf 		cpr_suspend_succeeded = 1;
6902df1fe9cSrandyf 
6912df1fe9cSrandyf 		power_req.request_type = PMR_PPM_EXIT_SX;
6922df1fe9cSrandyf 		power_req.req.ppm_power_enter_sx_req.sx_state = S3;
6932df1fe9cSrandyf 
6942df1fe9cSrandyf 		PMD(PMD_SX, ("%s: pm_ctlops PMR_PPM_EXIT_SX\n", str))
6952df1fe9cSrandyf 		PT(PT_PPMCTLOP);
6962df1fe9cSrandyf 		(void) pm_ctlops(ppm, ddi_root_node(), DDI_CTLOPS_POWER,
6972df1fe9cSrandyf 		    &power_req, &ret);
6982df1fe9cSrandyf 		PMD(PMD_SX, ("%s: returns %d\n", str, ret))
6992df1fe9cSrandyf 
7002df1fe9cSrandyf 		ret = i_cpr_restore_apic(&(wc_other_cpus->wc_apic_state));
		/*
		 * the restore should never fail if the save succeeded
		 */
7042df1fe9cSrandyf 		ASSERT(ret == 0);
7052df1fe9cSrandyf 
7062df1fe9cSrandyf 		i_cpr_platform_free(&(wc_other_cpus->wc_apic_state));
7072df1fe9cSrandyf 
708643e2e74Sbholler 		/*
709643e2e74Sbholler 		 * Enable interrupts on boot cpu.
710643e2e74Sbholler 		 */
711643e2e74Sbholler 		ASSERT(CPU->cpu_id == i_cpr_bootcpuid());
712643e2e74Sbholler 		mutex_enter(&cpu_lock);
713643e2e74Sbholler 		cpu_enable_intr(CPU);
714643e2e74Sbholler 		mutex_exit(&cpu_lock);
715643e2e74Sbholler 
7162df1fe9cSrandyf 		PT(PT_INTRRESTORE);
7172df1fe9cSrandyf 		intr_restore(saved_intr);
7182df1fe9cSrandyf 		PT(PT_CPU);
7192df1fe9cSrandyf 
7202df1fe9cSrandyf 		return (ret);
7212df1fe9cSrandyf 	}
7222df1fe9cSrandyf }
7232df1fe9cSrandyf 
/*
 * Stop all other cpus before halting or rebooting.  We pause the cpus
 * instead of sending a cross call.
 * Stolen from sun4/os/mp_states.c
 */
7292df1fe9cSrandyf 
7302df1fe9cSrandyf static int cpu_are_paused;	/* sic */
7312df1fe9cSrandyf 
7322df1fe9cSrandyf void
7332df1fe9cSrandyf i_cpr_stop_other_cpus(void)
7342df1fe9cSrandyf {
7352df1fe9cSrandyf 	mutex_enter(&cpu_lock);
7362df1fe9cSrandyf 	if (cpu_are_paused) {
7372df1fe9cSrandyf 		mutex_exit(&cpu_lock);
7382df1fe9cSrandyf 		return;
7392df1fe9cSrandyf 	}
740*0ed5c46eSJosef 'Jeff' Sipek 	pause_cpus(NULL, NULL);
7412df1fe9cSrandyf 	cpu_are_paused = 1;
7422df1fe9cSrandyf 
7432df1fe9cSrandyf 	mutex_exit(&cpu_lock);
7442df1fe9cSrandyf }
7452df1fe9cSrandyf 
7462df1fe9cSrandyf int
7472df1fe9cSrandyf i_cpr_is_supported(int sleeptype)
7482df1fe9cSrandyf {
7492df1fe9cSrandyf 	extern int cpr_supported_override;
7502df1fe9cSrandyf 	extern int cpr_platform_enable;
7512df1fe9cSrandyf 	extern int pm_S3_enabled;
7522df1fe9cSrandyf 
7532df1fe9cSrandyf 	if (sleeptype != CPR_TORAM)
7542df1fe9cSrandyf 		return (0);
7552df1fe9cSrandyf 
7562df1fe9cSrandyf 	/*
7572df1fe9cSrandyf 	 * The next statement tests if a specific platform has turned off
7582df1fe9cSrandyf 	 * cpr support.
7592df1fe9cSrandyf 	 */
7602df1fe9cSrandyf 	if (cpr_supported_override)
7612df1fe9cSrandyf 		return (0);
7622df1fe9cSrandyf 
7632df1fe9cSrandyf 	/*
7642df1fe9cSrandyf 	 * If a platform has specifically turned on cpr support ...
7652df1fe9cSrandyf 	 */
7662df1fe9cSrandyf 	if (cpr_platform_enable)
7672df1fe9cSrandyf 		return (1);
7682df1fe9cSrandyf 
7692df1fe9cSrandyf 	return (pm_S3_enabled);
7702df1fe9cSrandyf }
7712df1fe9cSrandyf 
7722df1fe9cSrandyf void
7732df1fe9cSrandyf i_cpr_bitmap_cleanup(void)
7742df1fe9cSrandyf {
7752df1fe9cSrandyf }
7762df1fe9cSrandyf 
7772df1fe9cSrandyf void
7782df1fe9cSrandyf i_cpr_free_memory_resources(void)
7792df1fe9cSrandyf {
7802df1fe9cSrandyf }
7812df1fe9cSrandyf 
7822df1fe9cSrandyf /*
7832df1fe9cSrandyf  * Needed only for S3 so far
7842df1fe9cSrandyf  */
7852df1fe9cSrandyf static int
7862df1fe9cSrandyf i_cpr_platform_alloc(psm_state_request_t *req)
7872df1fe9cSrandyf {
78855d507a9SSeth Goldberg #ifdef DEBUG
7892df1fe9cSrandyf 	char	*str = "i_cpr_platform_alloc";
79055d507a9SSeth Goldberg #endif
7912df1fe9cSrandyf 
7922df1fe9cSrandyf 	PMD(PMD_SX, ("cpu = %d, %s(%p) \n", CPU->cpu_id, str, (void *)req))
7932df1fe9cSrandyf 
79455d507a9SSeth Goldberg 	if (psm_state == NULL) {
79555d507a9SSeth Goldberg 		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
7962df1fe9cSrandyf 		return (0);
7972df1fe9cSrandyf 	}
7982df1fe9cSrandyf 
7992df1fe9cSrandyf 	req->psr_cmd = PSM_STATE_ALLOC;
8002df1fe9cSrandyf 	return ((*psm_state)(req));
8012df1fe9cSrandyf }
8022df1fe9cSrandyf 
8032df1fe9cSrandyf /*
8042df1fe9cSrandyf  * Needed only for S3 so far
8052df1fe9cSrandyf  */
8062df1fe9cSrandyf static void
8072df1fe9cSrandyf i_cpr_platform_free(psm_state_request_t *req)
8082df1fe9cSrandyf {
80955d507a9SSeth Goldberg #ifdef DEBUG
8102df1fe9cSrandyf 	char	*str = "i_cpr_platform_free";
81155d507a9SSeth Goldberg #endif
8122df1fe9cSrandyf 
8132df1fe9cSrandyf 	PMD(PMD_SX, ("cpu = %d, %s(%p) \n", CPU->cpu_id, str, (void *)req))
8142df1fe9cSrandyf 
81555d507a9SSeth Goldberg 	if (psm_state == NULL) {
81655d507a9SSeth Goldberg 		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
81755d507a9SSeth Goldberg 		return;
8182df1fe9cSrandyf 	}
8192df1fe9cSrandyf 
8202df1fe9cSrandyf 	req->psr_cmd = PSM_STATE_FREE;
8212df1fe9cSrandyf 	(void) (*psm_state)(req);
8222df1fe9cSrandyf }
8232df1fe9cSrandyf 
8242df1fe9cSrandyf static int
8252df1fe9cSrandyf i_cpr_save_apic(psm_state_request_t *req)
8262df1fe9cSrandyf {
82755d507a9SSeth Goldberg #ifdef DEBUG
8282df1fe9cSrandyf 	char	*str = "i_cpr_save_apic";
82955d507a9SSeth Goldberg #endif
8302df1fe9cSrandyf 
83155d507a9SSeth Goldberg 	if (psm_state == NULL) {
83255d507a9SSeth Goldberg 		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
8332df1fe9cSrandyf 		return (0);
8342df1fe9cSrandyf 	}
8352df1fe9cSrandyf 
8362df1fe9cSrandyf 	req->psr_cmd = PSM_STATE_SAVE;
8372df1fe9cSrandyf 	return ((*psm_state)(req));
8382df1fe9cSrandyf }
8392df1fe9cSrandyf 
8402df1fe9cSrandyf static int
8412df1fe9cSrandyf i_cpr_restore_apic(psm_state_request_t *req)
8422df1fe9cSrandyf {
84355d507a9SSeth Goldberg #ifdef DEBUG
8442df1fe9cSrandyf 	char	*str = "i_cpr_restore_apic";
84555d507a9SSeth Goldberg #endif
8462df1fe9cSrandyf 
84755d507a9SSeth Goldberg 	if (psm_state == NULL) {
84855d507a9SSeth Goldberg 		PMD(PMD_SX, ("%s() : psm_state == NULL\n", str))
8492df1fe9cSrandyf 		return (0);
8502df1fe9cSrandyf 	}
8512df1fe9cSrandyf 
8522df1fe9cSrandyf 	req->psr_cmd = PSM_STATE_RESTORE;
8532df1fe9cSrandyf 	return ((*psm_state)(req));
8542df1fe9cSrandyf }
8552df1fe9cSrandyf 
8562df1fe9cSrandyf 
8572df1fe9cSrandyf /* stop lint complaining about offset not being used in 32bit mode */
8582df1fe9cSrandyf #if !defined(__amd64)
8592df1fe9cSrandyf /*ARGSUSED*/
8602df1fe9cSrandyf #endif
8612df1fe9cSrandyf static void
8622df1fe9cSrandyf init_real_mode_platter(int cpun, uint32_t offset, uint_t cr4, wc_desctbr_t gdt)
8632df1fe9cSrandyf {
8642df1fe9cSrandyf 	/*LINTED*/
8652df1fe9cSrandyf 	rm_platter_t *real_mode_platter = (rm_platter_t *)rm_platter_va;
8662df1fe9cSrandyf 
	/*
	 * Fill up the real mode platter to make it easy for real mode code to
	 * kick it off.  This area should really be a page passed by boot to
	 * the kernel, guaranteed to be below 1MB and aligned to 16 bytes.  It
	 * should also have identical physical and virtual addresses in paged
	 * mode.
	 */
8732df1fe9cSrandyf 
8742df1fe9cSrandyf 	real_mode_platter->rm_pdbr = getcr3();
8752df1fe9cSrandyf 	real_mode_platter->rm_cpu = cpun;
8762df1fe9cSrandyf 	real_mode_platter->rm_cr4 = cr4;
8772df1fe9cSrandyf 
8782df1fe9cSrandyf 	real_mode_platter->rm_gdt_base = gdt.base;
8792df1fe9cSrandyf 	real_mode_platter->rm_gdt_lim = gdt.limit;
8802df1fe9cSrandyf 
8812df1fe9cSrandyf #if defined(__amd64)
8822df1fe9cSrandyf 	if (getcr3() > 0xffffffffUL)
8832df1fe9cSrandyf 		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
8842df1fe9cSrandyf 		    "located above 4G in physical memory (@ 0x%llx).",
8852df1fe9cSrandyf 		    (unsigned long long)getcr3());
8862df1fe9cSrandyf 
8872df1fe9cSrandyf 	/*
8882df1fe9cSrandyf 	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
8892df1fe9cSrandyf 	 * by code in real_mode_start():
8902df1fe9cSrandyf 	 *
8912df1fe9cSrandyf 	 * GDT[0]:  NULL selector
8922df1fe9cSrandyf 	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
8932df1fe9cSrandyf 	 *
8942df1fe9cSrandyf 	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
8952df1fe9cSrandyf 	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
8962df1fe9cSrandyf 	 * a course of action as any other, though it may cause the entire
8972df1fe9cSrandyf 	 * platform to reset in some cases...
8982df1fe9cSrandyf 	 */
8992df1fe9cSrandyf 	real_mode_platter->rm_temp_gdt[0] = 0ULL;
9002df1fe9cSrandyf 	real_mode_platter->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
9012df1fe9cSrandyf 
9022df1fe9cSrandyf 	real_mode_platter->rm_temp_gdt_lim = (ushort_t)
9032df1fe9cSrandyf 	    (sizeof (real_mode_platter->rm_temp_gdt) - 1);
9042df1fe9cSrandyf 	real_mode_platter->rm_temp_gdt_base = rm_platter_pa +
9057008f154SRichard Lowe 	    offsetof(rm_platter_t, rm_temp_gdt);
9062df1fe9cSrandyf 
9072df1fe9cSrandyf 	real_mode_platter->rm_temp_idt_lim = 0;
9082df1fe9cSrandyf 	real_mode_platter->rm_temp_idt_base = 0;
9092df1fe9cSrandyf 
9102df1fe9cSrandyf 	/*
9112df1fe9cSrandyf 	 * Since the CPU needs to jump to protected mode using an identity
9122df1fe9cSrandyf 	 * mapped address, we need to calculate it here.
9132df1fe9cSrandyf 	 */
9142df1fe9cSrandyf 	real_mode_platter->rm_longmode64_addr = rm_platter_pa + offset;
9152df1fe9cSrandyf #endif	/* __amd64 */
9162df1fe9cSrandyf 
9172df1fe9cSrandyf 	/* return; */
9182df1fe9cSrandyf }
9192df1fe9cSrandyf 
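/*
 * Per-cpu resume path for the non-boot cpus: resync the PAT, restore
 * the xsave feature mask if needed, reinitialize the syscall handlers,
 * rejoin procset, resync the TSC with the boot cpu and re-enable
 * interrupts.
 */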
9202df1fe9cSrandyf void
9212df1fe9cSrandyf i_cpr_start_cpu(void)
9222df1fe9cSrandyf {
9232df1fe9cSrandyf 
9242df1fe9cSrandyf 	struct cpu *cp = CPU;
9252df1fe9cSrandyf 
9262df1fe9cSrandyf 	char *str = "i_cpr_start_cpu";
9272df1fe9cSrandyf 	extern void init_cpu_syscall(struct cpu *cp);
9282df1fe9cSrandyf 
9292df1fe9cSrandyf 	PMD(PMD_SX, ("%s() called\n", str))
9302df1fe9cSrandyf 
9312df1fe9cSrandyf 	PMD(PMD_SX, ("%s() #0 cp->cpu_base_spl %d\n", str,
9322df1fe9cSrandyf 	    cp->cpu_base_spl))
9332df1fe9cSrandyf 
9342df1fe9cSrandyf 	mutex_enter(&cpu_lock);
9352df1fe9cSrandyf 	if (cp == i_cpr_bootcpu()) {
9362df1fe9cSrandyf 		mutex_exit(&cpu_lock);
9372df1fe9cSrandyf 		PMD(PMD_SX,
9382df1fe9cSrandyf 		    ("%s() called on bootcpu nothing to do!\n", str))
9392df1fe9cSrandyf 		return;
9402df1fe9cSrandyf 	}
9412df1fe9cSrandyf 	mutex_exit(&cpu_lock);
9422df1fe9cSrandyf 
9432df1fe9cSrandyf 	/*
	/*
	 * We need to sync PAT with cpu0's PAT.  We have to do
	 * this with interrupts disabled.
	 */
9472df1fe9cSrandyf 	pat_sync();
9482df1fe9cSrandyf 
9492df1fe9cSrandyf 	/*
9507af88ac7SKuriakose Kuruvilla 	 * If we use XSAVE, we need to restore XFEATURE_ENABLE_MASK register.
9517af88ac7SKuriakose Kuruvilla 	 */
9527af88ac7SKuriakose Kuruvilla 	if (fp_save_mech == FP_XSAVE) {
9537af88ac7SKuriakose Kuruvilla 		setup_xfem();
9547af88ac7SKuriakose Kuruvilla 	}
9557af88ac7SKuriakose Kuruvilla 
9567af88ac7SKuriakose Kuruvilla 	/*
9572df1fe9cSrandyf 	 * Initialize this CPU's syscall handlers
9582df1fe9cSrandyf 	 */
9592df1fe9cSrandyf 	init_cpu_syscall(cp);
9602df1fe9cSrandyf 
9612df1fe9cSrandyf 	PMD(PMD_SX, ("%s() #1 cp->cpu_base_spl %d\n", str, cp->cpu_base_spl))
9622df1fe9cSrandyf 
	/*
	 * There is no need to call cpuid_pass2(), cpuid_pass3(),
	 * cpuid_pass4() or init_cpu_info(), since the work they do only
	 * needs to be done once, at boot time.
	 */
9682df1fe9cSrandyf 
9692df1fe9cSrandyf 
9702df1fe9cSrandyf 	mutex_enter(&cpu_lock);
9712df1fe9cSrandyf 	CPUSET_ADD(procset, cp->cpu_id);
9722df1fe9cSrandyf 	mutex_exit(&cpu_lock);
9732df1fe9cSrandyf 
9742df1fe9cSrandyf 	PMD(PMD_SX, ("%s() #2 cp->cpu_base_spl %d\n", str,
9752df1fe9cSrandyf 	    cp->cpu_base_spl))
9762df1fe9cSrandyf 
9772df1fe9cSrandyf 	if (tsc_gethrtime_enable) {
9782df1fe9cSrandyf 		PMD(PMD_SX, ("%s() calling tsc_sync_slave\n", str))
9792df1fe9cSrandyf 		tsc_sync_slave();
9802df1fe9cSrandyf 	}
9812df1fe9cSrandyf 
9822df1fe9cSrandyf 	PMD(PMD_SX, ("%s() cp->cpu_id %d, cp->cpu_intr_actv %d\n", str,
9832df1fe9cSrandyf 	    cp->cpu_id, cp->cpu_intr_actv))
9842df1fe9cSrandyf 	PMD(PMD_SX, ("%s() #3 cp->cpu_base_spl %d\n", str,
9852df1fe9cSrandyf 	    cp->cpu_base_spl))
9862df1fe9cSrandyf 
9872df1fe9cSrandyf 	(void) spl0();		/* enable interrupts */
9882df1fe9cSrandyf 
9892df1fe9cSrandyf 	PMD(PMD_SX, ("%s() #4 cp->cpu_base_spl %d\n", str,
9902df1fe9cSrandyf 	    cp->cpu_base_spl))
9912df1fe9cSrandyf 
9922df1fe9cSrandyf 	/*
9932df1fe9cSrandyf 	 * Set up the CPU module for this CPU.  This can't be done before
9942df1fe9cSrandyf 	 * this CPU is made CPU_READY, because we may (in heterogeneous systems)
9952df1fe9cSrandyf 	 * need to go load another CPU module.  The act of attempting to load
9962df1fe9cSrandyf 	 * a module may trigger a cross-call, which will ASSERT unless this
9972df1fe9cSrandyf 	 * cpu is CPU_READY.
9982df1fe9cSrandyf 	 */
9992df1fe9cSrandyf 
	/*
	 * cmi has already been init'd (during boot), so it does not need
	 * to be done again
	 */
10032df1fe9cSrandyf #ifdef PM_REINITMCAONRESUME
10047417cfdeSKuriakose Kuruvilla 	if (is_x86_feature(x86_featureset, X86FSET_MCA))
10052df1fe9cSrandyf 		cmi_mca_init();
10062df1fe9cSrandyf #endif
10072df1fe9cSrandyf 
10082df1fe9cSrandyf 	PMD(PMD_SX, ("%s() returning\n", str))
10092df1fe9cSrandyf 
10102df1fe9cSrandyf 	/* return; */
10112df1fe9cSrandyf }
10122df1fe9cSrandyf 
10132df1fe9cSrandyf void
10142df1fe9cSrandyf i_cpr_alloc_cpus(void)
10152df1fe9cSrandyf {
10162df1fe9cSrandyf 	char *str = "i_cpr_alloc_cpus";
10172df1fe9cSrandyf 
10182df1fe9cSrandyf 	PMD(PMD_SX, ("%s() CPU->cpu_id %d\n", str, CPU->cpu_id))
10192df1fe9cSrandyf 	/*
10202df1fe9cSrandyf 	 * we allocate this only when we actually need it to save on
10212df1fe9cSrandyf 	 * kernel memory
10222df1fe9cSrandyf 	 */
10232df1fe9cSrandyf 
10242df1fe9cSrandyf 	if (wc_other_cpus == NULL) {
1025a3114836SGerry Liu 		wc_other_cpus = kmem_zalloc(max_ncpus * sizeof (wc_cpu_t),
10262df1fe9cSrandyf 		    KM_SLEEP);
10272df1fe9cSrandyf 	}
10282df1fe9cSrandyf 
10292df1fe9cSrandyf }
10302df1fe9cSrandyf 
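/*
 * Free the per-cpu wakeup context array, along with any saved stack
 * copies, allocated by i_cpr_alloc_cpus() and i_cpr_save_stack().
 */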
10312df1fe9cSrandyf void
10322df1fe9cSrandyf i_cpr_free_cpus(void)
10332df1fe9cSrandyf {
1034e2e5537fSJoseph A Townsend 	int index;
1035e2e5537fSJoseph A Townsend 	wc_cpu_t *wc_cpu;
1036e2e5537fSJoseph A Townsend 
10372df1fe9cSrandyf 	if (wc_other_cpus != NULL) {
1038a3114836SGerry Liu 		for (index = 0; index < max_ncpus; index++) {
1039e2e5537fSJoseph A Townsend 			wc_cpu = wc_other_cpus + index;
1040e2e5537fSJoseph A Townsend 			if (wc_cpu->wc_saved_stack != NULL) {
1041e2e5537fSJoseph A Townsend 				kmem_free(wc_cpu->wc_saved_stack,
1042e2e5537fSJoseph A Townsend 				    wc_cpu->wc_saved_stack_size);
1043e2e5537fSJoseph A Townsend 			}
1044e2e5537fSJoseph A Townsend 		}
1045e2e5537fSJoseph A Townsend 
1046a3114836SGerry Liu 		kmem_free((void *) wc_other_cpus,
1047a3114836SGerry Liu 		    max_ncpus * sizeof (wc_cpu_t));
10482df1fe9cSrandyf 		wc_other_cpus = NULL;
10492df1fe9cSrandyf 	}
10502df1fe9cSrandyf }
10512df1fe9cSrandyf 
10522df1fe9cSrandyf /*
10532df1fe9cSrandyf  * wrapper for acpica_ddi_save_resources()
10542df1fe9cSrandyf  */
10552df1fe9cSrandyf void
10562df1fe9cSrandyf i_cpr_save_configuration(dev_info_t *dip)
10572df1fe9cSrandyf {
10582df1fe9cSrandyf 	acpica_ddi_save_resources(dip);
10592df1fe9cSrandyf }
10602df1fe9cSrandyf 
10612df1fe9cSrandyf /*
10622df1fe9cSrandyf  * wrapper for acpica_ddi_restore_resources()
10632df1fe9cSrandyf  */
10642df1fe9cSrandyf void
10652df1fe9cSrandyf i_cpr_restore_configuration(dev_info_t *dip)
10662df1fe9cSrandyf {
10672df1fe9cSrandyf 	acpica_ddi_restore_resources(dip);
10682df1fe9cSrandyf }
10694716fd88Sjan 
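/*
 * Poll until cpu 'who' appears in 'set'.  Warn after about 5 seconds,
 * give up and return 0 after about 20 seconds; return 1 on success.
 */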
10704716fd88Sjan static int
10714716fd88Sjan wait_for_set(cpuset_t *set, int who)
10724716fd88Sjan {
10734716fd88Sjan 	int delays;
10744716fd88Sjan 	char *str = "wait_for_set";
10754716fd88Sjan 
10764716fd88Sjan 	for (delays = 0; !CPU_IN_SET(*set, who); delays++) {
10774716fd88Sjan 		if (delays == 500) {
10784716fd88Sjan 			/*
10794716fd88Sjan 			 * After five seconds, things are probably
10804716fd88Sjan 			 * looking a bit bleak - explain the hang.
10814716fd88Sjan 			 */
10824716fd88Sjan 			cmn_err(CE_NOTE, "cpu%d: started, "
10834716fd88Sjan 			    "but not running in the kernel yet", who);
10844716fd88Sjan 			PMD(PMD_SX, ("%s() %d cpu started "
10854716fd88Sjan 			    "but not running in the kernel yet\n",
10864716fd88Sjan 			    str, who))
10874716fd88Sjan 		} else if (delays > 2000) {
10884716fd88Sjan 			/*
10894716fd88Sjan 			 * We waited at least 20 seconds, bail ..
10904716fd88Sjan 			 */
10914716fd88Sjan 			cmn_err(CE_WARN, "cpu%d: timed out", who);
10924716fd88Sjan 			PMD(PMD_SX, ("%s() %d cpu timed out\n",
10934716fd88Sjan 			    str, who))
10944716fd88Sjan 			return (0);
10954716fd88Sjan 		}
10964716fd88Sjan 
10974716fd88Sjan 		/*
10984716fd88Sjan 		 * wait at least 10ms, then check again..
10994716fd88Sjan 		 */
11004716fd88Sjan 		drv_usecwait(10000);
11014716fd88Sjan 	}
11024716fd88Sjan 
11034716fd88Sjan 	return (1);
11044716fd88Sjan }
11053d995820SJoseph A Townsend 
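/*
 * Copy the current contents of thread t's kernel stack into wc_cpu,
 * growing the save buffer if necessary.  The copy is put back by
 * i_cpr_restore_stack() on resume.
 */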
11063d995820SJoseph A Townsend static	void
11073d995820SJoseph A Townsend i_cpr_save_stack(kthread_t *t, wc_cpu_t *wc_cpu)
11083d995820SJoseph A Townsend {
11093d995820SJoseph A Townsend 	size_t	stack_size;	/* size of stack */
11103d995820SJoseph A Townsend 	caddr_t	start = CPR_GET_STACK_START(t);	/* stack start */
11113d995820SJoseph A Townsend 	caddr_t	end = CPR_GET_STACK_END(t);	/* stack end  */
11123d995820SJoseph A Townsend 
11133d995820SJoseph A Townsend 	stack_size = (size_t)end - (size_t)start;
11143d995820SJoseph A Townsend 
11153d995820SJoseph A Townsend 	if (wc_cpu->wc_saved_stack_size < stack_size) {
11163d995820SJoseph A Townsend 		if (wc_cpu->wc_saved_stack != NULL) {
11173d995820SJoseph A Townsend 			kmem_free(wc_cpu->wc_saved_stack,
11183d995820SJoseph A Townsend 			    wc_cpu->wc_saved_stack_size);
11193d995820SJoseph A Townsend 		}
11203d995820SJoseph A Townsend 		wc_cpu->wc_saved_stack = kmem_zalloc(stack_size, KM_SLEEP);
11213d995820SJoseph A Townsend 		wc_cpu->wc_saved_stack_size = stack_size;
11223d995820SJoseph A Townsend 	}
11233d995820SJoseph A Townsend 
11243d995820SJoseph A Townsend 	bcopy(start, wc_cpu->wc_saved_stack, stack_size);
11253d995820SJoseph A Townsend }
11263d995820SJoseph A Townsend 
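/*
 * Copy the stack image saved by i_cpr_save_stack() back onto thread
 * t's kernel stack during resume.
 */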
11273d995820SJoseph A Townsend void
11283d995820SJoseph A Townsend i_cpr_restore_stack(kthread_t *t, greg_t *save_stack)
11293d995820SJoseph A Townsend {
11303d995820SJoseph A Townsend 	size_t	stack_size;	/* size of stack */
11313d995820SJoseph A Townsend 	caddr_t	start = CPR_GET_STACK_START(t);	/* stack start */
11323d995820SJoseph A Townsend 	caddr_t	end = CPR_GET_STACK_END(t);	/* stack end  */
11333d995820SJoseph A Townsend 
11343d995820SJoseph A Townsend 	stack_size = (size_t)end - (size_t)start;
11353d995820SJoseph A Townsend 
11363d995820SJoseph A Townsend 	bcopy(save_stack, start, stack_size);
11373d995820SJoseph A Townsend }