xref: /titanic_52/usr/src/uts/i86pc/os/cpupm/cpu_idle.c (revision a31148363f598def767ac48c5d82e1572e44b935)
10e751525SEric Saxe /*
20e751525SEric Saxe  * CDDL HEADER START
30e751525SEric Saxe  *
40e751525SEric Saxe  * The contents of this file are subject to the terms of the
50e751525SEric Saxe  * Common Development and Distribution License (the "License").
60e751525SEric Saxe  * You may not use this file except in compliance with the License.
70e751525SEric Saxe  *
80e751525SEric Saxe  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90e751525SEric Saxe  * or http://www.opensolaris.org/os/licensing.
100e751525SEric Saxe  * See the License for the specific language governing permissions
110e751525SEric Saxe  * and limitations under the License.
120e751525SEric Saxe  *
130e751525SEric Saxe  * When distributing Covered Code, include this CDDL HEADER in each
140e751525SEric Saxe  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150e751525SEric Saxe  * If applicable, add the following below this CDDL HEADER, with the
160e751525SEric Saxe  * fields enclosed by brackets "[]" replaced with your own identifying
170e751525SEric Saxe  * information: Portions Copyright [yyyy] [name of copyright owner]
180e751525SEric Saxe  *
190e751525SEric Saxe  * CDDL HEADER END
200e751525SEric Saxe  */
210e751525SEric Saxe /*
220e751525SEric Saxe  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230e751525SEric Saxe  * Use is subject to license terms.
240e751525SEric Saxe  */
25cef70d2cSBill Holler /*
26*a3114836SGerry Liu  * Copyright (c) 2009-2010, Intel Corporation.
27cef70d2cSBill Holler  * All rights reserved.
28cef70d2cSBill Holler  */
290e751525SEric Saxe 
300e751525SEric Saxe #include <sys/x86_archext.h>
310e751525SEric Saxe #include <sys/machsystm.h>
320e751525SEric Saxe #include <sys/x_call.h>
330e751525SEric Saxe #include <sys/stat.h>
340e751525SEric Saxe #include <sys/acpi/acpi.h>
350e751525SEric Saxe #include <sys/acpica.h>
360e751525SEric Saxe #include <sys/cpu_acpi.h>
370e751525SEric Saxe #include <sys/cpu_idle.h>
380e751525SEric Saxe #include <sys/cpupm.h>
39fb2caebeSRandy Fishel #include <sys/cpu_event.h>
400e751525SEric Saxe #include <sys/hpet.h>
410e751525SEric Saxe #include <sys/archsystm.h>
420e751525SEric Saxe #include <vm/hat_i86.h>
430e751525SEric Saxe #include <sys/dtrace.h>
440e751525SEric Saxe #include <sys/sdt.h>
450e751525SEric Saxe #include <sys/callb.h>
460e751525SEric Saxe 
47cef70d2cSBill Holler #define	CSTATE_USING_HPET		1
48cef70d2cSBill Holler #define	CSTATE_USING_LAT		2
49cef70d2cSBill Holler 
50*a3114836SGerry Liu #define	CPU_IDLE_STOP_TIMEOUT		1000
51*a3114836SGerry Liu 
520e751525SEric Saxe extern void cpu_idle_adaptive(void);
539aa01d98SBill Holler extern uint32_t cpupm_next_cstate(cma_c_state_t *cs_data,
549aa01d98SBill Holler     cpu_acpi_cstate_t *cstates, uint32_t cs_count, hrtime_t start);
550e751525SEric Saxe 
560e751525SEric Saxe static int cpu_idle_init(cpu_t *);
570e751525SEric Saxe static void cpu_idle_fini(cpu_t *);
58444f66e7SMark Haywood static void cpu_idle_stop(cpu_t *);
590e751525SEric Saxe static boolean_t cpu_deep_idle_callb(void *arg, int code);
600e751525SEric Saxe static boolean_t cpu_idle_cpr_callb(void *arg, int code);
610e751525SEric Saxe static void acpi_cpu_cstate(cpu_acpi_cstate_t *cstate);
620e751525SEric Saxe 
63cef70d2cSBill Holler static boolean_t cstate_use_timer(hrtime_t *lapic_expire, int timer);
64cef70d2cSBill Holler 
65cef70d2cSBill Holler /*
66cef70d2cSBill Holler  * Flags indicating whether the local APIC timer always runs (ARAT) and
67cef70d2cSBill Holler  * whether the HPET timer is used during deep c-states.
68cef70d2cSBill Holler  */
69cef70d2cSBill Holler static boolean_t cpu_cstate_arat = B_FALSE;
70cef70d2cSBill Holler static boolean_t cpu_cstate_hpet = B_FALSE;
71cef70d2cSBill Holler 
720e751525SEric Saxe /*
730e751525SEric Saxe  * Interfaces for modules implementing Intel's deep c-state.
740e751525SEric Saxe  */
750e751525SEric Saxe cpupm_state_ops_t cpu_idle_ops = {
760e751525SEric Saxe 	"Generic ACPI C-state Support",
770e751525SEric Saxe 	cpu_idle_init,
780e751525SEric Saxe 	cpu_idle_fini,
79444f66e7SMark Haywood 	NULL,
80444f66e7SMark Haywood 	cpu_idle_stop
810e751525SEric Saxe };
820e751525SEric Saxe 
830e751525SEric Saxe static kmutex_t		cpu_idle_callb_mutex;
840e751525SEric Saxe static callb_id_t	cpu_deep_idle_callb_id;
850e751525SEric Saxe static callb_id_t	cpu_idle_cpr_callb_id;
860e751525SEric Saxe static uint_t		cpu_idle_cfg_state;
870e751525SEric Saxe 
880e751525SEric Saxe static kmutex_t cpu_idle_mutex;
890e751525SEric Saxe 
900e751525SEric Saxe cpu_idle_kstat_t cpu_idle_kstat = {
910e751525SEric Saxe 	{ "address_space_id",	KSTAT_DATA_STRING },
920e751525SEric Saxe 	{ "latency",		KSTAT_DATA_UINT32 },
930e751525SEric Saxe 	{ "power",		KSTAT_DATA_UINT32 },
940e751525SEric Saxe };
950e751525SEric Saxe 
960e751525SEric Saxe /*
970e751525SEric Saxe  * kstat update function of the c-state info
980e751525SEric Saxe  */
990e751525SEric Saxe static int
1000e751525SEric Saxe cpu_idle_kstat_update(kstat_t *ksp, int flag)
1010e751525SEric Saxe {
1020e751525SEric Saxe 	cpu_acpi_cstate_t *cstate = ksp->ks_private;
1030e751525SEric Saxe 
1040e751525SEric Saxe 	if (flag == KSTAT_WRITE) {
1050e751525SEric Saxe 		return (EACCES);
1060e751525SEric Saxe 	}
1070e751525SEric Saxe 
1080e751525SEric Saxe 	if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
1090e751525SEric Saxe 		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1100e751525SEric Saxe 		"FFixedHW");
1110e751525SEric Saxe 	} else if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1120e751525SEric Saxe 		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1130e751525SEric Saxe 		"SystemIO");
1140e751525SEric Saxe 	} else {
1150e751525SEric Saxe 		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1160e751525SEric Saxe 		"Unsupported");
1170e751525SEric Saxe 	}
1180e751525SEric Saxe 
1190e751525SEric Saxe 	cpu_idle_kstat.cs_latency.value.ui32 = cstate->cs_latency;
1200e751525SEric Saxe 	cpu_idle_kstat.cs_power.value.ui32 = cstate->cs_power;
1210e751525SEric Saxe 
1220e751525SEric Saxe 	return (0);
1230e751525SEric Saxe }
1240e751525SEric Saxe 
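/*
 * The per-C-state kstats above can be read from userland with
 * libkstat(3LIB).  A minimal illustrative sketch follows (it is not part
 * of this file); it assumes the "cstate" module name, per-CPU instance
 * number and "latency" statistic that cpu_idle_init() below creates, and
 * it prints the reported wakeup latency of every C-state kstat on CPU 0.
 */
#include <stdio.h>
#include <string.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t	*kc = kstat_open();
	kstat_t		*ksp;
	kstat_named_t	*kn;

	if (kc == NULL)
		return (1);

	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		if (strcmp(ksp->ks_module, "cstate") != 0 ||
		    ksp->ks_instance != 0)
			continue;
		/* kstat_read() invokes cpu_idle_kstat_update() above. */
		if (kstat_read(kc, ksp, NULL) == -1)
			continue;
		kn = kstat_data_lookup(ksp, "latency");
		if (kn != NULL)
			(void) printf("%s: latency %u\n", ksp->ks_name,
			    kn->value.ui32);
	}
	(void) kstat_close(kc);
	return (0);
}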
1250e751525SEric Saxe /*
126cef70d2cSBill Holler  * Used during configuration callbacks to manage implementation specific
127cef70d2cSBill Holler  * details of the hardware timer used during Deep C-state.
128cef70d2cSBill Holler  */
129cef70d2cSBill Holler boolean_t
130cef70d2cSBill Holler cstate_timer_callback(int code)
131cef70d2cSBill Holler {
132cef70d2cSBill Holler 	if (cpu_cstate_arat) {
133cef70d2cSBill Holler 		return (B_TRUE);
134cef70d2cSBill Holler 	} else if (cpu_cstate_hpet) {
135cef70d2cSBill Holler 		return (hpet.callback(code));
136cef70d2cSBill Holler 	}
137cef70d2cSBill Holler 	return (B_FALSE);
138cef70d2cSBill Holler }
139cef70d2cSBill Holler 
140cef70d2cSBill Holler /*
141cef70d2cSBill Holler  * Some Local APIC Timers do not work during Deep C-states.
142cef70d2cSBill Holler  * The Deep C-state idle function uses this function to ensure it is using a
143cef70d2cSBill Holler  * hardware timer that works during Deep C-states.  This function also
144cef70d2cSBill Holler  * switches the timer back to the LAPIC Timer after Deep C-state.
145cef70d2cSBill Holler  */
146cef70d2cSBill Holler static boolean_t
147cef70d2cSBill Holler cstate_use_timer(hrtime_t *lapic_expire, int timer)
148cef70d2cSBill Holler {
149cef70d2cSBill Holler 	if (cpu_cstate_arat)
150cef70d2cSBill Holler 		return (B_TRUE);
151cef70d2cSBill Holler 
152cef70d2cSBill Holler 	/*
153cef70d2cSBill Holler 	 * Return B_FALSE if there is neither ARAT nor HPET support.
154cef70d2cSBill Holler 	 */
155cef70d2cSBill Holler 	if (!cpu_cstate_hpet)
156cef70d2cSBill Holler 		return (B_FALSE);
157cef70d2cSBill Holler 
158cef70d2cSBill Holler 	switch (timer) {
159cef70d2cSBill Holler 	case CSTATE_USING_HPET:
160cef70d2cSBill Holler 		return (hpet.use_hpet_timer(lapic_expire));
161cef70d2cSBill Holler 	case CSTATE_USING_LAT:
162cef70d2cSBill Holler 		hpet.use_lapic_timer(*lapic_expire);
163cef70d2cSBill Holler 		return (B_TRUE);
164cef70d2cSBill Holler 	default:
165cef70d2cSBill Holler 		return (B_FALSE);
166cef70d2cSBill Holler 	}
167cef70d2cSBill Holler }
168cef70d2cSBill Holler 
169cef70d2cSBill Holler /*
1700e751525SEric Saxe  * c-state wakeup function.
1710e751525SEric Saxe  * Similar to cpu_wakeup and cpu_wakeup_mwait except this function deals
1720e751525SEric Saxe  * with CPUs asleep in MWAIT, HLT, or ACPI Deep C-State.
1730e751525SEric Saxe  */
1740e751525SEric Saxe void
1750e751525SEric Saxe cstate_wakeup(cpu_t *cp, int bound)
1760e751525SEric Saxe {
1770e751525SEric Saxe 	struct machcpu	*mcpu = &(cp->cpu_m);
1780e751525SEric Saxe 	volatile uint32_t *mcpu_mwait = mcpu->mcpu_mwait;
1790e751525SEric Saxe 	cpupart_t	*cpu_part;
1800e751525SEric Saxe 	uint_t		cpu_found;
1810e751525SEric Saxe 	processorid_t	cpu_sid;
1820e751525SEric Saxe 
1830e751525SEric Saxe 	cpu_part = cp->cpu_part;
1840e751525SEric Saxe 	cpu_sid = cp->cpu_seqid;
1850e751525SEric Saxe 	/*
1860e751525SEric Saxe 	 * Clear the halted bit for that CPU since it will be woken up
1870e751525SEric Saxe 	 * in a moment.
1880e751525SEric Saxe 	 */
1890e751525SEric Saxe 	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
1900e751525SEric Saxe 		/*
1910e751525SEric Saxe 		 * Clear the halted bit for that CPU since it will be
1920e751525SEric Saxe 		 * poked in a moment.
1930e751525SEric Saxe 		 */
1940e751525SEric Saxe 		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
1950e751525SEric Saxe 
1960e751525SEric Saxe 		/*
1970e751525SEric Saxe 		 * We may find the current CPU present in the halted cpuset
1980e751525SEric Saxe 		 * if we're in the context of an interrupt that occurred
1990e751525SEric Saxe 		 * before we had a chance to clear our bit in cpu_idle().
2000e751525SEric Saxe 		 * Waking ourself is obviously unnecessary, since if
2010e751525SEric Saxe 		 * we're here, we're not halted.
2020e751525SEric Saxe 		 */
2030e751525SEric Saxe 		if (cp != CPU) {
2040e751525SEric Saxe 			/*
2050e751525SEric Saxe 			 * Use correct wakeup mechanism
2060e751525SEric Saxe 			 */
2070e751525SEric Saxe 			if ((mcpu_mwait != NULL) &&
2080e751525SEric Saxe 			    (*mcpu_mwait == MWAIT_HALTED))
2090e751525SEric Saxe 				MWAIT_WAKEUP(cp);
2100e751525SEric Saxe 			else
2110e751525SEric Saxe 				poke_cpu(cp->cpu_id);
2120e751525SEric Saxe 		}
2130e751525SEric Saxe 		return;
2140e751525SEric Saxe 	} else {
2150e751525SEric Saxe 		/*
2160e751525SEric Saxe 		 * This cpu isn't halted, but it's idle or undergoing a
2170e751525SEric Saxe 		 * context switch. No need to awaken anyone else.
2180e751525SEric Saxe 		 */
2190e751525SEric Saxe 		if (cp->cpu_thread == cp->cpu_idle_thread ||
2200e751525SEric Saxe 		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
2210e751525SEric Saxe 			return;
2220e751525SEric Saxe 	}
2230e751525SEric Saxe 
2240e751525SEric Saxe 	/*
2250e751525SEric Saxe 	 * No need to wake up other CPUs if the thread we just enqueued
2260e751525SEric Saxe 	 * is bound.
2270e751525SEric Saxe 	 */
2280e751525SEric Saxe 	if (bound)
2290e751525SEric Saxe 		return;
2300e751525SEric Saxe 
2310e751525SEric Saxe 
2320e751525SEric Saxe 	/*
2330e751525SEric Saxe 	 * See if there are any other halted CPUs. If there are, then
2340e751525SEric Saxe 	 * select one, and awaken it.
2350e751525SEric Saxe 	 * It's possible that after we find a CPU, somebody else
2360e751525SEric Saxe 	 * will awaken it before we get the chance.
2370e751525SEric Saxe 	 * In that case, look again.
2380e751525SEric Saxe 	 */
2390e751525SEric Saxe 	do {
2400e751525SEric Saxe 		cpu_found = bitset_find(&cpu_part->cp_haltset);
2410e751525SEric Saxe 		if (cpu_found == (uint_t)-1)
2420e751525SEric Saxe 			return;
2430e751525SEric Saxe 
2440e751525SEric Saxe 	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
2450e751525SEric Saxe 	    cpu_found) < 0);
2460e751525SEric Saxe 
2470e751525SEric Saxe 	/*
2480e751525SEric Saxe 	 * Must use correct wakeup mechanism to avoid lost wakeup of
2490e751525SEric Saxe 	 * alternate cpu.
2500e751525SEric Saxe 	 */
2510e751525SEric Saxe 	if (cpu_found != CPU->cpu_seqid) {
252cc31ad68Saubrey.li@intel.com 		mcpu_mwait = cpu_seq[cpu_found]->cpu_m.mcpu_mwait;
2530e751525SEric Saxe 		if ((mcpu_mwait != NULL) && (*mcpu_mwait == MWAIT_HALTED))
2540e751525SEric Saxe 			MWAIT_WAKEUP(cpu_seq[cpu_found]);
2550e751525SEric Saxe 		else
2560e751525SEric Saxe 			poke_cpu(cpu_seq[cpu_found]->cpu_id);
2570e751525SEric Saxe 	}
2580e751525SEric Saxe }
2590e751525SEric Saxe 
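/*
 * The do/while loop above is a "find a halted CPU, claim it atomically,
 * and retry if somebody else claimed it first" pattern.  A self-contained
 * sketch of that same shape using C11 atomics on a plain 64-bit mask
 * (illustrative only; bitset_find() and bitset_atomic_test_and_del() are
 * the real primitives, and __builtin_ctzll() is a gcc/clang builtin):
 */
#include <stdatomic.h>
#include <stdint.h>

/* Returns the claimed CPU index, or -1 if no CPU is currently halted. */
static int
claim_halted_cpu(_Atomic uint64_t *haltmask)
{
	uint64_t old, new;
	int cpu;

	old = atomic_load(haltmask);
	for (;;) {
		if (old == 0)
			return (-1);		/* nobody is halted */
		cpu = __builtin_ctzll(old);	/* lowest halted CPU */
		new = old & ~(UINT64_C(1) << cpu);
		/*
		 * If another waker cleared a bit first, the compare-and-swap
		 * fails, 'old' is refreshed with the current mask, and we
		 * look again.
		 */
		if (atomic_compare_exchange_weak(haltmask, &old, new))
			return (cpu);
	}
}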
2600e751525SEric Saxe /*
261fb2caebeSRandy Fishel  * Function called by CPU idle notification framework to check whether CPU
262fb2caebeSRandy Fishel  * has been awakened. It will be called with interrupts disabled.
263fb2caebeSRandy Fishel  * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
264fb2caebeSRandy Fishel  * notification framework.
265fb2caebeSRandy Fishel  */
266fb2caebeSRandy Fishel static void
267fb2caebeSRandy Fishel acpi_cpu_mwait_check_wakeup(void *arg)
268fb2caebeSRandy Fishel {
269fb2caebeSRandy Fishel 	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;
270fb2caebeSRandy Fishel 
271fb2caebeSRandy Fishel 	ASSERT(arg != NULL);
272fb2caebeSRandy Fishel 	if (*mcpu_mwait != MWAIT_HALTED) {
273fb2caebeSRandy Fishel 		/*
274fb2caebeSRandy Fishel 		 * CPU has been awakened, notify CPU idle notification system.
275fb2caebeSRandy Fishel 		 */
276fb2caebeSRandy Fishel 		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
277fb2caebeSRandy Fishel 	} else {
278fb2caebeSRandy Fishel 		/*
279fb2caebeSRandy Fishel 		 * Toggle interrupt flag to detect pending interrupts.
280fb2caebeSRandy Fishel 		 * If interrupt happened, do_interrupt() will notify CPU idle
281fb2caebeSRandy Fishel 		 * notification framework so no need to call cpu_idle_exit()
282fb2caebeSRandy Fishel 		 * here.
283fb2caebeSRandy Fishel 		 */
284fb2caebeSRandy Fishel 		sti();
285fb2caebeSRandy Fishel 		SMT_PAUSE();
286fb2caebeSRandy Fishel 		cli();
287fb2caebeSRandy Fishel 	}
288fb2caebeSRandy Fishel }
289fb2caebeSRandy Fishel 
290fb2caebeSRandy Fishel static void
291fb2caebeSRandy Fishel acpi_cpu_mwait_ipi_check_wakeup(void *arg)
292fb2caebeSRandy Fishel {
293fb2caebeSRandy Fishel 	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;
294fb2caebeSRandy Fishel 
295fb2caebeSRandy Fishel 	ASSERT(arg != NULL);
296fb2caebeSRandy Fishel 	if (*mcpu_mwait != MWAIT_WAKEUP_IPI) {
297fb2caebeSRandy Fishel 		/*
298fb2caebeSRandy Fishel 		 * CPU has been awakened, notify CPU idle notification system.
299fb2caebeSRandy Fishel 		 */
300fb2caebeSRandy Fishel 		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
301fb2caebeSRandy Fishel 	} else {
302fb2caebeSRandy Fishel 		/*
303fb2caebeSRandy Fishel 		 * Toggle interrupt flag to detect pending interrupts.
304fb2caebeSRandy Fishel 		 * If interrupt happened, do_interrupt() will notify CPU idle
305fb2caebeSRandy Fishel 		 * notification framework so no need to call cpu_idle_exit()
306fb2caebeSRandy Fishel 		 * here.
307fb2caebeSRandy Fishel 		 */
308fb2caebeSRandy Fishel 		sti();
309fb2caebeSRandy Fishel 		SMT_PAUSE();
310fb2caebeSRandy Fishel 		cli();
311fb2caebeSRandy Fishel 	}
312fb2caebeSRandy Fishel }
313fb2caebeSRandy Fishel 
314fb2caebeSRandy Fishel /*ARGSUSED*/
315fb2caebeSRandy Fishel static void
316fb2caebeSRandy Fishel acpi_cpu_check_wakeup(void *arg)
317fb2caebeSRandy Fishel {
318fb2caebeSRandy Fishel 	/*
319fb2caebeSRandy Fishel 	 * Toggle interrupt flag to detect pending interrupts.
320fb2caebeSRandy Fishel 	 * If interrupt happened, do_interrupt() will notify CPU idle
321fb2caebeSRandy Fishel 	 * notification framework so no need to call cpu_idle_exit() here.
322fb2caebeSRandy Fishel 	 */
323fb2caebeSRandy Fishel 	sti();
324fb2caebeSRandy Fishel 	SMT_PAUSE();
325fb2caebeSRandy Fishel 	cli();
326fb2caebeSRandy Fishel }
327fb2caebeSRandy Fishel 
328fb2caebeSRandy Fishel /*
3290e751525SEric Saxe  * enter deep c-state handler
3300e751525SEric Saxe  */
3310e751525SEric Saxe static void
3320e751525SEric Saxe acpi_cpu_cstate(cpu_acpi_cstate_t *cstate)
3330e751525SEric Saxe {
3340e751525SEric Saxe 	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
3350e751525SEric Saxe 	cpu_t			*cpup = CPU;
3360e751525SEric Saxe 	processorid_t		cpu_sid = cpup->cpu_seqid;
3370e751525SEric Saxe 	cpupart_t		*cp = cpup->cpu_part;
3380e751525SEric Saxe 	hrtime_t		lapic_expire;
3390e751525SEric Saxe 	uint8_t			type = cstate->cs_addrspace_id;
3400e751525SEric Saxe 	uint32_t		cs_type = cstate->cs_type;
3410e751525SEric Saxe 	int			hset_update = 1;
342cef70d2cSBill Holler 	boolean_t		using_timer;
343fb2caebeSRandy Fishel 	cpu_idle_check_wakeup_t check_func = &acpi_cpu_check_wakeup;
3440e751525SEric Saxe 
3450e751525SEric Saxe 	/*
3460e751525SEric Saxe 	 * Set our mcpu_mwait here, so we can tell if anyone tries to
3470e751525SEric Saxe 	 * wake us between now and when we call mwait.  No other cpu will
3480e751525SEric Saxe 	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
3490e751525SEric Saxe 	 */
3500e751525SEric Saxe 	if (mcpu_mwait) {
351fb2caebeSRandy Fishel 		if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
3520e751525SEric Saxe 			*mcpu_mwait = MWAIT_WAKEUP_IPI;
353fb2caebeSRandy Fishel 			check_func = &acpi_cpu_mwait_ipi_check_wakeup;
354fb2caebeSRandy Fishel 		} else {
3550e751525SEric Saxe 			*mcpu_mwait = MWAIT_HALTED;
356fb2caebeSRandy Fishel 			check_func = &acpi_cpu_mwait_check_wakeup;
357fb2caebeSRandy Fishel 		}
3580e751525SEric Saxe 	}
3590e751525SEric Saxe 
3600e751525SEric Saxe 	/*
3610e751525SEric Saxe 	 * If this CPU is online, and there are multiple CPUs
3620e751525SEric Saxe 	 * in the system, then we should note our halting
3630e751525SEric Saxe 	 * by adding ourselves to the partition's halted CPU
3640e751525SEric Saxe 	 * bitmap. This allows other CPUs to find/awaken us when
3650e751525SEric Saxe 	 * work becomes available.
3660e751525SEric Saxe 	 */
3670e751525SEric Saxe 	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
3680e751525SEric Saxe 		hset_update = 0;
3690e751525SEric Saxe 
3700e751525SEric Saxe 	/*
3710e751525SEric Saxe 	 * Add ourselves to the partition's halted CPUs bitmask
3720e751525SEric Saxe 	 * and set our HALTED flag, if necessary.
3730e751525SEric Saxe 	 *
3740e751525SEric Saxe 	 * When a thread becomes runnable, it is placed on the queue
3750e751525SEric Saxe 	 * and then the halted cpuset is checked to determine who
3760e751525SEric Saxe 	 * (if anyone) should be awakened. We therefore need to first
3770e751525SEric Saxe 	 * add ourselves to the halted cpuset, and then check if there
3780e751525SEric Saxe 	 * is any work available.
3790e751525SEric Saxe 	 *
3800e751525SEric Saxe 	 * Note that memory barriers after updating the HALTED flag
3810e751525SEric Saxe 	 * are not necessary since an atomic operation (updating the bitmap)
3820e751525SEric Saxe 	 * immediately follows. On x86 the atomic operation acts as a
3830e751525SEric Saxe 	 * memory barrier for the update of cpu_disp_flags.
3840e751525SEric Saxe 	 */
3850e751525SEric Saxe 	if (hset_update) {
3860e751525SEric Saxe 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
3870e751525SEric Saxe 		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
3880e751525SEric Saxe 	}
3890e751525SEric Saxe 
3900e751525SEric Saxe 	/*
3910e751525SEric Saxe 	 * Check to make sure there's really nothing to do.
3920e751525SEric Saxe 	 * Work destined for this CPU may become available after
3930e751525SEric Saxe 	 * this check. We'll be notified through the clearing of our
3940e751525SEric Saxe 	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
3950e751525SEric Saxe 	 *
3960e751525SEric Saxe 	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
3970e751525SEric Saxe 	 */
3980e751525SEric Saxe 	if (disp_anywork()) {
3990e751525SEric Saxe 		if (hset_update) {
4000e751525SEric Saxe 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4010e751525SEric Saxe 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
4020e751525SEric Saxe 		}
4030e751525SEric Saxe 		return;
4040e751525SEric Saxe 	}
4050e751525SEric Saxe 
4060e751525SEric Saxe 	/*
4070e751525SEric Saxe 	 * We're on our way to being halted.
4080e751525SEric Saxe 	 *
4090e751525SEric Saxe 	 * The local APIC timer can stop in ACPI C2 and deeper c-states.
410cef70d2cSBill Holler 	 * Try to program the HPET hardware to substitute for this CPU's
411cef70d2cSBill Holler 	 * LAPIC timer.
412cef70d2cSBill Holler 	 * cstate_use_timer() could disable the LAPIC Timer.  Make sure
413cef70d2cSBill Holler 	 * to start the LAPIC Timer again before leaving this function.
4140e751525SEric Saxe 	 *
415cef70d2cSBill Holler 	 * Disable interrupts here so we will awaken immediately after halting
416cef70d2cSBill Holler 	 * if someone tries to poke us between now and the time we actually
417cef70d2cSBill Holler 	 * halt.
4180e751525SEric Saxe 	 */
419cef70d2cSBill Holler 	cli();
420cef70d2cSBill Holler 	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);
4210e751525SEric Saxe 
4220e751525SEric Saxe 	/*
4230e751525SEric Saxe 	 * We check for the presence of our bit after disabling interrupts.
4240e751525SEric Saxe 	 * If it's cleared, we'll return. If the bit is cleared after
4250e751525SEric Saxe 	 * we check then the cstate_wakeup() will pop us out of the halted
4260e751525SEric Saxe 	 * state.
4270e751525SEric Saxe 	 *
4280e751525SEric Saxe 	 * This means that the ordering of the cstate_wakeup() and the clearing
4290e751525SEric Saxe 	 * of the bit by cpu_wakeup is important.
4300e751525SEric Saxe 	 * cpu_wakeup() must clear our cp_haltset bit, and then call
4310e751525SEric Saxe 	 * cstate_wakeup().
4320e751525SEric Saxe 	 * acpi_cpu_cstate() must disable interrupts, then check for the bit.
4330e751525SEric Saxe 	 */
4340e751525SEric Saxe 	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
435cef70d2cSBill Holler 		(void) cstate_use_timer(&lapic_expire,
436cef70d2cSBill Holler 		    CSTATE_USING_LAT);
437cef70d2cSBill Holler 		sti();
4380e751525SEric Saxe 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4390e751525SEric Saxe 		return;
4400e751525SEric Saxe 	}
4410e751525SEric Saxe 
4420e751525SEric Saxe 	/*
4430e751525SEric Saxe 	 * The check for anything locally runnable is here for performance
4440e751525SEric Saxe 	 * and isn't needed for correctness. disp_nrunnable ought to be
4450e751525SEric Saxe 	 * in our cache still, so it's inexpensive to check, and if there
4460e751525SEric Saxe 	 * is anything runnable we won't have to wait for the poke.
4470e751525SEric Saxe 	 */
4480e751525SEric Saxe 	if (cpup->cpu_disp->disp_nrunnable != 0) {
449cef70d2cSBill Holler 		(void) cstate_use_timer(&lapic_expire,
450cef70d2cSBill Holler 		    CSTATE_USING_LAT);
451cef70d2cSBill Holler 		sti();
4520e751525SEric Saxe 		if (hset_update) {
4530e751525SEric Saxe 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4540e751525SEric Saxe 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
4550e751525SEric Saxe 		}
4560e751525SEric Saxe 		return;
4570e751525SEric Saxe 	}
4580e751525SEric Saxe 
459cef70d2cSBill Holler 	if (using_timer == B_FALSE) {
4600e751525SEric Saxe 
461cef70d2cSBill Holler 		(void) cstate_use_timer(&lapic_expire,
462cef70d2cSBill Holler 		    CSTATE_USING_LAT);
463cef70d2cSBill Holler 		sti();
4640e751525SEric Saxe 
4650e751525SEric Saxe 		/*
4660e751525SEric Saxe 		 * We are currently unable to program the HPET to act as this
467cef70d2cSBill Holler 		 * CPU's proxy LAPIC timer.  This CPU cannot enter C2 or deeper
468cef70d2cSBill Holler 		 * because no timer is set to wake it up while its LAPIC timer
4690e751525SEric Saxe 		 * stalls in deep C-States.
4700e751525SEric Saxe 		 * Enter C1 instead.
4710e751525SEric Saxe 		 *
4720e751525SEric Saxe 		 * cstate_wakeup() will wake this CPU with an IPI which
4730e751525SEric Saxe 		 * works with MWAIT.
4740e751525SEric Saxe 		 */
4750e751525SEric Saxe 		i86_monitor(mcpu_mwait, 0, 0);
4760e751525SEric Saxe 		if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) == MWAIT_HALTED) {
477fb2caebeSRandy Fishel 			if (cpu_idle_enter(IDLE_STATE_C1, 0,
478fb2caebeSRandy Fishel 			    check_func, (void *)mcpu_mwait) == 0) {
479fb2caebeSRandy Fishel 				if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) ==
480fb2caebeSRandy Fishel 				    MWAIT_HALTED) {
4810e751525SEric Saxe 					i86_mwait(0, 0);
482fb2caebeSRandy Fishel 				}
483fb2caebeSRandy Fishel 				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
484fb2caebeSRandy Fishel 			}
4850e751525SEric Saxe 		}
4860e751525SEric Saxe 
4870e751525SEric Saxe 		/*
4880e751525SEric Saxe 		 * We're no longer halted
4890e751525SEric Saxe 		 */
4900e751525SEric Saxe 		if (hset_update) {
4910e751525SEric Saxe 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4920e751525SEric Saxe 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
4930e751525SEric Saxe 		}
4940e751525SEric Saxe 		return;
4950e751525SEric Saxe 	}
4960e751525SEric Saxe 
4970e751525SEric Saxe 	if (type == ACPI_ADR_SPACE_FIXED_HARDWARE) {
4980e751525SEric Saxe 		/*
4990e751525SEric Saxe 		 * We're on our way to being halted.
5000e751525SEric Saxe 		 * To avoid a lost wakeup, arm the monitor before checking
5010e751525SEric Saxe 		 * if another cpu wrote to mcpu_mwait to wake us up.
5020e751525SEric Saxe 		 */
5030e751525SEric Saxe 		i86_monitor(mcpu_mwait, 0, 0);
5040e751525SEric Saxe 		if (*mcpu_mwait == MWAIT_HALTED) {
505fb2caebeSRandy Fishel 			if (cpu_idle_enter((uint_t)cs_type, 0,
506fb2caebeSRandy Fishel 			    check_func, (void *)mcpu_mwait) == 0) {
507fb2caebeSRandy Fishel 				if (*mcpu_mwait == MWAIT_HALTED) {
508fb2caebeSRandy Fishel 					i86_mwait(cstate->cs_address, 1);
509fb2caebeSRandy Fishel 				}
510fb2caebeSRandy Fishel 				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
511fb2caebeSRandy Fishel 			}
5120e751525SEric Saxe 		}
5130e751525SEric Saxe 	} else if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
5140e751525SEric Saxe 		uint32_t value;
5150e751525SEric Saxe 		ACPI_TABLE_FADT *gbl_FADT;
5160e751525SEric Saxe 
5170e751525SEric Saxe 		if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
518fb2caebeSRandy Fishel 			if (cpu_idle_enter((uint_t)cs_type, 0,
519fb2caebeSRandy Fishel 			    check_func, (void *)mcpu_mwait) == 0) {
520fb2caebeSRandy Fishel 				if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
521fb2caebeSRandy Fishel 					(void) cpu_acpi_read_port(
522fb2caebeSRandy Fishel 					    cstate->cs_address, &value, 8);
5230e751525SEric Saxe 					acpica_get_global_FADT(&gbl_FADT);
5240e751525SEric Saxe 					(void) cpu_acpi_read_port(
525fb2caebeSRandy Fishel 					    gbl_FADT->XPmTimerBlock.Address,
526fb2caebeSRandy Fishel 					    &value, 32);
527fb2caebeSRandy Fishel 				}
528fb2caebeSRandy Fishel 				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
529fb2caebeSRandy Fishel 			}
5300e751525SEric Saxe 		}
5310e751525SEric Saxe 	}
5320e751525SEric Saxe 
5330e751525SEric Saxe 	/*
534cef70d2cSBill Holler 	 * The LAPIC timer may have stopped in deep c-state.
535cef70d2cSBill Holler 	 * Reprogram this CPU's LAPIC here before enabling interrupts.
5360e751525SEric Saxe 	 */
537cef70d2cSBill Holler 	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
538cef70d2cSBill Holler 	sti();
5390e751525SEric Saxe 
5400e751525SEric Saxe 	/*
5410e751525SEric Saxe 	 * We're no longer halted
5420e751525SEric Saxe 	 */
5430e751525SEric Saxe 	if (hset_update) {
5440e751525SEric Saxe 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
5450e751525SEric Saxe 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
5460e751525SEric Saxe 	}
5470e751525SEric Saxe }
5480e751525SEric Saxe 
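/*
 * The ACPI_ADR_SPACE_FIXED_HARDWARE path above follows the usual
 * MONITOR/MWAIT discipline: publish MWAIT_HALTED, arm the monitor,
 * re-check the monitored word, and only then MWAIT, so that a wakeup
 * store landing between the check and the wait is never lost.  A rough
 * userland analogue of that ordering (illustrative only; the real
 * primitives are i86_monitor()/i86_mwait() above, and the wait below is
 * simply a spin loop):
 */
#include <stdatomic.h>

#define	HALTED	0
#define	WOKEN	1

static _Atomic int wake_word = HALTED;

/* Sleeper: publish the halted state, arm, re-check, then wait. */
static void
sleeper(void)
{
	atomic_store(&wake_word, HALTED);	/* *mcpu_mwait = MWAIT_HALTED */
	/* i86_monitor() would arm the monitor on &wake_word here. */
	if (atomic_load(&wake_word) == HALTED) {
		/* i86_mwait() analogue: wait for the waker's store. */
		while (atomic_load(&wake_word) == HALTED)
			;
	}
}

/* Waker: a single store to the monitored word is sufficient. */
static void
waker(void)
{
	atomic_store(&wake_word, WOKEN);	/* MWAIT_WAKEUP() analogue */
}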
5490e751525SEric Saxe /*
5500e751525SEric Saxe  * Idle the present CPU, deep c-state is supported
5510e751525SEric Saxe  */
5520e751525SEric Saxe void
5530e751525SEric Saxe cpu_acpi_idle(void)
5540e751525SEric Saxe {
5550e751525SEric Saxe 	cpu_t *cp = CPU;
5560e751525SEric Saxe 	cpu_acpi_handle_t handle;
5570e751525SEric Saxe 	cma_c_state_t *cs_data;
5589aa01d98SBill Holler 	cpu_acpi_cstate_t *cstates;
5590e751525SEric Saxe 	hrtime_t start, end;
5600e751525SEric Saxe 	int cpu_max_cstates;
5619aa01d98SBill Holler 	uint32_t cs_indx;
5629aa01d98SBill Holler 	uint16_t cs_type;
5630e751525SEric Saxe 
5640e751525SEric Saxe 	cpupm_mach_state_t *mach_state =
5650e751525SEric Saxe 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
5660e751525SEric Saxe 	handle = mach_state->ms_acpi_handle;
5670e751525SEric Saxe 	ASSERT(CPU_ACPI_CSTATES(handle) != NULL);
5680e751525SEric Saxe 
5690e751525SEric Saxe 	cs_data = mach_state->ms_cstate.cma_state.cstate;
5709aa01d98SBill Holler 	cstates = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
5719aa01d98SBill Holler 	ASSERT(cstates != NULL);
5720e751525SEric Saxe 	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
5730e751525SEric Saxe 	if (cpu_max_cstates > CPU_MAX_CSTATES)
5740e751525SEric Saxe 		cpu_max_cstates = CPU_MAX_CSTATES;
5759aa01d98SBill Holler 	if (cpu_max_cstates == 1) {	/* no ACPI c-state data */
5769aa01d98SBill Holler 		(*non_deep_idle_cpu)();
5779aa01d98SBill Holler 		return;
5789aa01d98SBill Holler 	}
5790e751525SEric Saxe 
5800e751525SEric Saxe 	start = gethrtime_unscaled();
5810e751525SEric Saxe 
5829aa01d98SBill Holler 	cs_indx = cpupm_next_cstate(cs_data, cstates, cpu_max_cstates, start);
5830e751525SEric Saxe 
5849aa01d98SBill Holler 	cs_type = cstates[cs_indx].cs_type;
5850e751525SEric Saxe 
5860e751525SEric Saxe 	switch (cs_type) {
5870e751525SEric Saxe 	default:
5880e751525SEric Saxe 		/* FALLTHROUGH */
5890e751525SEric Saxe 	case CPU_ACPI_C1:
5900e751525SEric Saxe 		(*non_deep_idle_cpu)();
5910e751525SEric Saxe 		break;
5920e751525SEric Saxe 
5930e751525SEric Saxe 	case CPU_ACPI_C2:
5949aa01d98SBill Holler 		acpi_cpu_cstate(&cstates[cs_indx]);
5950e751525SEric Saxe 		break;
5960e751525SEric Saxe 
5970e751525SEric Saxe 	case CPU_ACPI_C3:
5980e751525SEric Saxe 		/*
59956b56c0dSBill Holler 		 * All supported Intel processors maintain cache coherency
60056b56c0dSBill Holler 		 * during C3.  Currently when entering C3 processors flush
60156b56c0dSBill Holler 		 * core caches to higher level shared cache. The shared cache
60256b56c0dSBill Holler 		 * maintains state and supports probes during C3.
60356b56c0dSBill Holler 		 * Consequently there is no need to handle cache coherency
60456b56c0dSBill Holler 		 * and Bus Master activity here with the cache flush, BM_RLD
60556b56c0dSBill Holler 		 * bit, BM_STS bit, nor PM2_CNT.ARB_DIS mechanisms described
60656b56c0dSBill Holler 		 * in section 8.1.4 of the ACPI Specification 4.0.
6070e751525SEric Saxe 		 */
6089aa01d98SBill Holler 		acpi_cpu_cstate(&cstates[cs_indx]);
6090e751525SEric Saxe 		break;
6100e751525SEric Saxe 	}
6110e751525SEric Saxe 
6120e751525SEric Saxe 	end = gethrtime_unscaled();
6130e751525SEric Saxe 
6140e751525SEric Saxe 	/*
6150e751525SEric Saxe 	 * Update statistics
6160e751525SEric Saxe 	 */
6170e751525SEric Saxe 	cpupm_wakeup_cstate_data(cs_data, end);
6180e751525SEric Saxe }
6190e751525SEric Saxe 
6200e751525SEric Saxe boolean_t
6210e751525SEric Saxe cpu_deep_cstates_supported(void)
6220e751525SEric Saxe {
6230e751525SEric Saxe 	extern int	idle_cpu_no_deep_c;
6240e751525SEric Saxe 
6250e751525SEric Saxe 	if (idle_cpu_no_deep_c)
6260e751525SEric Saxe 		return (B_FALSE);
6270e751525SEric Saxe 
6280e751525SEric Saxe 	if (!cpuid_deep_cstates_supported())
6290e751525SEric Saxe 		return (B_FALSE);
6300e751525SEric Saxe 
631cef70d2cSBill Holler 	if (cpuid_arat_supported()) {
632cef70d2cSBill Holler 		cpu_cstate_arat = B_TRUE;
6330e751525SEric Saxe 		return (B_TRUE);
6340e751525SEric Saxe 	}
6350e751525SEric Saxe 
636cef70d2cSBill Holler 	if ((hpet.supported == HPET_FULL_SUPPORT) &&
637cef70d2cSBill Holler 	    hpet.install_proxy()) {
638cef70d2cSBill Holler 		cpu_cstate_hpet = B_TRUE;
639cef70d2cSBill Holler 		return (B_TRUE);
640cef70d2cSBill Holler 	}
641cef70d2cSBill Holler 
642cef70d2cSBill Holler 	return (B_FALSE);
643cef70d2cSBill Holler }
644cef70d2cSBill Holler 
6450e751525SEric Saxe /*
6460e751525SEric Saxe  * Validate that this processor supports deep cstate and if so,
6470e751525SEric Saxe  * get the c-state data from ACPI and cache it.
6480e751525SEric Saxe  */
6490e751525SEric Saxe static int
6500e751525SEric Saxe cpu_idle_init(cpu_t *cp)
6510e751525SEric Saxe {
6520e751525SEric Saxe 	cpupm_mach_state_t *mach_state =
6530e751525SEric Saxe 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
6540e751525SEric Saxe 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
6550e751525SEric Saxe 	cpu_acpi_cstate_t *cstate;
6560e751525SEric Saxe 	char name[KSTAT_STRLEN];
6570e751525SEric Saxe 	int cpu_max_cstates, i;
65800f97612SMark Haywood 	int ret;
6590e751525SEric Saxe 
6600e751525SEric Saxe 	/*
6610e751525SEric Saxe 	 * Cache the C-state specific ACPI data.
6620e751525SEric Saxe 	 */
66300f97612SMark Haywood 	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
66400f97612SMark Haywood 		if (ret < 0)
6650e751525SEric Saxe 			cmn_err(CE_NOTE,
66600f97612SMark Haywood 			    "!Support for CPU deep idle states is being "
66700f97612SMark Haywood 			    "disabled due to errors parsing ACPI C-state "
66800f97612SMark Haywood 			    "objects exported by BIOS.");
6690e751525SEric Saxe 		cpu_idle_fini(cp);
6700e751525SEric Saxe 		return (-1);
6710e751525SEric Saxe 	}
6720e751525SEric Saxe 
6730e751525SEric Saxe 	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
6740e751525SEric Saxe 
6750e751525SEric Saxe 	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
6760e751525SEric Saxe 
6770e751525SEric Saxe 	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
6780e751525SEric Saxe 		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d", cstate->cs_type);
6790e751525SEric Saxe 		/*
6800e751525SEric Saxe 		 * Allocate, initialize and install cstate kstat
6810e751525SEric Saxe 		 */
682*a3114836SGerry Liu 		cstate->cs_ksp = kstat_create("cstate", cp->cpu_id,
6830e751525SEric Saxe 		    name, "misc",
6840e751525SEric Saxe 		    KSTAT_TYPE_NAMED,
6850e751525SEric Saxe 		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
6860e751525SEric Saxe 		    KSTAT_FLAG_VIRTUAL);
6870e751525SEric Saxe 
6880e751525SEric Saxe 		if (cstate->cs_ksp == NULL) {
6890e751525SEric Saxe 			cmn_err(CE_NOTE, "kstat_create(c_state) fail");
6900e751525SEric Saxe 		} else {
6910e751525SEric Saxe 			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
6920e751525SEric Saxe 			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
6930e751525SEric Saxe 			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
6940e751525SEric Saxe 			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
6950e751525SEric Saxe 			cstate->cs_ksp->ks_private = cstate;
6960e751525SEric Saxe 			kstat_install(cstate->cs_ksp);
6970e751525SEric Saxe 		}
698*a3114836SGerry Liu 		cstate++;
6990e751525SEric Saxe 	}
7000e751525SEric Saxe 
7010e751525SEric Saxe 	cpupm_alloc_domains(cp, CPUPM_C_STATES);
7020e751525SEric Saxe 	cpupm_alloc_ms_cstate(cp);
7030e751525SEric Saxe 
7040e751525SEric Saxe 	if (cpu_deep_cstates_supported()) {
70556b56c0dSBill Holler 		uint32_t value;
70656b56c0dSBill Holler 
7070e751525SEric Saxe 		mutex_enter(&cpu_idle_callb_mutex);
7080e751525SEric Saxe 		if (cpu_deep_idle_callb_id == (callb_id_t)0)
7090e751525SEric Saxe 			cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
7100e751525SEric Saxe 			    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
7110e751525SEric Saxe 		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
7120e751525SEric Saxe 			cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
7130e751525SEric Saxe 			    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
7140e751525SEric Saxe 		mutex_exit(&cpu_idle_callb_mutex);
71556b56c0dSBill Holler 
71656b56c0dSBill Holler 
71756b56c0dSBill Holler 		/*
71856b56c0dSBill Holler 		 * All supported CPUs (Nehalem and later) will remain in C3
71956b56c0dSBill Holler 		 * during Bus Master activity.
72056b56c0dSBill Holler 		 * ACPI_BITREG_BUS_MASTER_RLD is cleared here, if it is not
72156b56c0dSBill Holler 		 * already 0, before deeper C-states are enabled.
72256b56c0dSBill Holler 		 */
72356b56c0dSBill Holler 		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
72456b56c0dSBill Holler 		if (value & 1)
72556b56c0dSBill Holler 			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
7260e751525SEric Saxe 	}
7270e751525SEric Saxe 
7280e751525SEric Saxe 	return (0);
7290e751525SEric Saxe }
7300e751525SEric Saxe 
7310e751525SEric Saxe /*
7320e751525SEric Saxe  * Free resources allocated by cpu_idle_init().
7330e751525SEric Saxe  */
7340e751525SEric Saxe static void
7350e751525SEric Saxe cpu_idle_fini(cpu_t *cp)
7360e751525SEric Saxe {
7370e751525SEric Saxe 	cpupm_mach_state_t *mach_state =
7380e751525SEric Saxe 	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
7390e751525SEric Saxe 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
7400e751525SEric Saxe 	cpu_acpi_cstate_t *cstate;
7410e751525SEric Saxe 	uint_t	cpu_max_cstates, i;
7420e751525SEric Saxe 
7430e751525SEric Saxe 	/*
7440e751525SEric Saxe 	 * idle cpu points back to the generic one
7450e751525SEric Saxe 	 */
7466af9d452Saubrey.li@intel.com 	idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
7470e751525SEric Saxe 	disp_enq_thread = non_deep_idle_disp_enq_thread;
7480e751525SEric Saxe 
7490e751525SEric Saxe 	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
7500e751525SEric Saxe 	if (cstate) {
7510e751525SEric Saxe 		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
7520e751525SEric Saxe 
7530e751525SEric Saxe 		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
7540e751525SEric Saxe 			if (cstate->cs_ksp != NULL)
7550e751525SEric Saxe 				kstat_delete(cstate->cs_ksp);
7560e751525SEric Saxe 			cstate++;
7570e751525SEric Saxe 		}
7580e751525SEric Saxe 	}
7590e751525SEric Saxe 
7600e751525SEric Saxe 	cpupm_free_ms_cstate(cp);
7610e751525SEric Saxe 	cpupm_free_domains(&cpupm_cstate_domains);
7620e751525SEric Saxe 	cpu_acpi_free_cstate_data(handle);
7630e751525SEric Saxe 
7640e751525SEric Saxe 	mutex_enter(&cpu_idle_callb_mutex);
7650e751525SEric Saxe 	if (cpu_deep_idle_callb_id != (callb_id_t)0) {
7660e751525SEric Saxe 		(void) callb_delete(cpu_deep_idle_callb_id);
7670e751525SEric Saxe 		cpu_deep_idle_callb_id = (callb_id_t)0;
7680e751525SEric Saxe 	}
7690e751525SEric Saxe 	if (cpu_idle_cpr_callb_id != (callb_id_t)0) {
7700e751525SEric Saxe 		(void) callb_delete(cpu_idle_cpr_callb_id);
7710e751525SEric Saxe 		cpu_idle_cpr_callb_id = (callb_id_t)0;
7720e751525SEric Saxe 	}
7730e751525SEric Saxe 	mutex_exit(&cpu_idle_callb_mutex);
7740e751525SEric Saxe }
7750e751525SEric Saxe 
776*a3114836SGerry Liu /*
777*a3114836SGerry Liu  * This function resolves a race between the master and the slave over
778*a3114836SGerry Liu  * the c-state data structures.  Once the slave calls this idle function
779*a3114836SGerry Liu  * and switches to the non-deep idle function, the master can safely go
780*a3114836SGerry Liu  * on to reclaim the resources.
781*a3114836SGerry Liu  */
782*a3114836SGerry Liu static void
783*a3114836SGerry Liu cpu_idle_stop_sync(void)
784*a3114836SGerry Liu {
785*a3114836SGerry Liu 	/* switch to the non deep idle function */
786*a3114836SGerry Liu 	CPU->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
787*a3114836SGerry Liu }
788*a3114836SGerry Liu 
789444f66e7SMark Haywood static void
790444f66e7SMark Haywood cpu_idle_stop(cpu_t *cp)
791444f66e7SMark Haywood {
792444f66e7SMark Haywood 	cpupm_mach_state_t *mach_state =
793444f66e7SMark Haywood 	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
794444f66e7SMark Haywood 	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
795444f66e7SMark Haywood 	cpu_acpi_cstate_t *cstate;
796*a3114836SGerry Liu 	uint_t cpu_max_cstates, i = 0;
797*a3114836SGerry Liu 
798*a3114836SGerry Liu 	mutex_enter(&cpu_idle_callb_mutex);
799*a3114836SGerry Liu 	if (idle_cpu == cpu_idle_adaptive) {
800*a3114836SGerry Liu 		/*
801*a3114836SGerry Liu 		 * Invoke the slave to switch to the synchronous idle function.
802*a3114836SGerry Liu 		 */
803*a3114836SGerry Liu 		cp->cpu_m.mcpu_idle_cpu = cpu_idle_stop_sync;
804*a3114836SGerry Liu 		poke_cpu(cp->cpu_id);
805444f66e7SMark Haywood 
8066af9d452Saubrey.li@intel.com 		/*
807*a3114836SGerry Liu 	 * Wait until the slave switches to the non-deep idle function,
808*a3114836SGerry Liu 	 * so that the master can safely go on to reclaim the resources.
8096af9d452Saubrey.li@intel.com 		 */
810*a3114836SGerry Liu 		while (cp->cpu_m.mcpu_idle_cpu != non_deep_idle_cpu) {
811*a3114836SGerry Liu 			drv_usecwait(10);
812*a3114836SGerry Liu 			if ((++i % CPU_IDLE_STOP_TIMEOUT) == 0)
813*a3114836SGerry Liu 				cmn_err(CE_NOTE, "!cpu_idle_stop: the slave"
814*a3114836SGerry Liu 				    " idle stop timeout");
815*a3114836SGerry Liu 		}
816*a3114836SGerry Liu 	}
817*a3114836SGerry Liu 	mutex_exit(&cpu_idle_callb_mutex);
8186af9d452Saubrey.li@intel.com 
819444f66e7SMark Haywood 	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
820444f66e7SMark Haywood 	if (cstate) {
821444f66e7SMark Haywood 		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
822444f66e7SMark Haywood 
823444f66e7SMark Haywood 		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
824444f66e7SMark Haywood 			if (cstate->cs_ksp != NULL)
825444f66e7SMark Haywood 				kstat_delete(cstate->cs_ksp);
826444f66e7SMark Haywood 			cstate++;
827444f66e7SMark Haywood 		}
828444f66e7SMark Haywood 	}
829444f66e7SMark Haywood 	cpupm_free_ms_cstate(cp);
830444f66e7SMark Haywood 	cpupm_remove_domains(cp, CPUPM_C_STATES, &cpupm_cstate_domains);
831444f66e7SMark Haywood 	cpu_acpi_free_cstate_data(handle);
832444f66e7SMark Haywood }
833444f66e7SMark Haywood 
8340e751525SEric Saxe /*ARGSUSED*/
8350e751525SEric Saxe static boolean_t
8360e751525SEric Saxe cpu_deep_idle_callb(void *arg, int code)
8370e751525SEric Saxe {
8380e751525SEric Saxe 	boolean_t rslt = B_TRUE;
8390e751525SEric Saxe 
8400e751525SEric Saxe 	mutex_enter(&cpu_idle_callb_mutex);
8410e751525SEric Saxe 	switch (code) {
8420e751525SEric Saxe 	case PM_DEFAULT_CPU_DEEP_IDLE:
8430e751525SEric Saxe 		/*
8440e751525SEric Saxe 		 * Default policy is same as enable
8450e751525SEric Saxe 		 */
8460e751525SEric Saxe 		/*FALLTHROUGH*/
8470e751525SEric Saxe 	case PM_ENABLE_CPU_DEEP_IDLE:
8480e751525SEric Saxe 		if ((cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG) == 0)
8490e751525SEric Saxe 			break;
8500e751525SEric Saxe 
851cef70d2cSBill Holler 		if (cstate_timer_callback(PM_ENABLE_CPU_DEEP_IDLE)) {
8520e751525SEric Saxe 			disp_enq_thread = cstate_wakeup;
8530e751525SEric Saxe 			idle_cpu = cpu_idle_adaptive;
8540e751525SEric Saxe 			cpu_idle_cfg_state &= ~CPU_IDLE_DEEP_CFG;
8550e751525SEric Saxe 		} else {
8560e751525SEric Saxe 			rslt = B_FALSE;
8570e751525SEric Saxe 		}
8580e751525SEric Saxe 		break;
8590e751525SEric Saxe 
8600e751525SEric Saxe 	case PM_DISABLE_CPU_DEEP_IDLE:
8610e751525SEric Saxe 		if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
8620e751525SEric Saxe 			break;
8630e751525SEric Saxe 
8640e751525SEric Saxe 		idle_cpu = non_deep_idle_cpu;
865cef70d2cSBill Holler 		if (cstate_timer_callback(PM_DISABLE_CPU_DEEP_IDLE)) {
8660e751525SEric Saxe 			disp_enq_thread = non_deep_idle_disp_enq_thread;
8670e751525SEric Saxe 			cpu_idle_cfg_state |= CPU_IDLE_DEEP_CFG;
8680e751525SEric Saxe 		}
8690e751525SEric Saxe 		break;
8700e751525SEric Saxe 
8710e751525SEric Saxe 	default:
8720e751525SEric Saxe 		cmn_err(CE_NOTE, "!cpu deep_idle_callb: invalid code %d\n",
8730e751525SEric Saxe 		    code);
8740e751525SEric Saxe 		break;
8750e751525SEric Saxe 	}
8760e751525SEric Saxe 	mutex_exit(&cpu_idle_callb_mutex);
8770e751525SEric Saxe 	return (rslt);
8780e751525SEric Saxe }
8790e751525SEric Saxe 
8800e751525SEric Saxe /*ARGSUSED*/
8810e751525SEric Saxe static boolean_t
8820e751525SEric Saxe cpu_idle_cpr_callb(void *arg, int code)
8830e751525SEric Saxe {
8840e751525SEric Saxe 	boolean_t rslt = B_TRUE;
8850e751525SEric Saxe 
8860e751525SEric Saxe 	mutex_enter(&cpu_idle_callb_mutex);
8870e751525SEric Saxe 	switch (code) {
8880e751525SEric Saxe 	case CB_CODE_CPR_RESUME:
889cef70d2cSBill Holler 		if (cstate_timer_callback(CB_CODE_CPR_RESUME)) {
8900e751525SEric Saxe 			/*
8910e751525SEric Saxe 			 * Do not enable dispatcher hooks if disabled by user.
8920e751525SEric Saxe 			 */
8930e751525SEric Saxe 			if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
8940e751525SEric Saxe 				break;
8950e751525SEric Saxe 
8960e751525SEric Saxe 			disp_enq_thread = cstate_wakeup;
8970e751525SEric Saxe 			idle_cpu = cpu_idle_adaptive;
8980e751525SEric Saxe 		} else {
8990e751525SEric Saxe 			rslt = B_FALSE;
9000e751525SEric Saxe 		}
9010e751525SEric Saxe 		break;
9020e751525SEric Saxe 
9030e751525SEric Saxe 	case CB_CODE_CPR_CHKPT:
9040e751525SEric Saxe 		idle_cpu = non_deep_idle_cpu;
9050e751525SEric Saxe 		disp_enq_thread = non_deep_idle_disp_enq_thread;
906cef70d2cSBill Holler 		(void) cstate_timer_callback(CB_CODE_CPR_CHKPT);
9070e751525SEric Saxe 		break;
9080e751525SEric Saxe 
9090e751525SEric Saxe 	default:
9100e751525SEric Saxe 		cmn_err(CE_NOTE, "!cpudvr cpr_callb: invalid code %d\n", code);
9110e751525SEric Saxe 		break;
9120e751525SEric Saxe 	}
9130e751525SEric Saxe 	mutex_exit(&cpu_idle_callb_mutex);
9140e751525SEric Saxe 	return (rslt);
9150e751525SEric Saxe }
9160e751525SEric Saxe 
9170e751525SEric Saxe /*
9180e751525SEric Saxe  * handle _CST notification
9190e751525SEric Saxe  */
9200e751525SEric Saxe void
9210e751525SEric Saxe cpuidle_cstate_instance(cpu_t *cp)
9220e751525SEric Saxe {
9230e751525SEric Saxe #ifndef	__xpv
9240e751525SEric Saxe 	cpupm_mach_state_t	*mach_state =
9250e751525SEric Saxe 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
9260e751525SEric Saxe 	cpu_acpi_handle_t	handle;
9270e751525SEric Saxe 	struct machcpu		*mcpu;
9280e751525SEric Saxe 	cpuset_t 		dom_cpu_set;
9290e751525SEric Saxe 	kmutex_t		*pm_lock;
9300e751525SEric Saxe 	int			result = 0;
9310e751525SEric Saxe 	processorid_t		cpu_id;
9320e751525SEric Saxe 
9330e751525SEric Saxe 	if (mach_state == NULL) {
9340e751525SEric Saxe 		return;
9350e751525SEric Saxe 	}
9360e751525SEric Saxe 
9370e751525SEric Saxe 	ASSERT(mach_state->ms_cstate.cma_domain != NULL);
9380e751525SEric Saxe 	dom_cpu_set = mach_state->ms_cstate.cma_domain->pm_cpus;
9390e751525SEric Saxe 	pm_lock = &mach_state->ms_cstate.cma_domain->pm_lock;
9400e751525SEric Saxe 
9410e751525SEric Saxe 	/*
9420e751525SEric Saxe 	 * Do for all the CPU's in the domain
9430e751525SEric Saxe 	 */
9440e751525SEric Saxe 	mutex_enter(pm_lock);
9450e751525SEric Saxe 	do {
9460e751525SEric Saxe 		CPUSET_FIND(dom_cpu_set, cpu_id);
9470e751525SEric Saxe 		if (cpu_id == CPUSET_NOTINSET)
9480e751525SEric Saxe 			break;
9490e751525SEric Saxe 
9500e751525SEric Saxe 		ASSERT(cpu_id >= 0 && cpu_id < NCPU);
9510e751525SEric Saxe 		cp = cpu[cpu_id];
9520e751525SEric Saxe 		mach_state = (cpupm_mach_state_t *)
9530e751525SEric Saxe 		    cp->cpu_m.mcpu_pm_mach_state;
9540e751525SEric Saxe 		if (!(mach_state->ms_caps & CPUPM_C_STATES)) {
9550e751525SEric Saxe 			mutex_exit(pm_lock);
9560e751525SEric Saxe 			return;
9570e751525SEric Saxe 		}
9580e751525SEric Saxe 		handle = mach_state->ms_acpi_handle;
9590e751525SEric Saxe 		ASSERT(handle != NULL);
9600e751525SEric Saxe 
9610e751525SEric Saxe 		/*
9620e751525SEric Saxe 		 * re-evaluate cstate object
9630e751525SEric Saxe 		 */
9640e751525SEric Saxe 		if (cpu_acpi_cache_cstate_data(handle) != 0) {
9650e751525SEric Saxe 			cmn_err(CE_WARN, "Cannot re-evaluate the cpu c-state"
9660e751525SEric Saxe 			    " object Instance: %d", cpu_id);
9670e751525SEric Saxe 		}
9680e751525SEric Saxe 		mcpu = &(cp->cpu_m);
9690e751525SEric Saxe 		mcpu->max_cstates = cpu_acpi_get_max_cstates(handle);
9700e751525SEric Saxe 		if (mcpu->max_cstates > CPU_ACPI_C1) {
971cef70d2cSBill Holler 			(void) cstate_timer_callback(
972cef70d2cSBill Holler 			    CST_EVENT_MULTIPLE_CSTATES);
9730e751525SEric Saxe 			disp_enq_thread = cstate_wakeup;
9740e751525SEric Saxe 			cp->cpu_m.mcpu_idle_cpu = cpu_acpi_idle;
9750e751525SEric Saxe 		} else if (mcpu->max_cstates == CPU_ACPI_C1) {
9760e751525SEric Saxe 			disp_enq_thread = non_deep_idle_disp_enq_thread;
9770e751525SEric Saxe 			cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
978cef70d2cSBill Holler 			(void) cstate_timer_callback(CST_EVENT_ONE_CSTATE);
9790e751525SEric Saxe 		}
9800e751525SEric Saxe 
9810e751525SEric Saxe 		CPUSET_ATOMIC_XDEL(dom_cpu_set, cpu_id, result);
9820e751525SEric Saxe 	} while (result < 0);
983444f66e7SMark Haywood 	mutex_exit(pm_lock);
9840e751525SEric Saxe #endif
9850e751525SEric Saxe }
9860e751525SEric Saxe 
9870e751525SEric Saxe /*
9880e751525SEric Saxe  * Handle a change in the number or type of available processor power states.
9890e751525SEric Saxe  */
9900e751525SEric Saxe void
9910e751525SEric Saxe cpuidle_manage_cstates(void *ctx)
9920e751525SEric Saxe {
9930e751525SEric Saxe 	cpu_t			*cp = ctx;
9940e751525SEric Saxe 	cpupm_mach_state_t	*mach_state =
9950e751525SEric Saxe 	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
9960e751525SEric Saxe 	boolean_t		is_ready;
9970e751525SEric Saxe 
9980e751525SEric Saxe 	if (mach_state == NULL) {
9990e751525SEric Saxe 		return;
10000e751525SEric Saxe 	}
10010e751525SEric Saxe 
10020e751525SEric Saxe 	/*
10030e751525SEric Saxe 	 * We currently refuse to power manage if the CPU is not ready to
10040e751525SEric Saxe 	 * take cross calls (cross calls fail silently if CPU is not ready
10050e751525SEric Saxe 	 * for it).
10060e751525SEric Saxe 	 *
1007444f66e7SMark Haywood 	 * Additionally, for x86 platforms we cannot power manage an instance,
1008444f66e7SMark Haywood 	 * until it has been initialized.
10090e751525SEric Saxe 	 */
1010444f66e7SMark Haywood 	is_ready = (cp->cpu_flags & CPU_READY) && cpupm_cstate_ready(cp);
10110e751525SEric Saxe 	if (!is_ready)
10120e751525SEric Saxe 		return;
10130e751525SEric Saxe 
10140e751525SEric Saxe 	cpuidle_cstate_instance(cp);
10150e751525SEric Saxe }
1016