// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 Intel Corporation
 *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize the bm_check and bm_control flags based on the CPU cache
 * properties. On SMP the result depends on the cache configuration:
 * - When the cache is not shared among all CPUs, we flush the cache
 *   before entering C3.
 * - When the cache is shared among all CPUs, we use the bm_check
 *   mechanism, as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache, and the
		 * caches should not be flushed by software while entering
		 * a C3-type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms (P4, Core and beyond),
	 * ARB_DISABLE is a nop, so set bm_control to zero to indicate
	 * that ARB_DISABLE is not required while entering a C3-type
	 * state.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
		flags->bm_control = 0;

	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
		    c->x86_stepping >= 0x0e)) {
			/*
			 * For all recent Centaur CPUs, the ucode will make
			 * sure that the cores stay cache-coherent while
			 * entering a C3-type state. So, set bm_check to 1
			 * to indicate that the kernel doesn't need to
			 * execute a cache flush operation (WBINVD) when
			 * entering a C3-type state.
			 */
			flags->bm_check = 1;
			/*
			 * For all recent Centaur platforms, ARB_DISABLE is
			 * a nop. Set bm_control to zero to indicate that
			 * ARB_DISABLE is not required while entering a
			 * C3-type state.
			 */
			flags->bm_control = 0;
		}
	}

	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		/*
		 * All Zhaoxin CPUs that support C3 share cache, and the
		 * caches should not be flushed by software while entering
		 * a C3-type state.
		 */
		flags->bm_check = 1;
		/*
		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
		 * So, set bm_control to zero to indicate that ARB_DISABLE
		 * is not required while entering a C3-type state.
		 */
		flags->bm_control = 0;
	}
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) {
		/*
		 * For all AMD Zen or newer CPUs that support C3, caches
		 * should not be flushed by software while entering a
		 * C3-type state. Set bm_check to 1 so that the kernel
		 * doesn't need to execute a cache flush operation.
		 */
		flags->bm_check = 1;
		/*
		 * In the current AMD C-state implementation ARB_DIS is no
		 * longer used, so set bm_control to zero to indicate that
		 * ARB_DIS is not required while entering a C3-type state.
		 */
		flags->bm_control = 0;
	}
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/* The code below handles C-state entry with the MONITOR/MWAIT pair on Intel */

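/*
 * Cached MWAIT arguments (EAX hint, ECX extensions) per C-state index,
 * filled in at probe time so that the idle entry path can issue MWAIT
 * without re-parsing _CST.
 */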
struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

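/*
 * GAS bit_offset value that, per the "Intel Processor Vendor-Specific
 * ACPI Interface Specification" referenced below, marks a _CST FFH
 * entry as a native MWAIT-based C-state deeper than C1/HALT.
 */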
#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

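	/*
	 * Runs on the CPU being probed (via call_on_cpu() from
	 * acpi_processor_ffh_cstate_probe()), since the CPUID MWAIT leaf
	 * must be read on that CPU.
	 */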
	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (in CST) is supported or not */
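	/*
	 * cx->address holds the MWAIT hint from the _CST FFH entry:
	 * bits 7:4 select the C-state and bits 3:0 the sub-state. The
	 * "+ 1" maps the hint encoding (0x00 == C1) onto the CPUID MWAIT
	 * leaf's EDX sub-state fields, with the final mask keeping the
	 * result within 4 bits.
	 */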
	cstate_type = (((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1) & MWAIT_CSTATE_MASK;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	/* If the HW does not support any sub-states in this C-state */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* The MWAIT ECX extension INTERRUPT_BREAK must be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure we are running on the right CPU */
	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
			     false);
	if (retval == 0) {
		/* Use the MWAIT hint from _CST */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
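	/*
	 * Enter the C-state using the EAX hint cached at probe time; the
	 * ECX value requests the INTERRUPT_BREAK extension so that an
	 * interrupt can wake the CPU even while interrupts are masked.
	 */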
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

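	/*
	 * Only vendors known to implement FFH MWAIT C-states get the
	 * per-CPU hint table; on other CPUs acpi_processor_ffh_cstate_probe()
	 * fails and the ACPI idle code uses another entry method.
	 */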
	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);