// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Pentium 4/Xeon CPU on demand clock modulation/speed scaling
 *	(C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *	(C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
 *	(C) 2002 Arjan van de Ven <arjanv@redhat.com>
 *	(C) 2002 Tora T. Engstad
 *	All Rights Reserved
 *
 *	The author(s) of this software shall not be held liable for damages
 *	of any nature resulting due to the use of this software. This
 *	software is provided AS-IS with no warranties.
 *
 *	Date		Errata		Description
 *	20020525	N44, O17	12.5% or 25% DC causes lockup
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/timex.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timer.h>
#include <asm/cpu_device_id.h>

#include "speedstep-lib.h"

/*
 * Duty Cycle (3 bits); note that DC_DISABLE is not specified in the
 * Intel docs, it is used here simply to mean "modulation disabled".
 */
enum {
	DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
	DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};

#define DC_ENTRIES	8
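
/*
 * Illustrative example: each throttled state n (DC_DFLT..DC_88PT, i.e.
 * 1..7) corresponds to an effective speed of roughly n/8 of stock_freq;
 * on a 2.4 GHz part, DC_50PT behaves like ~1.2 GHz.  The clock is
 * duty-cycled rather than rescaled, so the core clock itself is unchanged.
 */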

static int has_N44_O17_errata[NR_CPUS];
static unsigned int stock_freq;
static struct cpufreq_driver p4clockmod_driver;
static unsigned int cpufreq_p4_get(unsigned int cpu);

static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;

	if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
		return -EINVAL;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);

	if (l & 0x01)
		pr_debug("CPU#%d currently thermal throttled\n", cpu);

	if (has_N44_O17_errata[cpu] &&
	    (newstate == DC_25PT || newstate == DC_DFLT))
		newstate = DC_38PT;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
	if (newstate == DC_DISABLE) {
		pr_debug("CPU#%d disabling modulation\n", cpu);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
	} else {
		pr_debug("CPU#%d setting duty cycle to %d%%\n",
			cpu, ((125 * newstate) / 10));
		/* bits 63 - 5	: reserved
		 * bit 4	: enable/disable
		 * bits 3-1	: duty cycle
		 * bit 0	: reserved
		 */
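		/*
		 * Illustrative example: for newstate == DC_50PT (4) the
		 * write below clears the old duty-cycle bits and sets
		 * l = (l & ~0xE) | (1 << 4) | (4 << 1), i.e. modulation
		 * enabled with a 50% duty cycle.
		 */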
		l = (l & ~14);
		l = l | (1<<4) | ((newstate & 0x7)<<1);
		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
	}

	return 0;
}

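/*
 * The .frequency fields below are placeholders; cpufreq_p4_cpu_init()
 * rewrites them once stock_freq is known.
 */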
static struct cpufreq_frequency_table p4clockmod_table[] = {
	{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
	{0, DC_DFLT, 0},
	{0, DC_25PT, 0},
	{0, DC_38PT, 0},
	{0, DC_50PT, 0},
	{0, DC_64PT, 0},
	{0, DC_75PT, 0},
	{0, DC_88PT, 0},
	{0, DC_DISABLE, 0},
	{0, DC_RESV, CPUFREQ_TABLE_END},
};


static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
	int i;

	/* run on each logical CPU,
	 * see section 13.15.3 of IA32 Intel Architecture Software
	 * Developer's Manual, Volume 3
	 */
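	/*
	 * Note: the cpufreq core resolves the requested frequency against
	 * policy->freq_table before calling ->target_index, so 'index'
	 * should already refer to a valid (non-DC_RESV) table entry here.
	 */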
	for_each_cpu(i, policy->cpus)
		cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);

	return 0;
}

static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
	if (c->x86 == 0x06) {
		if (cpu_has(c, X86_FEATURE_EST))
			pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
		switch (c->x86_model) {
		case 0x0E: /* Core */
		case 0x0F: /* Core Duo */
		case 0x16: /* Celeron Core */
		case 0x1C: /* Atom */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
		case 0x0D: /* Pentium M (Dothan) */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			fallthrough;
		case 0x09: /* Pentium M (Banias) */
			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
		}
	}

	if (c->x86 != 0xF)
		return 0;

	/* On P4s, the TSC runs at a constant frequency independent of
	 * whether throttling is active or not. */
	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
		pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi-cpufreq modules offer voltage scaling in addition to frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
	}

	return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}


static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	int cpuid = 0;
	unsigned int i;

#ifdef CONFIG_SMP
	cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
#endif

	/* Errata workaround */
	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
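	/*
	 * The packed value is (family << 8) | (model << 4) | stepping, so
	 * e.g. 0x0f07 below matches family 0xF, model 0x0, stepping 0x7.
	 */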
	switch (cpuid) {
	case 0x0f07:
	case 0x0f0a:
	case 0x0f11:
	case 0x0f12:
		has_N44_O17_errata[policy->cpu] = 1;
		pr_debug("has errata -- disabling low frequencies\n");
	}

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
	    c->x86_model < 2) {
		/* switch to maximum frequency and measure result */
		cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
		recalibrate_cpu_khz();
	}
	/* get max frequency */
	stock_freq = cpufreq_p4_get_frequency(c);
	if (!stock_freq)
		return -EINVAL;

	/* table init */
	for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
		else
			p4clockmod_table[i].frequency = (stock_freq * i)/8;
	}
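	/*
	 * Illustrative example: with stock_freq == 2400000 (kHz) the table
	 * entries become 300000, 600000, ..., 2400000 kHz; on CPUs with the
	 * N44/O17 errata the 12.5% entry is marked CPUFREQ_ENTRY_INVALID
	 * instead (and cpufreq_p4_setdc() additionally remaps 25% requests
	 * to 38%).
	 */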

	/* cpuinfo and default policy values */

	/* the transition latency is set to be 1 higher than the maximum
	 * transition latency of the ondemand governor */
	policy->cpuinfo.transition_latency = 10000001;
	policy->freq_table = &p4clockmod_table[0];

	return 0;
}


static unsigned int cpufreq_p4_get(unsigned int cpu)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);

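	/* If bit 4 (modulation enable) is set, bits 3:1 hold the current
	 * duty-cycle state; otherwise report the unthrottled frequency. */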
	if (l & 0x10) {
		l = l >> 1;
		l &= 0x7;
	} else
		l = DC_DISABLE;

	if (l != DC_DISABLE)
		return stock_freq * l / 8;

	return stock_freq;
}

static struct cpufreq_driver p4clockmod_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= cpufreq_p4_target,
	.init		= cpufreq_p4_cpu_init,
	.get		= cpufreq_p4_get,
	.name		= "p4-clockmod",
	.attr		= cpufreq_generic_attr,
};

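/*
 * X86_FEATURE_ACC corresponds to the CPUID thermal monitor ("tm") bit;
 * cpufreq_p4_init() additionally requires X86_FEATURE_ACPI, which
 * indicates the software-controlled clock (THERM_CONTROL) facility.
 */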
static const struct x86_cpu_id cpufreq_p4_id[] = {
	X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL),
	{}
};

/*
 * Intentionally no MODULE_DEVICE_TABLE here: this driver should not
 * be auto loaded. Please don't add one.
 */

static int __init cpufreq_p4_init(void)
{
	int ret;

	/*
	 * THERM_CONTROL is architectural for IA32 now, so
	 * we can rely on the capability checks
	 */
	if (!x86_match_cpu(cpufreq_p4_id) || !boot_cpu_has(X86_FEATURE_ACPI))
		return -ENODEV;

	ret = cpufreq_register_driver(&p4clockmod_driver);
	if (!ret)
		pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");

	return ret;
}


static void __exit cpufreq_p4_exit(void)
{
	cpufreq_unregister_driver(&p4clockmod_driver);
}


MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");

late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);