// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/platform_device.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

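/*
 * Bit masks for the P-state selection field in the PERF_CTL MSR: Intel
 * encodes the target state in the low 16 bits, while the AMD and Hygon
 * hardware P-state interfaces use only the low 3 bits.
 */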
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

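/*
 * boost_state() reports whether frequency boosting is currently enabled
 * on @cpu.  Both vendor interfaces expose a *disable* bit (turbo disable
 * in IA32_MISC_ENABLE on Intel/Centaur/Zhaoxin, CPB_DIS in K7 HWCR on
 * AMD/Hygon), hence the negations below.
 */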
static bool boost_state(unsigned int cpu)
{
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

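/*
 * set_boost() flips the boost MSR on every CPU in the policy by running
 * boost_set_msr() on each of them via IPI; the MSRs involved are
 * per-CPU, so a single read-modify-write from here would not suffice.
 * Callers such as store_cpb() below take the CPU hotplug read lock
 * around the call.
 */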
static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %s.\n",
		 cpumask_pr_args(policy->cpus), str_enabled_disabled(val));

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	cpus_read_lock();
	set_boost(policy, val);
	cpus_read_unlock();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif
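
/*
 * The cpb attribute is the legacy AMD "Core Performance Boost" switch.
 * Usage sketch (sysfs path assumes CPU 0 owns a cpufreq policy):
 *
 *   # cat /sys/devices/system/cpu/cpu0/cpufreq/cpb
 *   1
 *   # echo 0 > /sys/devices/system/cpu/cpu0/cpufreq/cpb
 */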

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

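/*
 * extract_io()/extract_msr() map a raw status or control value read
 * back from the platform to an entry in policy->freq_table;
 * extract_freq() dispatches on the interface type detected at init.
 */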
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}

static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

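/*
 * struct drv_cmd bundles the register, the value and the accessor so
 * that a read or write can be shipped to a CPU inside the target
 * frequency domain via the smp_call_function_*() helpers below.
 */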
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

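/*
 * With acpi_pstate_strict set, verify a transition by polling the
 * hardware: up to 100 reads spaced 10us apart, so a switch gets
 * roughly 1ms to take effect before it is reported as failed.
 */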
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

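/*
 * ->target_index() callback: program the P-state backing freq_table
 * entry @index on all CPUs sharing the policy's frequency domain.
 */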
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

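/*
 * ->fast_switch() runs on a CPU belonging to the policy, from the
 * scheduler's governor callback with interrupts off, so it writes the
 * control register directly instead of going through drv_write()'s
 * cross-CPU machinery.  It is only made available when no software
 * coordination with other CPUs is required (see ->init() below).
 */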
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq,
						    false);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

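/*
 * Guess the current frequency by picking the table state closest to
 * the measured cpu_khz (the midpoint test below); without a usable
 * cpu_khz, assume the CPU sits at P0.
 */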
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up
 * in hardware or handling it in firmware, without informing the OS.
 * If undetected, this has the side effect of making the CPU run at a
 * different speed than the OS intended. Detect it and handle it
 * cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/*
	 * Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
/*
 * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
 * between the highest_perf and the nominal_perf.
 *
 * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
 * frequency via @nominal_freq if it is a non-NULL pointer.
 */
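/*
 * For example (illustrative numbers only): highest_perf = 280 and
 * nominal_perf = 200 give (280 << SCHED_CAPACITY_SHIFT) / 200 = 1433,
 * i.e. a boost ratio of roughly 1.4 in fixed-point form.
 */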
static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		ret = amd_get_boost_ratio_numerator(cpu, &highest_perf);
		if (ret) {
			pr_debug("CPU%d: Unable to get boost ratio numerator (%d)\n",
				 cpu, ret);
			return 0;
		}
	} else {
		highest_perf = perf_caps.highest_perf;
	}

	nominal_perf = perf_caps.nominal_perf;

	if (nominal_freq)
		*nominal_freq = perf_caps.nominal_freq;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}

#else
static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
	return 0;
}
#endif

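/*
 * ->init() callback: register the CPU with the ACPI perflib, pick the
 * control interface (I/O port vs. MSR), build the frequency table and,
 * when CPPC data is available, scale cpuinfo.max_freq by the boost
 * ratio computed above.
 */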
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	u64 max_boost_ratio, nominal_freq = 0;
	unsigned int valid_states = 0;
	unsigned int result = 0;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
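	/*
	 * _PSS entries are expected in descending frequency order; skip
	 * any entry that is not strictly slower than the previous valid
	 * one so the resulting table contains no duplicates.
	 */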
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
	if (max_boost_ratio) {
		unsigned int freq = nominal_freq;

		/*
		 * The loop above sorts the freq_table entries in the
		 * descending order. If ACPI CPPC has not advertised
		 * the nominal frequency (this is possible in CPPC
		 * revisions prior to 3), then use the first entry in
		 * the pstate table as a proxy for nominal frequency.
		 */
		if (!freq)
			freq = freq_table[0].frequency;

		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	cpufreq_boost_down_prep(policy->cpu);
	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

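/*
 * X86_FEATURE_CPB is AMD Core Performance Boost and X86_FEATURE_IDA is
 * Intel Dynamic Acceleration (turbo); either one indicates that the
 * boost MSR interface above is usable.
 */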
static void __init acpi_cpufreq_boost_init(void)
{
	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);
}

static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -ENODEV;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/*
	 * This is a sysfs file with a strange name and an even stranger
	 * semantic: it is instantiated per CPU but has a system-global
	 * effect.  Enable it only on AMD CPUs, for compatibility reasons,
	 * and only if configured.  This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
	}
	return ret;
}

static void acpi_cpufreq_remove(struct platform_device *pdev)
{
	pr_debug("%s\n", __func__);

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

static struct platform_driver acpi_cpufreq_platdrv = {
	.driver = {
		.name	= "acpi-cpufreq",
	},
	.remove = acpi_cpufreq_remove,
};

static int __init acpi_cpufreq_init(void)
{
	return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
}

static void __exit acpi_cpufreq_exit(void)
{
	platform_driver_unregister(&acpi_cpufreq_platdrv);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
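
/*
 * Example (hypothetical session): the parameter can be set at module
 * load time or toggled later through sysfs:
 *
 *   # modprobe acpi-cpufreq acpi_pstate_strict=1
 *   # echo 1 > /sys/module/acpi_cpufreq/parameters/acpi_pstate_strict
 */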

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("platform:acpi-cpufreq");