// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Processor P-state Frequency Driver Unit Test
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Meng Li <li.meng@amd.com>
 *
 * The AMD P-State Unit Test is a test module for the amd-pstate driver.
 * 1) It helps users verify that their processor supports the driver
 *    (SBIOS/firmware or hardware). 2) It gives the kernel a basic
 *    functional test to catch regressions across updates. 3) Further
 *    functional or performance tests can be added over time, which
 *    benefits power and performance optimization.
 *
 * This driver implements a basic framework, with plans to add more test
 * cases to improve the depth and coverage of the testing.
 *
 * See the "Unit Tests for amd-pstate" section of
 * Documentation/admin-guide/pm/amd-pstate.rst for more detail.
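 *
 * A typical way to run the tests (assuming the module is built as
 * CONFIG_X86_AMD_PSTATE_UT=m) is to load it and read the results from
 * the kernel log, for example:
 *
 *   # modprobe amd_pstate_ut
 *   # dmesg | grep amd_pstate_ut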
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/cleanup.h>

#include <acpi/cppc_acpi.h>

#include <asm/msr.h>

#include "amd-pstate.h"


struct amd_pstate_ut_struct {
	const char *name;
	int (*func)(u32 index);
};

/*
 * Test cases run by the AMD P-State unit test module.
 */
static int amd_pstate_ut_acpi_cpc_valid(u32 index);
static int amd_pstate_ut_check_enabled(u32 index);
static int amd_pstate_ut_check_perf(u32 index);
static int amd_pstate_ut_check_freq(u32 index);
static int amd_pstate_ut_check_driver(u32 index);

static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
	{"amd_pstate_ut_acpi_cpc_valid",   amd_pstate_ut_acpi_cpc_valid   },
	{"amd_pstate_ut_check_enabled",    amd_pstate_ut_check_enabled    },
	{"amd_pstate_ut_check_perf",       amd_pstate_ut_check_perf       },
	{"amd_pstate_ut_check_freq",       amd_pstate_ut_check_freq       },
	{"amd_pstate_ut_check_driver",	   amd_pstate_ut_check_driver     }
};

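/*
 * The driver uses the shared-memory (ACPI CPPC mailbox) interface when the
 * CPU lacks the dedicated CPPC MSRs, i.e. when X86_FEATURE_CPPC is not set.
 */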
static bool get_shared_mem(void)
{
	bool result = false;

	if (!boot_cpu_has(X86_FEATURE_CPPC))
		result = true;

	return result;
}

/*
 * Check that the _CPC object is present in the SBIOS.
 */
static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
	if (!acpi_cpc_valid()) {
		pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check if amd-pstate is enabled.
 */
static int amd_pstate_ut_check_enabled(u32 index)
{
	u64 cppc_enable = 0;
	int ret;

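	/*
	 * Shared-memory systems have no MSR_AMD_CPPC_ENABLE to read, so the
	 * check below only applies to MSR-based systems.
	 */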
	if (get_shared_mem())
		return 0;

	ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
	if (ret) {
		pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
		return ret;
	}

	if (!cppc_enable) {
		pr_err("%s amd pstate must be enabled!\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check if the performance values are reasonable:
 * highest_perf >= nominal_perf > lowest_nonlinear_perf >= lowest_perf > 0
 */
static int amd_pstate_ut_check_perf(u32 index)
{
	int cpu = 0, ret = 0;
	u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
	u64 cap1 = 0;
	struct cppc_perf_caps cppc_perf;
	union perf_cached cur_perf;

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
		struct amd_cpudata *cpudata;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		cpudata = policy->driver_data;

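		/*
		 * Read the performance capabilities either from the ACPI _CPC
		 * object (shared-memory systems) or from MSR_AMD_CPPC_CAP1
		 * (MSR-based systems), then compare them against the values
		 * cached by the driver.
		 */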
		if (get_shared_mem()) {
			ret = cppc_get_perf_caps(cpu, &cppc_perf);
			if (ret) {
				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
				return ret;
			}

			highest_perf = cppc_perf.highest_perf;
			nominal_perf = cppc_perf.nominal_perf;
			lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
			lowest_perf = cppc_perf.lowest_perf;
		} else {
			ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
			if (ret) {
				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
				return ret;
			}

			highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
			nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
			lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
			lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
		}

		cur_perf = READ_ONCE(cpudata->perf);
		if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
			pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
				__func__, cpu, highest_perf, cur_perf.highest_perf);
			return -EINVAL;
		}
		if (nominal_perf != cur_perf.nominal_perf ||
		   (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
		   (lowest_perf != cur_perf.lowest_perf)) {
			pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
				__func__, cpu, nominal_perf, cur_perf.nominal_perf,
				lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
				lowest_perf, cur_perf.lowest_perf);
			return -EINVAL;
		}

		if (!((highest_perf >= nominal_perf) &&
			(nominal_perf > lowest_nonlinear_perf) &&
			(lowest_nonlinear_perf >= lowest_perf) &&
			(lowest_perf > 0))) {
			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
				__func__, cpu, highest_perf, nominal_perf,
				lowest_nonlinear_perf, lowest_perf);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Check if the frequency values are reasonable:
 * max_freq >= nominal_freq > lowest_nonlinear_freq >= min_freq > 0
 * Also check max_freq when boost mode is supported.
 */
static int amd_pstate_ut_check_freq(u32 index)
{
	int cpu = 0;

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
		struct amd_cpudata *cpudata;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		cpudata = policy->driver_data;

		if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
			(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
			(cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
			(policy->cpuinfo.min_freq > 0))) {
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
				__func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
				cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
			return -EINVAL;
		}

		if (cpudata->lowest_nonlinear_freq != policy->min) {
			pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
				__func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
			return -EINVAL;
		}

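		/*
		 * With boost supported, policy->max must be either the boosted
		 * maximum frequency (cpuinfo.max_freq) or the nominal
		 * (non-boost) frequency.
		 */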
		if (cpudata->boost_supported) {
			if ((policy->max != policy->cpuinfo.max_freq) &&
			    (policy->max != cpudata->nominal_freq)) {
				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
					__func__, cpu, policy->max, policy->cpuinfo.max_freq,
					cpudata->nominal_freq);
				return -EINVAL;
			}
		} else {
			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
			return -EINVAL;
		}
	}

	return 0;
}

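/*
 * Switch the driver to @mode by passing the corresponding mode string to
 * amd_pstate_update_status().
 */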
static int amd_pstate_set_mode(enum amd_pstate_mode mode)
{
	const char *mode_str = amd_pstate_get_mode_string(mode);

	pr_debug("->setting mode to %s\n", mode_str);

	return amd_pstate_update_status(mode_str, strlen(mode_str));
}

static int amd_pstate_ut_check_driver(u32 index)
{
	enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
	enum amd_pstate_mode orig_mode = amd_pstate_get_status();
	int ret;

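	/*
	 * Walk every mode-to-mode transition and verify that each switch
	 * succeeds, then restore the original mode.
	 */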
	for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
		ret = amd_pstate_set_mode(mode1);
		if (ret)
			return ret;
		for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
			if (mode1 == mode2)
				continue;
			ret = amd_pstate_set_mode(mode2);
			if (ret)
				goto out;
		}
	}

out:
	if (ret)
		pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
			amd_pstate_get_mode_string(mode1),
			amd_pstate_get_mode_string(mode2), ret);

	amd_pstate_set_mode(orig_mode);
	return ret;
}

static int __init amd_pstate_ut_init(void)
{
	u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);

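	/* Run every test case and report its result in the kernel log. */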
	for (i = 0; i < arr_size; i++) {
		int ret = amd_pstate_ut_cases[i].func(i);

		if (ret)
			pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
		else
			pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
	}

	return 0;
}

static void __exit amd_pstate_ut_exit(void)
{
}

module_init(amd_pstate_ut_init);
module_exit(amd_pstate_ut_exit);

MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
MODULE_DESCRIPTION("AMD P-state driver Test module");
MODULE_LICENSE("GPL");