// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AMD Processor P-state Frequency Driver Unit Test
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Meng Li <li.meng@amd.com>
 *
 * The AMD P-State Unit Test is a test module for the amd-pstate driver.
 * 1) It helps users verify that their processor supports the driver
 *    (SBIOS/firmware or hardware).
 * 2) It gives the kernel a basic functional test to catch regressions
 *    introduced by updates.
 * 3) More functional and performance tests can be added over time to
 *    compare results, which benefits power and performance optimization.
 *
 * This driver implements a basic framework; additional test cases are
 * planned to improve the depth and coverage of testing.
 *
 * See the "Unit Tests for amd-pstate" section of
 * Documentation/admin-guide/pm/amd-pstate.rst for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/cpufeature.h>
#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cleanup.h>

#include <acpi/cppc_acpi.h>

#include <asm/msr.h>

#include "amd-pstate.h"

static char *test_list;
module_param(test_list, charp, 0444);
MODULE_PARM_DESC(test_list,
		 "Comma-delimited list of tests to run (empty means run all tests)");

DEFINE_FREE(cleanup_page, void *, if (_T) free_page((unsigned long)_T))

struct amd_pstate_ut_struct {
	const char *name;
	int (*func)(u32 index);
};

/*
 * Forward declarations of the individual test cases.
 */
static int amd_pstate_ut_acpi_cpc_valid(u32 index);
static int amd_pstate_ut_check_enabled(u32 index);
static int amd_pstate_ut_check_perf(u32 index);
static int amd_pstate_ut_check_freq(u32 index);
static int amd_pstate_ut_epp(u32 index);
static int amd_pstate_ut_check_driver(u32 index);
static int amd_pstate_ut_check_freq_attrs(u32 index);

static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
	{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
	{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
	{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
	{"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq },
	{"amd_pstate_ut_epp", amd_pstate_ut_epp },
	{"amd_pstate_ut_check_driver", amd_pstate_ut_check_driver },
	{"amd_pstate_ut_check_freq_attrs", amd_pstate_ut_check_freq_attrs },
};
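/*
 * Example usage (a sketch; it assumes the module is built as
 * amd-pstate-ut.ko and that test names match the table above):
 *
 *   # modprobe amd-pstate-ut test_list=amd_pstate_ut_check_perf,amd_pstate_ut_check_freq
 *   # dmesg | grep amd_pstate_ut
 *
 * An empty test_list runs every test in amd_pstate_ut_cases[].
 */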
static bool test_in_list(const char *list, const char *name)
{
	size_t name_len = strlen(name);
	const char *p = list;

	while (*p) {
		const char *sep = strchr(p, ',');
		size_t token_len = sep ? sep - p : strlen(p);

		if (token_len == name_len && !strncmp(p, name, token_len))
			return true;
		if (!sep)
			break;
		p = sep + 1;
	}

	return false;
}

/*
 * Returns true when the processor lacks the MSR-based CPPC interface
 * (X86_FEATURE_CPPC) and the shared-memory interface is used instead.
 */
static bool get_shared_mem(void)
{
	bool result = false;

	if (!boot_cpu_has(X86_FEATURE_CPPC))
		result = true;

	return result;
}

/*
 * Check that the _CPC object is present in the SBIOS.
 */
static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
	if (!acpi_cpc_valid()) {
		pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check that amd-pstate is enabled (MSR_AMD_CPPC_ENABLE is set).
 */
static int amd_pstate_ut_check_enabled(u32 index)
{
	u64 cppc_enable = 0;
	int ret;

	if (get_shared_mem())
		return 0;

	ret = rdmsrq_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
	if (ret) {
		pr_err("%s rdmsrq_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
		return ret;
	}

	if (!cppc_enable) {
		pr_err("%s amd pstate must be enabled!\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Check that the performance values are reasonable:
 * highest_perf >= nominal_perf > lowest_nonlinear_perf >= lowest_perf > 0
 */
static int amd_pstate_ut_check_perf(u32 index)
{
	int cpu = 0, ret = 0;
	u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
	u64 cap1 = 0;
	struct cppc_perf_caps cppc_perf;
	union perf_cached cur_perf;

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
		struct amd_cpudata *cpudata;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		cpudata = policy->driver_data;

		if (get_shared_mem()) {
			ret = cppc_get_perf_caps(cpu, &cppc_perf);
			if (ret) {
				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
				return ret;
			}

			highest_perf = cppc_perf.highest_perf;
			nominal_perf = cppc_perf.nominal_perf;
			lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
			lowest_perf = cppc_perf.lowest_perf;
		} else {
			ret = rdmsrq_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
			if (ret) {
				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
				return ret;
			}

			highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
			nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
			lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
			lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
		}

		cur_perf = READ_ONCE(cpudata->perf);
		if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
			pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
			       __func__, cpu, highest_perf, cur_perf.highest_perf);
			return -EINVAL;
		}
		if (nominal_perf != cur_perf.nominal_perf ||
		    (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
		    (lowest_perf != cur_perf.lowest_perf)) {
			pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
			       __func__, cpu, nominal_perf, cur_perf.nominal_perf,
			       lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
			       lowest_perf, cur_perf.lowest_perf);
			return -EINVAL;
		}

		if (!((highest_perf >= nominal_perf) &&
		      (nominal_perf > lowest_nonlinear_perf) &&
		      (lowest_nonlinear_perf >= lowest_perf) &&
		      (lowest_perf > 0))) {
			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d >= lowest=%d > 0, the hierarchy is violated!\n",
			       __func__, cpu, highest_perf, nominal_perf,
			       lowest_nonlinear_perf, lowest_perf);
			return -EINVAL;
		}
	}

	return 0;
}
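/*
 * Illustrative only (the numbers are hypothetical, not taken from any
 * specific part): a CPU reporting highest=228, nominal=166,
 * lowest_nonlinear=113 and lowest=14 satisfies the hierarchy checked
 * above, since 228 >= 166 > 113 >= 14 > 0.
 */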
/*
 * Check that the frequency values are reasonable:
 * max_freq >= nominal_freq > lowest_nonlinear_freq >= min_freq > 0
 * Also check that policy->max is sane when boost mode is supported.
 */
static int amd_pstate_ut_check_freq(u32 index)
{
	int cpu = 0;

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
		struct amd_cpudata *cpudata;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		cpudata = policy->driver_data;

		if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
		      (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
		      (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
		      (policy->cpuinfo.min_freq > 0))) {
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d >= min=%d > 0, the hierarchy is violated!\n",
			       __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
			       cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
			return -EINVAL;
		}

		if (cpudata->lowest_nonlinear_freq != policy->min) {
			pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
			       __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
			return -EINVAL;
		}

		if (cpudata->boost_supported) {
			if ((policy->max != policy->cpuinfo.max_freq) &&
			    (policy->max != cpudata->nominal_freq)) {
				pr_err("%s cpu%d policy_max=%d should equal cpu_max=%d or cpu_nominal=%d!\n",
				       __func__, cpu, policy->max, policy->cpuinfo.max_freq,
				       cpudata->nominal_freq);
				return -EINVAL;
			}
		} else {
			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
			return -EINVAL;
		}
	}

	return 0;
}

static int amd_pstate_set_mode(enum amd_pstate_mode mode)
{
	const char *mode_str = amd_pstate_get_mode_string(mode);

	pr_debug("->setting mode to %s\n", mode_str);

	return amd_pstate_update_status(mode_str, strlen(mode_str));
}
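/*
 * For reference (a sketch of the userspace equivalent, assuming the
 * standard sysfs layout documented in amd-pstate.rst):
 * amd_pstate_set_mode() exercises the same update path as writing the
 * mode string to the driver's status attribute, e.g.:
 *
 *   # echo passive > /sys/devices/system/cpu/amd_pstate/status
 */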
static int amd_pstate_ut_epp(u32 index)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
	char *buf __free(cleanup_page) = NULL;
	static const char * const epp_strings[] = {
		"performance",
		"balance_performance",
		"balance_power",
		"power",
	};
	struct amd_cpudata *cpudata;
	enum amd_pstate_mode orig_mode;
	bool orig_dynamic_epp;
	int ret, cpu = 0;
	int i;
	u16 epp;	/* wider than u8 so the loop below can terminate */

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	cpudata = policy->driver_data;
	orig_mode = amd_pstate_get_status();
	orig_dynamic_epp = cpudata->dynamic_epp;

	/* disable dynamic EPP before running the test */
	if (cpudata->dynamic_epp) {
		pr_debug("Dynamic EPP is enabled, disabling it\n");
		amd_pstate_clear_dynamic_epp(policy);
	}

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = amd_pstate_set_mode(AMD_PSTATE_ACTIVE);
	if (ret)
		goto out;

	for (epp = 0; epp <= U8_MAX; epp++) {
		u8 val;

		/* write all raw EPP values */
		memset(buf, 0, PAGE_SIZE);
		snprintf(buf, PAGE_SIZE, "%d", epp);
		ret = store_energy_performance_preference(policy, buf, strlen(buf));
		if (ret < 0)
			goto out;

		/* check that the raw EPP value reads back correctly */
		memset(buf, 0, PAGE_SIZE);
		ret = show_energy_performance_preference(policy, buf);
		if (ret < 0)
			goto out;
		strreplace(buf, '\n', '\0');
		ret = kstrtou8(buf, 0, &val);
		if (!ret && epp != val) {
			pr_err("Raw EPP value mismatch: %d != %d\n", epp, val);
			ret = -EINVAL;
			goto out;
		}
	}

	/* check that the named EPP profiles round-trip as strings */
	for (i = 0; i < ARRAY_SIZE(epp_strings); i++) {
		memset(buf, 0, PAGE_SIZE);
		snprintf(buf, PAGE_SIZE, "%s", epp_strings[i]);
		ret = store_energy_performance_preference(policy, buf, strlen(buf));
		if (ret < 0)
			goto out;

		memset(buf, 0, PAGE_SIZE);
		ret = show_energy_performance_preference(policy, buf);
		if (ret < 0)
			goto out;
		strreplace(buf, '\n', '\0');

		if (strcmp(buf, epp_strings[i])) {
			pr_err("String EPP value mismatch: %s != %s\n", buf, epp_strings[i]);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	/* bounce through "disable" so restoring the mode re-enables dynamic EPP */
	if (orig_dynamic_epp) {
		int ret2;

		ret2 = amd_pstate_set_mode(AMD_PSTATE_DISABLE);
		if (!ret && ret2)
			ret = ret2;
	}

	if (orig_mode != amd_pstate_get_status()) {
		int ret2;

		ret2 = amd_pstate_set_mode(orig_mode);
		if (!ret && ret2)
			ret = ret2;
	}

	return ret;
}

/*
 * Walk every mode-to-mode transition and make sure each status update
 * succeeds.
 */
static int amd_pstate_ut_check_driver(u32 index)
{
	enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
	enum amd_pstate_mode orig_mode = amd_pstate_get_status();
	int ret;

	for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
		ret = amd_pstate_set_mode(mode1);
		if (ret)
			return ret;
		for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
			if (mode1 == mode2)
				continue;
			ret = amd_pstate_set_mode(mode2);
			if (ret)
				goto out;
		}
	}

out:
	if (ret)
		pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
			amd_pstate_get_mode_string(mode1),
			amd_pstate_get_mode_string(mode2), ret);

	amd_pstate_set_mode(orig_mode);
	return ret;
}

enum attr_category {
	ATTR_ALWAYS,
	ATTR_PREFCORE,
	ATTR_EPP,
	ATTR_FLOOR_FREQ,
};

static const struct {
	const char *name;
	enum attr_category category;
} expected_freq_attrs[] = {
	{"amd_pstate_max_freq", ATTR_ALWAYS},
	{"amd_pstate_lowest_nonlinear_freq", ATTR_ALWAYS},
	{"amd_pstate_highest_perf", ATTR_ALWAYS},
	{"amd_pstate_prefcore_ranking", ATTR_PREFCORE},
	{"amd_pstate_hw_prefcore", ATTR_PREFCORE},
	{"energy_performance_preference", ATTR_EPP},
	{"energy_performance_available_preferences", ATTR_EPP},
	{"amd_pstate_floor_freq", ATTR_FLOOR_FREQ},
	{"amd_pstate_floor_count", ATTR_FLOOR_FREQ},
};

static bool attr_in_driver(struct freq_attr **driver_attrs, const char *name)
{
	int j;

	for (j = 0; driver_attrs[j]; j++) {
		if (!strcmp(driver_attrs[j]->attr.name, name))
			return true;
	}
	return false;
}
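/*
 * Illustrative expectation (assuming a system where hw_prefcore and
 * X86_FEATURE_CPPC_PERF_PRIO are both present): in active mode every
 * entry in expected_freq_attrs[] should appear under
 * /sys/devices/system/cpu/cpuN/cpufreq/, while in passive or guided
 * mode the two energy_performance_* attributes must be hidden.
 */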
/*
 * Verify that for each mode the driver's live ->attr array contains exactly
 * the attributes that should be visible. Expected visibility is derived
 * independently from hw_prefcore, CPU features, and the current mode, not
 * from the driver's own visibility functions.
 */
static int amd_pstate_ut_check_freq_attrs(u32 index)
{
	enum amd_pstate_mode orig_mode = amd_pstate_get_status();
	static const enum amd_pstate_mode modes[] = {
		AMD_PSTATE_PASSIVE, AMD_PSTATE_ACTIVE, AMD_PSTATE_GUIDED,
	};
	bool has_prefcore, has_floor_freq;
	int m, i, ret;

	has_floor_freq = cpu_feature_enabled(X86_FEATURE_CPPC_PERF_PRIO);

	/*
	 * Determine prefcore support from any online CPU's cpudata.
	 * hw_prefcore reflects the platform-wide decision made at init.
	 */
	has_prefcore = false;
	for_each_online_cpu(i) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
		struct amd_cpudata *cpudata;

		policy = cpufreq_cpu_get(i);
		if (!policy)
			continue;
		cpudata = policy->driver_data;
		has_prefcore = cpudata->hw_prefcore;
		break;
	}

	for (m = 0; m < ARRAY_SIZE(modes); m++) {
		struct freq_attr **driver_attrs;

		ret = amd_pstate_set_mode(modes[m]);
		if (ret)
			goto out;

		driver_attrs = amd_pstate_get_current_attrs();
		if (!driver_attrs) {
			pr_err("%s: no driver attrs in mode %s\n",
			       __func__, amd_pstate_get_mode_string(modes[m]));
			ret = -EINVAL;
			goto out;
		}

		for (i = 0; i < ARRAY_SIZE(expected_freq_attrs); i++) {
			bool expected, found;

			switch (expected_freq_attrs[i].category) {
			case ATTR_ALWAYS:
				expected = true;
				break;
			case ATTR_PREFCORE:
				expected = has_prefcore;
				break;
			case ATTR_EPP:
				expected = (modes[m] == AMD_PSTATE_ACTIVE);
				break;
			case ATTR_FLOOR_FREQ:
				expected = has_floor_freq;
				break;
			default:
				expected = false;
				break;
			}

			found = attr_in_driver(driver_attrs,
					       expected_freq_attrs[i].name);

			if (expected != found) {
				pr_err("%s: mode %s: attr %s expected %s but is %s\n",
				       __func__,
				       amd_pstate_get_mode_string(modes[m]),
				       expected_freq_attrs[i].name,
				       expected ? "visible" : "hidden",
				       found ? "visible" : "hidden");
				ret = -EINVAL;
				goto out;
			}
		}
	}

	ret = 0;
out:
	amd_pstate_set_mode(orig_mode);
	return ret;
}

static int __init amd_pstate_ut_init(void)
{
	u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);

	for (i = 0; i < arr_size; i++) {
		int ret;

		if (test_list && *test_list &&
		    !test_in_list(test_list, amd_pstate_ut_cases[i].name))
			continue;

		ret = amd_pstate_ut_cases[i].func(i);

		if (ret)
			pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
		else
			pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
	}

	/* results are reported via the kernel log; module load itself always succeeds */
	return 0;
}

static void __exit amd_pstate_ut_exit(void)
{
}

module_init(amd_pstate_ut_init);
module_exit(amd_pstate_ut_exit);

MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
MODULE_DESCRIPTION("AMD P-state driver Test module");
MODULE_LICENSE("GPL");