// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware
 * implementations for AMD P-State: 1) Full MSR Solution and 2) Shared Memory
 * Solution. The X86_FEATURE_CPPC CPU feature flag is used to distinguish
 * between the two types.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/topology.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#include "amd-pstate.h"
#include "amd-pstate-trace.h"

#define AMD_PSTATE_TRANSITION_LATENCY		20000
#define AMD_PSTATE_TRANSITION_DELAY		1000
#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY	600

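/*
 * The EPP hint is an 8-bit value in MSR_AMD_CPPC_REQ: 0x00 requests maximum
 * performance, 0xFF requests maximum energy savings. The values below are the
 * conventional operating points exposed through sysfs.
 */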
#define AMD_CPPC_EPP_PERFORMANCE		0x00
#define AMD_CPPC_EPP_BALANCE_PERFORMANCE	0x80
#define AMD_CPPC_EPP_BALANCE_POWERSAVE		0xBF
#define AMD_CPPC_EPP_POWERSAVE			0xFF

static const char * const amd_pstate_mode_string[] = {
	[AMD_PSTATE_UNDEFINED]   = "undefined",
	[AMD_PSTATE_DISABLE]     = "disable",
	[AMD_PSTATE_PASSIVE]     = "passive",
	[AMD_PSTATE_ACTIVE]      = "active",
	[AMD_PSTATE_GUIDED]      = "guided",
	NULL,
};

const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
{
	if (mode < 0 || mode >= AMD_PSTATE_MAX)
		return NULL;
	return amd_pstate_mode_string[mode];
}
EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);

struct quirk_entry {
	u32 nominal_freq;
	u32 lowest_freq;
};

static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;

/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive
 * the frequency that a core is going to operate during
 * short periods of activity. EPP values will be utilized for
 * different OS profiles (balanced, performance, power savings).
 * The display strings corresponding to each EPP index are listed
 * in energy_perf_strings[]:
 *	index		string
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
enum energy_perf_value_index {
	EPP_INDEX_DEFAULT = 0,
	EPP_INDEX_PERFORMANCE,
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
};

static const char * const energy_perf_strings[] = {
	[EPP_INDEX_DEFAULT] = "default",
	[EPP_INDEX_PERFORMANCE] = "performance",
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
	NULL
};

static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0,
	[EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
	[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
};

typedef int (*cppc_mode_transition_fn)(int);

static struct quirk_entry quirk_amd_7k62 = {
	.nominal_freq = 2600,
	.lowest_freq = 550,
};

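/*
 * Convert between frequency values (in kHz, same scale as nominal_freq) and
 * the abstract CPPC performance scale: both helpers scale linearly through
 * the (nominal_freq, nominal_perf) point, rounding up. freq_to_perf() also
 * clamps the result to the [lowest_perf, highest_perf] range.
 */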
static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
{
	u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);

	return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
}

static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
{
	return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
				perf.nominal_perf);
}

static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
	/*
	 * Match the broken BIOS for family 17h processors supporting CPPC V2:
	 * the broken BIOS lacks the nominal_freq and lowest_freq capability
	 * definitions in the ACPI tables.
	 */
	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
		quirks = dmi->driver_data;
		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
		return 1;
	}

	return 0;
}

static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
	{
		.callback = dmi_matched_7k62_bios_bug,
		.ident = "AMD EPYC 7K62",
		.matches = {
			DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
			DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
		},
		.driver_data = &quirk_amd_7k62,
	},
	{}
};
MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);

static inline int get_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
	return -EINVAL;
}

static DEFINE_MUTEX(amd_pstate_driver_lock);

static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
	u64 value;
	int ret;

	ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
	if (ret < 0) {
		pr_debug("Could not retrieve energy perf value (%d)\n", ret);
		return ret;
	}

	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}

DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);

static inline u8 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_get_epp)(cpudata);
}

static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
	u64 epp;
	int ret;

	ret = cppc_get_epp_perf(cpudata->cpu, &epp);
	if (ret < 0) {
		pr_debug("Could not retrieve energy perf value (%d)\n", ret);
		return ret;
	}

	return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}

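/*
 * Program the min/desired/max performance levels and the EPP hint with a
 * single write to MSR_AMD_CPPC_REQ, skipping the register write when nothing
 * has changed relative to the cached request value.
 */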
static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
			   u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	u64 value, prev;

	value = prev = READ_ONCE(cpudata->cppc_req_cached);

	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	if (trace_amd_pstate_epp_perf_enabled()) {
		union perf_cached perf = READ_ONCE(cpudata->perf);

		trace_amd_pstate_epp_perf(cpudata->cpu,
					  perf.highest_perf,
					  epp,
					  min_perf,
					  max_perf,
					  policy->boost_enabled,
					  value != prev);
	}

	if (value == prev)
		return 0;

	if (fast_switch) {
		wrmsrq(MSR_AMD_CPPC_REQ, value);
		return 0;
	} else {
		int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);

		if (ret)
			return ret;
	}

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);

static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
					 u8 min_perf, u8 des_perf,
					 u8 max_perf, u8 epp,
					 bool fast_switch)
{
	return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
						   max_perf, epp, fast_switch);
}

static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	u64 value, prev;
	int ret;

	value = prev = READ_ONCE(cpudata->cppc_req_cached);
	value &= ~AMD_CPPC_EPP_PERF_MASK;
	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	if (trace_amd_pstate_epp_perf_enabled()) {
		union perf_cached perf = cpudata->perf;

		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
					  epp,
					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
						    cpudata->cppc_req_cached),
					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
						    cpudata->cppc_req_cached),
					  policy->boost_enabled,
					  value != prev);
	}

	if (value == prev)
		return 0;

	ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	if (ret) {
		pr_err("failed to set energy perf value (%d)\n", ret);
		return ret;
	}

	/* update both so that msr_update_perf() can effectively check */
	WRITE_ONCE(cpudata->cppc_req_cached, value);

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);

static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
	return static_call(amd_pstate_set_epp)(policy, epp);
}

static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	struct cppc_perf_ctrls perf_ctrls;
	u8 epp_cached;
	u64 value;
	int ret;

	epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
	if (trace_amd_pstate_epp_perf_enabled()) {
		union perf_cached perf = cpudata->perf;

		trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
					  epp,
					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
						    cpudata->cppc_req_cached),
					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
						    cpudata->cppc_req_cached),
					  policy->boost_enabled,
					  epp != epp_cached);
	}

	if (epp == epp_cached)
		return 0;

	perf_ctrls.energy_perf = epp;
	ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
	if (ret) {
		pr_debug("failed to set energy perf value (%d)\n", ret);
		return ret;
	}

	value = READ_ONCE(cpudata->cppc_req_cached);
	value &= ~AMD_CPPC_EPP_PERF_MASK;
	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
	WRITE_ONCE(cpudata->cppc_req_cached, value);

	return ret;
}

static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
	return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}

static int shmem_cppc_enable(struct cpufreq_policy *policy)
{
	return cppc_set_enable(policy->cpu, 1);
}

DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);

static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
	return static_call(amd_pstate_cppc_enable)(policy);
}

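/*
 * Populate the cached perf limits from MSR_AMD_CPPC_CAP1 and the boost ratio
 * numerator, and snapshot the BIOS-programmed min_perf from MSR_AMD_CPPC_REQ
 * so it can be restored on CPU offline and suspend.
 */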
static int msr_init_perf(struct amd_cpudata *cpudata)
{
	union perf_cached perf = READ_ONCE(cpudata->perf);
	u64 cap1, numerator, cppc_req;
	u8 min_perf;

	int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
	if (ret)
		return ret;

	ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
	if (ret)
		return ret;

	WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
	min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);

	/*
	 * Clear out the min_perf part to check if the rest of the MSR is 0; if yes, this is an
	 * indication that the min_perf value is the one specified through the BIOS option.
	 */
	cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);

	if (!cppc_req)
		perf.bios_min_perf = min_perf;

	perf.highest_perf = numerator;
	perf.max_limit_perf = numerator;
	perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
	perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
	perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
	perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
	WRITE_ONCE(cpudata->perf, perf);
	WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));

	return 0;
}

static int shmem_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	union perf_cached perf = READ_ONCE(cpudata->perf);
	u64 numerator;
	bool auto_sel;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
	if (ret)
		return ret;

	perf.highest_perf = numerator;
	perf.max_limit_perf = numerator;
	perf.min_limit_perf = cppc_perf.lowest_perf;
	perf.nominal_perf = cppc_perf.nominal_perf;
	perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
	perf.lowest_perf = cppc_perf.lowest_perf;
	WRITE_ONCE(cpudata->perf, perf);
	WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);

	if (cppc_state == AMD_PSTATE_ACTIVE)
		return 0;

	ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel);
	if (ret) {
		pr_warn("failed to get auto_sel, ret: %d\n", ret);
		return 0;
	}

	ret = cppc_set_auto_sel(cpudata->cpu,
				(cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);

	if (ret)
		pr_warn("failed to set auto_sel, ret: %d\n", ret);

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
			     u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	struct cppc_perf_ctrls perf_ctrls;
	u64 value, prev;
	int ret;

	if (cppc_state == AMD_PSTATE_ACTIVE) {
		int ret = shmem_set_epp(policy, epp);

		if (ret)
			return ret;
	}

	value = prev = READ_ONCE(cpudata->cppc_req_cached);

	value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
		   AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
	value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
	value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
	value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
	value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);

	if (trace_amd_pstate_epp_perf_enabled()) {
		union perf_cached perf = READ_ONCE(cpudata->perf);

		trace_amd_pstate_epp_perf(cpudata->cpu,
					  perf.highest_perf,
					  epp,
					  min_perf,
					  max_perf,
					  policy->boost_enabled,
					  value != prev);
	}

	if (value == prev)
		return 0;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
	if (ret)
		return ret;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	return 0;
}

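/*
 * Sample the APERF/MPERF/TSC deltas and derive the effective frequency as
 * cpu_khz * delta_aperf / delta_mperf. Returns false when the counters have
 * not advanced since the previous sample.
 */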
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrq(MSR_IA32_APERF, aperf);
	rdmsrq(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;

	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}

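/*
 * Common entry point for perf requests from the passive and guided modes:
 * clamps des_perf into [min_perf, max_perf], caps max_perf at nominal_perf
 * when boost is unsupported, and in guided mode (with a dynamic-switching
 * governor) hands target selection to the platform by moving the request
 * into min_perf and clearing des_perf.
 */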
static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
			      u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
	union perf_cached perf = READ_ONCE(cpudata->perf);

	if (!policy)
		return;

	/* limit the max perf when core performance boost feature is disabled */
	if (!cpudata->boost_supported)
		max_perf = min_t(u8, perf.nominal_perf, max_perf);

	des_perf = clamp_t(u8, des_perf, min_perf, max_perf);

	policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);

	if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
		min_perf = des_perf;
		des_perf = 0;
	}

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
				      cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
				      cpudata->cpu, fast_switch);
	}

	amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
	/*
	 * Initialize the lower frequency limit (i.e. policy->min) with the
	 * lowest_nonlinear_frequency, or the min frequency specified in the
	 * BIOS (if any), overriding the initial value set by the cpufreq core
	 * and the amd-pstate qos requests.
	 */
	if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
		struct cpufreq_policy *policy __free(put_cpufreq_policy) =
					      cpufreq_cpu_get(policy_data->cpu);
		struct amd_cpudata *cpudata;
		union perf_cached perf;

		if (!policy)
			return -EINVAL;

		cpudata = policy->driver_data;
		perf = READ_ONCE(cpudata->perf);

		if (perf.bios_min_perf)
			policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
							perf.bios_min_perf);
		else
			policy_data->min = cpudata->lowest_nonlinear_freq;
	}

	cpufreq_verify_within_cpu_limits(policy_data);

	return 0;
}

static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);

	perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
	WRITE_ONCE(cpudata->max_limit_freq, policy->max);

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
		perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
		WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
	} else {
		perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
		WRITE_ONCE(cpudata->min_limit_freq, policy->min);
	}

	WRITE_ONCE(cpudata->perf, perf);
}

static int amd_pstate_update_freq(struct cpufreq_policy *policy,
				  unsigned int target_freq, bool fast_switch)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata;
	union perf_cached perf;
	u8 des_perf;

	cpudata = policy->driver_data;

	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);

	perf = READ_ONCE(cpudata->perf);

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);

	WARN_ON(fast_switch && !policy->fast_switch_enabled);
	/*
	 * If fast_switch is desired, then there aren't any registered
	 * transition notifiers. See comment for
	 * cpufreq_enable_fast_switch().
	 */
	if (!fast_switch)
		cpufreq_freq_transition_begin(policy, &freqs);

	amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
			  perf.max_limit_perf, fast_switch,
			  policy->governor->flags);

	if (!fast_switch)
		cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	return amd_pstate_update_freq(policy, target_freq, false);
}

static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
					   unsigned int target_freq)
{
	if (!amd_pstate_update_freq(policy, target_freq, true))
		return target_freq;
	return policy->cur;
}

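/*
 * adjust_perf fast path used by the schedutil governor: translate the
 * scheduler's capacity-relative hints into the CPPC performance scale by
 * scaling them against highest_perf, then clamp the result to the policy
 * limits.
 */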
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	u8 max_perf, min_perf, des_perf, cap_perf;
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata;
	union perf_cached perf;

	if (!policy)
		return;

	cpudata = policy->driver_data;

	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);

	perf = READ_ONCE(cpudata->perf);
	cap_perf = perf.highest_perf;

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
	else
		min_perf = cap_perf;

	if (min_perf < perf.min_limit_perf)
		min_perf = perf.min_limit_perf;

	max_perf = perf.max_limit_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
			  policy->governor->flags);
}

static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);
	u32 nominal_freq, max_freq;
	int ret = 0;

	nominal_freq = READ_ONCE(cpudata->nominal_freq);
	max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);

	if (on)
		policy->cpuinfo.max_freq = max_freq;
	else if (policy->cpuinfo.max_freq > nominal_freq)
		policy->cpuinfo.max_freq = nominal_freq;

	policy->max = policy->cpuinfo.max_freq;

	if (cppc_state == AMD_PSTATE_PASSIVE) {
		ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq);
		if (ret < 0)
			pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu);
	}

	return ret < 0 ? ret : 0;
}

static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EOPNOTSUPP;
	}

	ret = amd_pstate_cpu_boost_update(policy, state);
	refresh_frequency_limits(policy);

	return ret;
}

static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
{
	u64 boost_val;
	int ret = -1;

	/*
	 * If the platform has no CPB support or it is disabled, initialize the
	 * driver's boost_enabled state to false. This is not an error for the
	 * cpufreq core to handle.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_CPB)) {
		pr_debug_once("Boost CPB capabilities not present in the processor\n");
		ret = 0;
		goto exit_err;
	}

	ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
	if (ret) {
		pr_err_once("failed to read initial CPU boost state!\n");
		ret = -EIO;
		goto exit_err;
	}

	if (!(boost_val & MSR_K7_HWCR_CPB_DIS))
		cpudata->boost_supported = true;

	return 0;

exit_err:
	cpudata->boost_supported = false;
	return ret;
}

static void amd_perf_ctl_reset(unsigned int cpu)
{
	wrmsrq_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}

#define CPPC_MAX_PERF	U8_MAX

static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
{
	/* user disabled or not detected */
	if (!amd_pstate_prefcore)
		return;

	/* should use amd-hfi instead */
	if (cpu_feature_enabled(X86_FEATURE_AMD_WORKLOAD_CLASS) &&
	    IS_ENABLED(CONFIG_AMD_HFI)) {
		amd_pstate_prefcore = false;
		return;
	}

	cpudata->hw_prefcore = true;

	/* Priorities must be initialized before ITMT support can be toggled on. */
	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
}

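/*
 * Re-read the preferred-core ranking for @policy's CPU and propagate any
 * change to the scheduler's ITMT priorities so that task placement follows
 * the updated ranking.
 */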
static void amd_pstate_update_limits(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;
	u32 prev_high = 0, cur_high = 0;
	bool highest_perf_changed = false;
	unsigned int cpu = policy->cpu;

	if (!amd_pstate_prefcore)
		return;

	if (amd_get_highest_perf(cpu, &cur_high))
		return;

	cpudata = policy->driver_data;

	prev_high = READ_ONCE(cpudata->prefcore_ranking);
	highest_perf_changed = (prev_high != cur_high);
	if (highest_perf_changed) {
		WRITE_ONCE(cpudata->prefcore_ranking, cur_high);

		if (cur_high < CPPC_MAX_PERF) {
			sched_set_itmt_core_prio((int)cur_high, cpu);
			sched_update_asym_prefer_cpu(cpu, prev_high, cur_high);
		}
	}
}

/*
 * Get the pstate transition delay time from the ACPI tables that the firmware
 * set instead of using a hardcoded value directly.
 */
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
	u32 transition_delay_ns;

	transition_delay_ns = cppc_get_transition_latency(cpu);
	if (transition_delay_ns == CPUFREQ_ETERNAL) {
		if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
			return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
		else
			return AMD_PSTATE_TRANSITION_DELAY;
	}

	return transition_delay_ns / NSEC_PER_USEC;
}

/*
 * Get the pstate transition latency value from the ACPI tables that the
 * firmware set instead of using a hardcoded value directly.
 */
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
	u32 transition_latency;

	transition_latency = cppc_get_transition_latency(cpu);
	if (transition_latency == CPUFREQ_ETERNAL)
		return AMD_PSTATE_TRANSITION_LATENCY;

	return transition_latency;
}

/*
 * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
 * for the @cpudata object.
 *
 * Requires: all perf members of @cpudata to be initialized.
 *
 * Returns 0 on success, non-zero value on failure.
 */
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
	u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
	struct cppc_perf_caps cppc_perf;
	union perf_cached perf;
	int ret;

	ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;
	perf = READ_ONCE(cpudata->perf);

	if (quirks && quirks->nominal_freq)
		nominal_freq = quirks->nominal_freq;
	else
		nominal_freq = cppc_perf.nominal_freq;
	nominal_freq *= 1000;

	if (quirks && quirks->lowest_freq) {
		min_freq = quirks->lowest_freq;
		perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
		WRITE_ONCE(cpudata->perf, perf);
	} else
		min_freq = cppc_perf.lowest_freq;

	min_freq *= 1000;

	WRITE_ONCE(cpudata->nominal_freq, nominal_freq);

	max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
	lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);

	/*
	 * The values below need to be initialized correctly, otherwise the
	 * driver will fail to load:
	 * max_freq is calculated as (nominal_freq * highest_perf) / nominal_perf;
	 * lowest_nonlinear_freq is a value between [min_freq, nominal_freq].
	 * Check the _CPC objects in the ACPI tables if any values are incorrect.
	 */
	if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
		pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
		       min_freq, max_freq, nominal_freq);
		return -EINVAL;
	}

	if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
		pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
		       lowest_nonlinear_freq, min_freq, nominal_freq);
		return -EINVAL;
	}

	return 0;
}

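/*
 * Policy init for the passive and guided modes: reset PERF_CTL, read the
 * CPPC capabilities, derive the frequency bounds and register the min/max
 * frequency qos requests used for boost handling.
 */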
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;
	union perf_cached perf;
	struct device *dev;
	int ret;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	amd_pstate_init_prefcore(cpudata);

	ret = amd_pstate_init_freq(cpudata);
	if (ret)
		goto free_cpudata1;

	ret = amd_pstate_init_boost_support(cpudata);
	if (ret)
		goto free_cpudata1;

	policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
	policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);

	perf = READ_ONCE(cpudata->perf);

	policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
							      cpudata->nominal_freq,
							      perf.lowest_perf);
	policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
							      cpudata->nominal_freq,
							      perf.highest_perf);

	ret = amd_pstate_cppc_enable(policy);
	if (ret)
		goto free_cpudata1;

	policy->boost_supported = READ_ONCE(cpudata->boost_supported);

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (cpu_feature_enabled(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	policy->driver_data = cpudata;

	if (!current_pstate_driver->adjust_perf)
		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
	kfree(cpudata);
	return ret;
}

static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);

	/* Reset CPPC_REQ MSR to the BIOS value */
	amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	policy->fast_switch_possible = false;
	kfree(cpudata);
}

/* Sysfs attributes */

/*
 * This frequency indicates the maximum hardware frequency. If boost is not
 * active but supported, this frequency will be larger than the one in
 * cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	struct amd_cpudata *cpudata;
	union perf_cached perf;

	cpudata = policy->driver_data;
	perf = READ_ONCE(cpudata->perf);

	return sysfs_emit(buf, "%u\n",
			  perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	struct amd_cpudata *cpudata;
	union perf_cached perf;

	cpudata = policy->driver_data;
	perf = READ_ONCE(cpudata->perf);

	return sysfs_emit(buf, "%u\n",
			  perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}

/*
 * On some ASICs the highest_perf is not the one in the _CPC table, so we need
 * to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	struct amd_cpudata *cpudata;

	cpudata = policy->driver_data;

	return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
}

static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
						char *buf)
{
	u8 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->prefcore_ranking);

	return sysfs_emit(buf, "%u\n", perf);
}

static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
					   char *buf)
{
	bool hw_prefcore;
	struct amd_cpudata *cpudata = policy->driver_data;

	hw_prefcore = READ_ONCE(cpudata->hw_prefcore);

	return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int offset = 0;
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sysfs_emit_at(buf, offset, "%s\n",
				     energy_perf_strings[EPP_INDEX_PERFORMANCE]);

	while (energy_perf_strings[i] != NULL)
		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);

	offset += sysfs_emit_at(buf, offset, "\n");

	return offset;
}

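/*
 * Store a new energy_performance_preference: accept one of the strings in
 * energy_perf_strings[] and program the matching raw EPP value; "default"
 * restores the EPP value chosen at policy init. Any preference other than
 * "performance" is rejected while the performance policy is active.
 */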
static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	char str_preference[21];
	ssize_t ret;
	u8 epp;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0)
		return -EINVAL;

	if (!ret)
		epp = cpudata->epp_default;
	else
		epp = epp_values[ret];

	if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		pr_debug("EPP cannot be set under performance policy\n");
		return -EBUSY;
	}

	ret = amd_pstate_set_epp(policy, epp);

	return ret ? ret : count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	u8 preference, epp;

	epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);

	switch (epp) {
	case AMD_CPPC_EPP_PERFORMANCE:
		preference = EPP_INDEX_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
		preference = EPP_INDEX_BALANCE_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_POWERSAVE:
		preference = EPP_INDEX_BALANCE_POWERSAVE;
		break;
	case AMD_CPPC_EPP_POWERSAVE:
		preference = EPP_INDEX_POWERSAVE;
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}

static void amd_pstate_driver_cleanup(void)
{
	if (amd_pstate_prefcore)
		sched_clear_itmt_support();

	cppc_state = AMD_PSTATE_DISABLE;
	current_pstate_driver = NULL;
}

static int amd_pstate_set_driver(int mode_idx)
{
	if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
		cppc_state = mode_idx;
		if (cppc_state == AMD_PSTATE_DISABLE)
			pr_info("driver is explicitly disabled\n");

		if (cppc_state == AMD_PSTATE_ACTIVE)
			current_pstate_driver = &amd_pstate_epp_driver;

		if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
			current_pstate_driver = &amd_pstate_driver;

		return 0;
	}

	return -EINVAL;
}

static int amd_pstate_register_driver(int mode)
{
	int ret;

	ret = amd_pstate_set_driver(mode);
	if (ret)
		return ret;

	cppc_state = mode;

	/* at least one CPU supports CPB */
	current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);

	ret = cpufreq_register_driver(current_pstate_driver);
	if (ret) {
		amd_pstate_driver_cleanup();
		return ret;
	}

	/* Enable ITMT support once all CPUs have initialized their asym priorities. */
	if (amd_pstate_prefcore)
		sched_set_itmt_support();

	return 0;
}

static int amd_pstate_unregister_driver(int dummy)
{
	cpufreq_unregister_driver(current_pstate_driver);
	amd_pstate_driver_cleanup();
	return 0;
}

static int amd_pstate_change_mode_without_dvr_change(int mode)
{
	int cpu = 0;

	cppc_state = mode;

	if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
		return 0;

	for_each_present_cpu(cpu) {
		cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
	}

	return 0;
}

static int amd_pstate_change_driver_mode(int mode)
{
	int ret;

	ret = amd_pstate_unregister_driver(0);
	if (ret)
		return ret;

	ret = amd_pstate_register_driver(mode);
	if (ret)
		return ret;

	return 0;
}

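/*
 * Mode transition table, indexed by [current mode][requested mode]. NULL
 * entries are no-ops; transitions either toggle autonomous selection in
 * place or unregister and re-register the cpufreq driver.
 */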
static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = {
	[AMD_PSTATE_DISABLE]         = {
		[AMD_PSTATE_DISABLE]     = NULL,
		[AMD_PSTATE_PASSIVE]     = amd_pstate_register_driver,
		[AMD_PSTATE_ACTIVE]      = amd_pstate_register_driver,
		[AMD_PSTATE_GUIDED]      = amd_pstate_register_driver,
	},
	[AMD_PSTATE_PASSIVE]         = {
		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE]     = NULL,
		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
		[AMD_PSTATE_GUIDED]      = amd_pstate_change_mode_without_dvr_change,
	},
	[AMD_PSTATE_ACTIVE]          = {
		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_driver_mode,
		[AMD_PSTATE_ACTIVE]      = NULL,
		[AMD_PSTATE_GUIDED]      = amd_pstate_change_driver_mode,
	},
	[AMD_PSTATE_GUIDED]          = {
		[AMD_PSTATE_DISABLE]     = amd_pstate_unregister_driver,
		[AMD_PSTATE_PASSIVE]     = amd_pstate_change_mode_without_dvr_change,
		[AMD_PSTATE_ACTIVE]      = amd_pstate_change_driver_mode,
		[AMD_PSTATE_GUIDED]      = NULL,
	},
};

static ssize_t amd_pstate_show_status(char *buf)
{
	if (!current_pstate_driver)
		return sysfs_emit(buf, "disable\n");

	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}

int amd_pstate_get_status(void)
{
	return cppc_state;
}
EXPORT_SYMBOL_GPL(amd_pstate_get_status);

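/*
 * Switch the driver mode at runtime, e.g. via
 * "echo passive > /sys/devices/system/cpu/amd_pstate/status".
 */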
int amd_pstate_update_status(const char *buf, size_t size)
{
	int mode_idx;

	if (size > strlen("passive") || size < strlen("active"))
		return -EINVAL;

	mode_idx = get_mode_idx_from_str(buf, size);

	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
		return -EINVAL;

	if (mode_state_machine[cppc_state][mode_idx]) {
		guard(mutex)(&amd_pstate_driver_lock);
		return mode_state_machine[cppc_state][mode_idx](mode_idx);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amd_pstate_update_status);

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	guard(mutex)(&amd_pstate_driver_lock);

	return amd_pstate_show_status(buf);
}

static ssize_t status_store(struct device *a, struct device_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	ret = amd_pstate_update_status(buf, p ? p - buf : count);

	return ret < 0 ? ret : count;
}

static ssize_t prefcore_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(prefcore);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	&amd_pstate_prefcore_ranking,
	&amd_pstate_hw_prefcore,
	NULL,
};

static struct freq_attr *amd_pstate_epp_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	&amd_pstate_prefcore_ranking,
	&amd_pstate_hw_prefcore,
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static struct attribute *pstate_global_attributes[] = {
	&dev_attr_status.attr,
	&dev_attr_prefcore.attr,
	NULL
};

static const struct attribute_group amd_pstate_global_attr_group = {
	.name = "amd_pstate",
	.attrs = pstate_global_attributes,
};

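/* Classify the platform from the FADT preferred PM profile. */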
static bool amd_pstate_acpi_pm_profile_server(void)
{
	switch (acpi_gbl_FADT.preferred_profile) {
	case PM_ENTERPRISE_SERVER:
	case PM_SOHO_SERVER:
	case PM_PERFORMANCE_SERVER:
		return true;
	}
	return false;
}

static bool amd_pstate_acpi_pm_profile_undefined(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED)
		return true;
	if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES)
		return true;
	return false;
}

static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata;
	union perf_cached perf;
	struct device *dev;
	int ret;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	amd_pstate_init_prefcore(cpudata);

	ret = amd_pstate_init_freq(cpudata);
	if (ret)
		goto free_cpudata1;

	ret = amd_pstate_init_boost_support(cpudata);
	if (ret)
		goto free_cpudata1;

	perf = READ_ONCE(cpudata->perf);

	policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
							      cpudata->nominal_freq,
							      perf.lowest_perf);
	policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
							      cpudata->nominal_freq,
							      perf.highest_perf);
	policy->driver_data = cpudata;

	ret = amd_pstate_cppc_enable(policy);
	if (ret)
		goto free_cpudata1;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	policy->boost_supported = READ_ONCE(cpudata->boost_supported);

	/*
	 * Set the policy to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	if (amd_pstate_acpi_pm_profile_server() ||
	    amd_pstate_acpi_pm_profile_undefined()) {
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
		cpudata->epp_default = amd_pstate_get_epp(cpudata);
	} else {
		policy->policy = CPUFREQ_POLICY_POWERSAVE;
		cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
	}

	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
	if (ret)
		return ret;

	current_pstate_driver->adjust_perf = NULL;

	return 0;

free_cpudata1:
	pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
	kfree(cpudata);
	return ret;
}

static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata) {
		union perf_cached perf = READ_ONCE(cpudata->perf);

		/* Reset CPPC_REQ MSR to the BIOS value */
		amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);

		kfree(cpudata);
		policy->driver_data = NULL;
	}

	pr_debug("CPU %d exiting\n", policy->cpu);
}

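/*
 * Apply the current policy limits in active (EPP) mode: only the min/max
 * limits and the EPP hint are written; desired perf stays 0 so the platform
 * selects the operating point autonomously.
 */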
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf;
	u8 epp;

	if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
		amd_pstate_update_min_max_limit(policy);

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		epp = 0;
	else
		epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);

	perf = READ_ONCE(cpudata->perf);

	return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
				      perf.max_limit_perf, epp, false);
}

static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	cpudata->policy = policy->policy;

	ret = amd_pstate_epp_update_limit(policy);
	if (ret)
		return ret;

	/*
	 * policy->cur is never updated with the amd_pstate_epp driver, but it
	 * is used as a stale frequency value. So, keep it within limits.
	 */
	policy->cur = policy->min;

	return 0;
}

static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
{
	return amd_pstate_cppc_enable(policy);
}

static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);

	/*
	 * Reset CPPC_REQ MSR to the BIOS value. This allows us to retain the BIOS specified
	 * min_perf value across kexec reboots. If this CPU is just onlined normally after this,
	 * the limits, epp and desired perf will get reset to the cached values in the cpudata
	 * struct.
	 */
	return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
}

static int amd_pstate_suspend(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);
	int ret;

	/*
	 * Reset CPPC_REQ MSR to the BIOS value. This allows us to retain the BIOS specified
	 * min_perf value across kexec reboots. If this CPU is just resumed back without kexec,
	 * the limits, epp and desired perf will get reset to the cached values in the cpudata
	 * struct.
	 */
	ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
	if (ret)
		return ret;

	/* invalidate to ensure it's rewritten during resume */
	cpudata->cppc_req_cached = 0;

	/* set this flag to avoid setting core offline */
	cpudata->suspended = true;

	return 0;
}

static int amd_pstate_resume(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	union perf_cached perf = READ_ONCE(cpudata->perf);
	int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);

	/* Set CPPC_REQ to last sane value until the governor updates it */
	return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
				      0U, false);
}

static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata->suspended) {
		int ret;

		/* enable amd pstate from suspend state */
		ret = amd_pstate_epp_update_limit(policy);
		if (ret)
			return ret;

		cpudata->suspended = false;
	}

	return 0;
}

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.fast_switch	= amd_pstate_fast_switch,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.online		= amd_pstate_cpu_online,
	.offline	= amd_pstate_cpu_offline,
	.suspend	= amd_pstate_suspend,
	.resume		= amd_pstate_resume,
	.set_boost	= amd_pstate_set_boost,
	.update_limits	= amd_pstate_update_limits,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};

static struct cpufreq_driver amd_pstate_epp_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= amd_pstate_verify,
	.setpolicy	= amd_pstate_epp_set_policy,
	.init		= amd_pstate_epp_cpu_init,
	.exit		= amd_pstate_epp_cpu_exit,
	.offline	= amd_pstate_cpu_offline,
	.online		= amd_pstate_cpu_online,
	.suspend	= amd_pstate_suspend,
	.resume		= amd_pstate_epp_resume,
	.update_limits	= amd_pstate_update_limits,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate-epp",
	.attr		= amd_pstate_epp_attr,
};

/*
 * CPPC is not supported on family ID 17H with model IDs ranging from 0x10 to
 * 0x2F. Show a debug message that helps to check whether the CPU has CPPC
 * support, to diagnose loading issues.
 */
static bool amd_cppc_supported(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	bool warn = false;

	if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
		pr_debug_once("CPPC feature is not supported by the processor\n");
		return false;
	}

	/*
	 * If the CPPC feature is disabled in the BIOS for processors
	 * that support MSR-based CPPC, the AMD Pstate driver may not
	 * function correctly.
	 *
	 * For such processors, check the CPPC flag and display a
	 * warning message if the platform supports CPPC.
	 *
	 * Note: The code check below will not abort the driver
	 * registration process because the code is added for
	 * debugging purposes. Besides, it may still be possible for
	 * the driver to work using the shared-memory mechanism.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
		if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
			switch (c->x86_model) {
			case 0x60 ... 0x6F:
			case 0x80 ... 0xAF:
				warn = true;
				break;
			}
		} else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
			   cpu_feature_enabled(X86_FEATURE_ZEN4)) {
			switch (c->x86_model) {
			case 0x10 ... 0x1F:
			case 0x40 ... 0xAF:
				warn = true;
				break;
			}
		} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
			warn = true;
		}
	}

	if (warn)
		pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
			     "Please enable it if your BIOS has the CPPC option.\n");
	return true;
}

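/*
 * Driver entry point: resolves the operating mode from the command line or
 * the Kconfig default, selects the MSR or shared-memory backends, registers
 * the cpufreq driver and exposes the global sysfs attributes.
 */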
static int __init amd_pstate_init(void)
{
	struct device *dev_root;
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	/* show debug message only if CPPC is not supported */
	if (!amd_cppc_supported())
		return -EOPNOTSUPP;

	/* show warning message when BIOS broken or ACPI disabled */
	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	quirks = NULL;

	/* check if this machine needs CPPC quirks */
	dmi_check_system(amd_pstate_quirks_table);

	/*
	 * Determine the driver mode from the command line or kernel config.
	 * If no command line input is provided, cppc_state will be
	 * AMD_PSTATE_UNDEFINED. Command line options will override the kernel
	 * config settings.
	 */
	if (cppc_state == AMD_PSTATE_UNDEFINED) {
		/*
		 * Disable on the following configs by default:
		 * 1. Undefined platforms
		 * 2. Server platforms with CPUs older than Family 0x1A.
		 */
		if (amd_pstate_acpi_pm_profile_undefined() ||
		    (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
			pr_info("driver load is disabled, boot with specific mode to enable this\n");
			return -ENODEV;
		}
		/* get driver mode from kernel config option [1:4] */
		cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
	}

	if (cppc_state == AMD_PSTATE_DISABLE) {
		pr_info("driver load is disabled, boot with specific mode to enable this\n");
		return -ENODEV;
	}

	/* capability check */
	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
	} else {
		pr_debug("AMD CPPC shared memory based functionality is supported\n");
		static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
		static_call_update(amd_pstate_init_perf, shmem_init_perf);
		static_call_update(amd_pstate_update_perf, shmem_update_perf);
		static_call_update(amd_pstate_get_epp, shmem_get_epp);
		static_call_update(amd_pstate_set_epp, shmem_set_epp);
	}

	if (amd_pstate_prefcore) {
		ret = amd_detect_prefcore(&amd_pstate_prefcore);
		if (ret)
			return ret;
	}

	ret = amd_pstate_register_driver(cppc_state);
	if (ret) {
		pr_err("failed to register with return %d\n", ret);
		return ret;
	}

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
		put_device(dev_root);
		if (ret) {
			pr_err("sysfs attribute export failed with error %d.\n", ret);
			goto global_attr_free;
		}
	}

	return ret;

global_attr_free:
	cpufreq_unregister_driver(current_pstate_driver);
	return ret;
}
device_initcall(amd_pstate_init);

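/*
 * Handlers for the "amd_pstate=" and "amd_prefcore=" kernel command line
 * parameters: the former selects the operating mode (disable, passive,
 * active, guided), the latter allows disabling preferred core support.
 */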
static int __init amd_pstate_param(char *str)
{
	size_t size;
	int mode_idx;

	if (!str)
		return -EINVAL;

	size = strlen(str);
	mode_idx = get_mode_idx_from_str(str, size);

	return amd_pstate_set_driver(mode_idx);
}

static int __init amd_prefcore_param(char *str)
{
	if (!strcmp(str, "disable"))
		amd_pstate_prefcore = false;

	return 0;
}

early_param("amd_pstate", amd_pstate_param);
early_param("amd_prefcore", amd_prefcore_param);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");