// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * allows a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution. The
 * X86_FEATURE_CPPC CPU feature flag is used to distinguish between the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/topology.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#include "amd-pstate.h"
#include "amd-pstate-trace.h"

#define AMD_PSTATE_TRANSITION_LATENCY		20000
#define AMD_PSTATE_TRANSITION_DELAY		1000
#define AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY	600

#define AMD_CPPC_EPP_PERFORMANCE		0x00
#define AMD_CPPC_EPP_BALANCE_PERFORMANCE	0x80
#define AMD_CPPC_EPP_BALANCE_POWERSAVE		0xBF
#define AMD_CPPC_EPP_POWERSAVE			0xFF

static const char * const amd_pstate_mode_string[] = {
	[AMD_PSTATE_UNDEFINED]   = "undefined",
	[AMD_PSTATE_DISABLE]     = "disable",
	[AMD_PSTATE_PASSIVE]     = "passive",
	[AMD_PSTATE_ACTIVE]      = "active",
	[AMD_PSTATE_GUIDED]      = "guided",
	NULL,
};

const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
{
	if (mode < 0 || mode >= AMD_PSTATE_MAX)
		return NULL;
	return amd_pstate_mode_string[mode];
}
EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);

struct quirk_entry {
	u32 nominal_freq;
	u32 lowest_freq;
};

static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;

/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive
 * the frequency that a core is going to operate during
 * short periods of activity. EPP values will be utilized for
 * different OS profiles (balanced, performance, power savings).
 * The display strings corresponding to each EPP index are listed
 * in energy_perf_strings[]:
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
enum energy_perf_value_index {
	EPP_INDEX_DEFAULT = 0,
	EPP_INDEX_PERFORMANCE,
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
};

static const char * const energy_perf_strings[] = {
	[EPP_INDEX_DEFAULT] = "default",
	[EPP_INDEX_PERFORMANCE] = "performance",
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
	NULL
};

static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0,
	[EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
	[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
};

typedef int (*cppc_mode_transition_fn)(int);

static struct quirk_entry quirk_amd_7k62 = {
	.nominal_freq = 2600,
	.lowest_freq = 550,
};

static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
	/*
	 * Match the broken BIOS for family 17h processors supporting CPPC V2:
	 * the broken BIOS lacks the nominal_freq and lowest_freq capability
	 * definitions in the ACPI tables.
	 */
	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
		quirks = dmi->driver_data;
		pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
		return 1;
	}

	return 0;
}

static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
	{
		.callback = dmi_matched_7k62_bios_bug,
		.ident = "AMD EPYC 7K62",
		.matches = {
			DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
			DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
		},
		.driver_data = &quirk_amd_7k62,
	},
	{}
};
MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);

static inline int get_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
	return -EINVAL;
}

static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);

static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
	u64 epp;
	int ret;

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		if (!cppc_req_cached) {
			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
					    &cppc_req_cached);
			if (epp)
				return epp;
		}
		epp = (cppc_req_cached >> 24) & 0xFF;
	} else {
		ret = cppc_get_epp_perf(cpudata->cpu, &epp);
		if (ret < 0) {
			pr_debug("Could not retrieve energy perf value (%d)\n", ret);
			return -EIO;
		}
	}

	return (s16)(epp & 0xff);
}

static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
{
	s16 epp;
	int index = -EINVAL;

	epp = amd_pstate_get_epp(cpudata, 0);
	if (epp < 0)
		return epp;

	switch (epp) {
	case AMD_CPPC_EPP_PERFORMANCE:
		index = EPP_INDEX_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
		index = EPP_INDEX_BALANCE_PERFORMANCE;
		break;
	case
AMD_CPPC_EPP_BALANCE_POWERSAVE: 224 index = EPP_INDEX_BALANCE_POWERSAVE; 225 break; 226 case AMD_CPPC_EPP_POWERSAVE: 227 index = EPP_INDEX_POWERSAVE; 228 break; 229 default: 230 break; 231 } 232 233 return index; 234 } 235 236 static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf, 237 u32 des_perf, u32 max_perf, bool fast_switch) 238 { 239 if (fast_switch) 240 wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached)); 241 else 242 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, 243 READ_ONCE(cpudata->cppc_req_cached)); 244 } 245 246 DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf); 247 248 static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata, 249 u32 min_perf, u32 des_perf, 250 u32 max_perf, bool fast_switch) 251 { 252 static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf, 253 max_perf, fast_switch); 254 } 255 256 static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp) 257 { 258 int ret; 259 struct cppc_perf_ctrls perf_ctrls; 260 261 if (cpu_feature_enabled(X86_FEATURE_CPPC)) { 262 u64 value = READ_ONCE(cpudata->cppc_req_cached); 263 264 value &= ~GENMASK_ULL(31, 24); 265 value |= (u64)epp << 24; 266 WRITE_ONCE(cpudata->cppc_req_cached, value); 267 268 ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); 269 if (!ret) 270 cpudata->epp_cached = epp; 271 } else { 272 amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U, 273 cpudata->max_limit_perf, false); 274 275 perf_ctrls.energy_perf = epp; 276 ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); 277 if (ret) { 278 pr_debug("failed to set energy perf value (%d)\n", ret); 279 return ret; 280 } 281 cpudata->epp_cached = epp; 282 } 283 284 return ret; 285 } 286 287 static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata, 288 int pref_index) 289 { 290 int epp = -EINVAL; 291 int ret; 292 293 if (!pref_index) 294 epp = cpudata->epp_default; 295 296 if (epp == -EINVAL) 297 epp = epp_values[pref_index]; 298 299 if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { 300 pr_debug("EPP cannot be set under performance policy\n"); 301 return -EBUSY; 302 } 303 304 ret = amd_pstate_set_epp(cpudata, epp); 305 306 return ret; 307 } 308 309 static inline int pstate_enable(bool enable) 310 { 311 int ret, cpu; 312 unsigned long logical_proc_id_mask = 0; 313 314 if (enable == cppc_enabled) 315 return 0; 316 317 for_each_present_cpu(cpu) { 318 unsigned long logical_id = topology_logical_package_id(cpu); 319 320 if (test_bit(logical_id, &logical_proc_id_mask)) 321 continue; 322 323 set_bit(logical_id, &logical_proc_id_mask); 324 325 ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE, 326 enable); 327 if (ret) 328 return ret; 329 } 330 331 cppc_enabled = enable; 332 return 0; 333 } 334 335 static int cppc_enable(bool enable) 336 { 337 int cpu, ret = 0; 338 struct cppc_perf_ctrls perf_ctrls; 339 340 if (enable == cppc_enabled) 341 return 0; 342 343 for_each_present_cpu(cpu) { 344 ret = cppc_set_enable(cpu, enable); 345 if (ret) 346 return ret; 347 348 /* Enable autonomous mode for EPP */ 349 if (cppc_state == AMD_PSTATE_ACTIVE) { 350 /* Set desired perf as zero to allow EPP firmware control */ 351 perf_ctrls.desired_perf = 0; 352 ret = cppc_set_perf(cpu, &perf_ctrls); 353 if (ret) 354 return ret; 355 } 356 } 357 358 cppc_enabled = enable; 359 return ret; 360 } 361 362 DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable); 363 364 static inline int amd_pstate_enable(bool enable) 365 { 366 return static_call(amd_pstate_enable)(enable); 367 } 368 369 
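/*
 * pstate_init_perf: read the CPPC capability levels from MSR_AMD_CPPC_CAP1
 * and cache the highest, nominal, lowest nonlinear and lowest perf values,
 * along with the initial perf limits and preferred-core ranking, in @cpudata.
 */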
static int pstate_init_perf(struct amd_cpudata *cpudata) 370 { 371 u64 cap1; 372 373 int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, 374 &cap1); 375 if (ret) 376 return ret; 377 378 WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1)); 379 WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1)); 380 WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1)); 381 WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1)); 382 WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1)); 383 WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1)); 384 WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1)); 385 return 0; 386 } 387 388 static int cppc_init_perf(struct amd_cpudata *cpudata) 389 { 390 struct cppc_perf_caps cppc_perf; 391 392 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); 393 if (ret) 394 return ret; 395 396 WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf); 397 WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf); 398 WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf); 399 WRITE_ONCE(cpudata->lowest_nonlinear_perf, 400 cppc_perf.lowest_nonlinear_perf); 401 WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf); 402 WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf); 403 WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf); 404 405 if (cppc_state == AMD_PSTATE_ACTIVE) 406 return 0; 407 408 ret = cppc_get_auto_sel_caps(cpudata->cpu, &cppc_perf); 409 if (ret) { 410 pr_warn("failed to get auto_sel, ret: %d\n", ret); 411 return 0; 412 } 413 414 ret = cppc_set_auto_sel(cpudata->cpu, 415 (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1); 416 417 if (ret) 418 pr_warn("failed to set auto_sel, ret: %d\n", ret); 419 420 return ret; 421 } 422 423 DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf); 424 425 static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata) 426 { 427 return static_call(amd_pstate_init_perf)(cpudata); 428 } 429 430 static void cppc_update_perf(struct amd_cpudata *cpudata, 431 u32 min_perf, u32 des_perf, 432 u32 max_perf, bool fast_switch) 433 { 434 struct cppc_perf_ctrls perf_ctrls; 435 436 perf_ctrls.max_perf = max_perf; 437 perf_ctrls.min_perf = min_perf; 438 perf_ctrls.desired_perf = des_perf; 439 440 cppc_set_perf(cpudata->cpu, &perf_ctrls); 441 } 442 443 static inline bool amd_pstate_sample(struct amd_cpudata *cpudata) 444 { 445 u64 aperf, mperf, tsc; 446 unsigned long flags; 447 448 local_irq_save(flags); 449 rdmsrl(MSR_IA32_APERF, aperf); 450 rdmsrl(MSR_IA32_MPERF, mperf); 451 tsc = rdtsc(); 452 453 if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { 454 local_irq_restore(flags); 455 return false; 456 } 457 458 local_irq_restore(flags); 459 460 cpudata->cur.aperf = aperf; 461 cpudata->cur.mperf = mperf; 462 cpudata->cur.tsc = tsc; 463 cpudata->cur.aperf -= cpudata->prev.aperf; 464 cpudata->cur.mperf -= cpudata->prev.mperf; 465 cpudata->cur.tsc -= cpudata->prev.tsc; 466 467 cpudata->prev.aperf = aperf; 468 cpudata->prev.mperf = mperf; 469 cpudata->prev.tsc = tsc; 470 471 cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf); 472 473 return true; 474 } 475 476 static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf, 477 u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags) 478 { 479 unsigned long max_freq; 480 struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu); 481 u64 prev = READ_ONCE(cpudata->cppc_req_cached); 482 u32 nominal_perf = 
READ_ONCE(cpudata->nominal_perf); 483 u64 value = prev; 484 485 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 486 cpudata->max_limit_perf); 487 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 488 cpudata->max_limit_perf); 489 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 490 491 max_freq = READ_ONCE(cpudata->max_limit_freq); 492 policy->cur = div_u64(des_perf * max_freq, max_perf); 493 494 if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) { 495 min_perf = des_perf; 496 des_perf = 0; 497 } 498 499 value &= ~AMD_CPPC_MIN_PERF(~0L); 500 value |= AMD_CPPC_MIN_PERF(min_perf); 501 502 value &= ~AMD_CPPC_DES_PERF(~0L); 503 value |= AMD_CPPC_DES_PERF(des_perf); 504 505 /* limit the max perf when core performance boost feature is disabled */ 506 if (!cpudata->boost_supported) 507 max_perf = min_t(unsigned long, nominal_perf, max_perf); 508 509 value &= ~AMD_CPPC_MAX_PERF(~0L); 510 value |= AMD_CPPC_MAX_PERF(max_perf); 511 512 if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) { 513 trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, 514 cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc, 515 cpudata->cpu, (value != prev), fast_switch); 516 } 517 518 if (value == prev) 519 goto cpufreq_policy_put; 520 521 WRITE_ONCE(cpudata->cppc_req_cached, value); 522 523 amd_pstate_update_perf(cpudata, min_perf, des_perf, 524 max_perf, fast_switch); 525 526 cpufreq_policy_put: 527 cpufreq_cpu_put(policy); 528 } 529 530 static int amd_pstate_verify(struct cpufreq_policy_data *policy) 531 { 532 cpufreq_verify_within_cpu_limits(policy); 533 534 return 0; 535 } 536 537 static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy) 538 { 539 u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf; 540 struct amd_cpudata *cpudata = policy->driver_data; 541 542 if (cpudata->boost_supported && !policy->boost_enabled) 543 max_perf = READ_ONCE(cpudata->nominal_perf); 544 else 545 max_perf = READ_ONCE(cpudata->highest_perf); 546 547 max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq); 548 min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq); 549 550 lowest_perf = READ_ONCE(cpudata->lowest_perf); 551 if (min_limit_perf < lowest_perf) 552 min_limit_perf = lowest_perf; 553 554 if (max_limit_perf < min_limit_perf) 555 max_limit_perf = min_limit_perf; 556 557 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 558 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 559 WRITE_ONCE(cpudata->max_limit_freq, policy->max); 560 WRITE_ONCE(cpudata->min_limit_freq, policy->min); 561 562 return 0; 563 } 564 565 static int amd_pstate_update_freq(struct cpufreq_policy *policy, 566 unsigned int target_freq, bool fast_switch) 567 { 568 struct cpufreq_freqs freqs; 569 struct amd_cpudata *cpudata = policy->driver_data; 570 unsigned long max_perf, min_perf, des_perf, cap_perf; 571 572 if (!cpudata->max_freq) 573 return -ENODEV; 574 575 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 576 amd_pstate_update_min_max_limit(policy); 577 578 cap_perf = READ_ONCE(cpudata->highest_perf); 579 min_perf = READ_ONCE(cpudata->lowest_perf); 580 max_perf = cap_perf; 581 582 freqs.old = policy->cur; 583 freqs.new = target_freq; 584 585 des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf, 586 cpudata->max_freq); 587 588 WARN_ON(fast_switch && !policy->fast_switch_enabled); 589 /* 590 * If fast_switch is desired, then there aren't 
any registered 591 * transition notifiers. See comment for 592 * cpufreq_enable_fast_switch(). 593 */ 594 if (!fast_switch) 595 cpufreq_freq_transition_begin(policy, &freqs); 596 597 amd_pstate_update(cpudata, min_perf, des_perf, 598 max_perf, fast_switch, policy->governor->flags); 599 600 if (!fast_switch) 601 cpufreq_freq_transition_end(policy, &freqs, false); 602 603 return 0; 604 } 605 606 static int amd_pstate_target(struct cpufreq_policy *policy, 607 unsigned int target_freq, 608 unsigned int relation) 609 { 610 return amd_pstate_update_freq(policy, target_freq, false); 611 } 612 613 static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy, 614 unsigned int target_freq) 615 { 616 if (!amd_pstate_update_freq(policy, target_freq, true)) 617 return target_freq; 618 return policy->cur; 619 } 620 621 static void amd_pstate_adjust_perf(unsigned int cpu, 622 unsigned long _min_perf, 623 unsigned long target_perf, 624 unsigned long capacity) 625 { 626 unsigned long max_perf, min_perf, des_perf, 627 cap_perf, lowest_nonlinear_perf; 628 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 629 struct amd_cpudata *cpudata; 630 631 if (!policy) 632 return; 633 634 cpudata = policy->driver_data; 635 636 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) 637 amd_pstate_update_min_max_limit(policy); 638 639 640 cap_perf = READ_ONCE(cpudata->highest_perf); 641 lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf); 642 643 des_perf = cap_perf; 644 if (target_perf < capacity) 645 des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); 646 647 min_perf = READ_ONCE(cpudata->lowest_perf); 648 if (_min_perf < capacity) 649 min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); 650 651 if (min_perf < lowest_nonlinear_perf) 652 min_perf = lowest_nonlinear_perf; 653 654 max_perf = cap_perf; 655 if (max_perf < min_perf) 656 max_perf = min_perf; 657 658 des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf); 659 660 amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true, 661 policy->governor->flags); 662 cpufreq_cpu_put(policy); 663 } 664 665 static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on) 666 { 667 struct amd_cpudata *cpudata = policy->driver_data; 668 struct cppc_perf_ctrls perf_ctrls; 669 u32 highest_perf, nominal_perf, nominal_freq, max_freq; 670 int ret = 0; 671 672 highest_perf = READ_ONCE(cpudata->highest_perf); 673 nominal_perf = READ_ONCE(cpudata->nominal_perf); 674 nominal_freq = READ_ONCE(cpudata->nominal_freq); 675 max_freq = READ_ONCE(cpudata->max_freq); 676 677 if (boot_cpu_has(X86_FEATURE_CPPC)) { 678 u64 value = READ_ONCE(cpudata->cppc_req_cached); 679 680 value &= ~GENMASK_ULL(7, 0); 681 value |= on ? highest_perf : nominal_perf; 682 WRITE_ONCE(cpudata->cppc_req_cached, value); 683 684 wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); 685 } else { 686 perf_ctrls.max_perf = on ? highest_perf : nominal_perf; 687 ret = cppc_set_perf(cpudata->cpu, &perf_ctrls); 688 if (ret) { 689 cpufreq_cpu_release(policy); 690 pr_debug("Failed to set max perf on CPU:%d. 
ret:%d\n", 691 cpudata->cpu, ret); 692 return ret; 693 } 694 } 695 696 if (on) 697 policy->cpuinfo.max_freq = max_freq; 698 else if (policy->cpuinfo.max_freq > nominal_freq * 1000) 699 policy->cpuinfo.max_freq = nominal_freq * 1000; 700 701 policy->max = policy->cpuinfo.max_freq; 702 703 if (cppc_state == AMD_PSTATE_PASSIVE) { 704 ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq); 705 if (ret < 0) 706 pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu); 707 } 708 709 return ret < 0 ? ret : 0; 710 } 711 712 static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) 713 { 714 struct amd_cpudata *cpudata = policy->driver_data; 715 int ret; 716 717 if (!cpudata->boost_supported) { 718 pr_err("Boost mode is not supported by this processor or SBIOS\n"); 719 return -EOPNOTSUPP; 720 } 721 mutex_lock(&amd_pstate_driver_lock); 722 ret = amd_pstate_cpu_boost_update(policy, state); 723 WRITE_ONCE(cpudata->boost_state, !ret ? state : false); 724 policy->boost_enabled = !ret ? state : false; 725 refresh_frequency_limits(policy); 726 mutex_unlock(&amd_pstate_driver_lock); 727 728 return ret; 729 } 730 731 static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) 732 { 733 u64 boost_val; 734 int ret = -1; 735 736 /* 737 * If platform has no CPB support or disable it, initialize current driver 738 * boost_enabled state to be false, it is not an error for cpufreq core to handle. 739 */ 740 if (!cpu_feature_enabled(X86_FEATURE_CPB)) { 741 pr_debug_once("Boost CPB capabilities not present in the processor\n"); 742 ret = 0; 743 goto exit_err; 744 } 745 746 /* at least one CPU supports CPB, even if others fail later on to set up */ 747 current_pstate_driver->boost_enabled = true; 748 749 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); 750 if (ret) { 751 pr_err_once("failed to read initial CPU boost state!\n"); 752 ret = -EIO; 753 goto exit_err; 754 } 755 756 if (!(boost_val & MSR_K7_HWCR_CPB_DIS)) 757 cpudata->boost_supported = true; 758 759 return 0; 760 761 exit_err: 762 cpudata->boost_supported = false; 763 return ret; 764 } 765 766 static void amd_perf_ctl_reset(unsigned int cpu) 767 { 768 wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0); 769 } 770 771 /* 772 * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks 773 * due to locking, so queue the work for later. 774 */ 775 static void amd_pstste_sched_prefcore_workfn(struct work_struct *work) 776 { 777 sched_set_itmt_support(); 778 } 779 static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn); 780 781 #define CPPC_MAX_PERF U8_MAX 782 783 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) 784 { 785 /* user disabled or not detected */ 786 if (!amd_pstate_prefcore) 787 return; 788 789 cpudata->hw_prefcore = true; 790 791 /* 792 * The priorities can be set regardless of whether or not 793 * sched_set_itmt_support(true) has been called and it is valid to 794 * update them at any time after it has been called. 
795 */ 796 sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu); 797 798 schedule_work(&sched_prefcore_work); 799 } 800 801 static void amd_pstate_update_limits(unsigned int cpu) 802 { 803 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 804 struct amd_cpudata *cpudata; 805 u32 prev_high = 0, cur_high = 0; 806 int ret; 807 bool highest_perf_changed = false; 808 809 if (!policy) 810 return; 811 812 cpudata = policy->driver_data; 813 814 if (!amd_pstate_prefcore) 815 return; 816 817 mutex_lock(&amd_pstate_driver_lock); 818 ret = amd_get_highest_perf(cpu, &cur_high); 819 if (ret) 820 goto free_cpufreq_put; 821 822 prev_high = READ_ONCE(cpudata->prefcore_ranking); 823 highest_perf_changed = (prev_high != cur_high); 824 if (highest_perf_changed) { 825 WRITE_ONCE(cpudata->prefcore_ranking, cur_high); 826 827 if (cur_high < CPPC_MAX_PERF) 828 sched_set_itmt_core_prio((int)cur_high, cpu); 829 } 830 831 free_cpufreq_put: 832 cpufreq_cpu_put(policy); 833 834 if (!highest_perf_changed) 835 cpufreq_update_policy(cpu); 836 837 mutex_unlock(&amd_pstate_driver_lock); 838 } 839 840 /* 841 * Get pstate transition delay time from ACPI tables that firmware set 842 * instead of using hardcode value directly. 843 */ 844 static u32 amd_pstate_get_transition_delay_us(unsigned int cpu) 845 { 846 u32 transition_delay_ns; 847 848 transition_delay_ns = cppc_get_transition_latency(cpu); 849 if (transition_delay_ns == CPUFREQ_ETERNAL) { 850 if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC)) 851 return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY; 852 else 853 return AMD_PSTATE_TRANSITION_DELAY; 854 } 855 856 return transition_delay_ns / NSEC_PER_USEC; 857 } 858 859 /* 860 * Get pstate transition latency value from ACPI tables that firmware 861 * set instead of using hardcode value directly. 862 */ 863 static u32 amd_pstate_get_transition_latency(unsigned int cpu) 864 { 865 u32 transition_latency; 866 867 transition_latency = cppc_get_transition_latency(cpu); 868 if (transition_latency == CPUFREQ_ETERNAL) 869 return AMD_PSTATE_TRANSITION_LATENCY; 870 871 return transition_latency; 872 } 873 874 /* 875 * amd_pstate_init_freq: Initialize the max_freq, min_freq, 876 * nominal_freq and lowest_nonlinear_freq for 877 * the @cpudata object. 878 * 879 * Requires: highest_perf, lowest_perf, nominal_perf and 880 * lowest_nonlinear_perf members of @cpudata to be 881 * initialized. 882 * 883 * Returns 0 on success, non-zero value on failure. 
 */
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
	int ret;
	u32 min_freq, max_freq;
	u64 numerator;
	u32 nominal_perf, nominal_freq;
	u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
	u32 boost_ratio, lowest_nonlinear_ratio;
	struct cppc_perf_caps cppc_perf;

	ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	if (quirks && quirks->lowest_freq)
		min_freq = quirks->lowest_freq * 1000;
	else
		min_freq = cppc_perf.lowest_freq * 1000;

	if (quirks && quirks->nominal_freq)
		nominal_freq = quirks->nominal_freq;
	else
		nominal_freq = cppc_perf.nominal_freq;

	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
	if (ret)
		return ret;
	boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
	max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;

	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);
	lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;

	WRITE_ONCE(cpudata->min_freq, min_freq);
	WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
	WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
	WRITE_ONCE(cpudata->max_freq, max_freq);

	/*
	 * The values below need to be initialized correctly, otherwise the driver
	 * will fail to load. max_freq is calculated as
	 * (nominal_freq * highest_perf) / nominal_perf, and lowest_nonlinear_freq
	 * must lie within [min_freq, nominal_freq].
	 * Check the _CPC objects in the ACPI tables if any value is incorrect.
	 */
	if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
		pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
		       min_freq, max_freq, nominal_freq * 1000);
		return -EINVAL;
	}

	if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
		pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
		       lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
		return -EINVAL;
	}

	return 0;
}

static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
957 */ 958 amd_perf_ctl_reset(policy->cpu); 959 dev = get_cpu_device(policy->cpu); 960 if (!dev) 961 return -ENODEV; 962 963 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL); 964 if (!cpudata) 965 return -ENOMEM; 966 967 cpudata->cpu = policy->cpu; 968 969 ret = amd_pstate_init_perf(cpudata); 970 if (ret) 971 goto free_cpudata1; 972 973 amd_pstate_init_prefcore(cpudata); 974 975 ret = amd_pstate_init_freq(cpudata); 976 if (ret) 977 goto free_cpudata1; 978 979 ret = amd_pstate_init_boost_support(cpudata); 980 if (ret) 981 goto free_cpudata1; 982 983 min_freq = READ_ONCE(cpudata->min_freq); 984 max_freq = READ_ONCE(cpudata->max_freq); 985 986 policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu); 987 policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu); 988 989 policy->min = min_freq; 990 policy->max = max_freq; 991 992 policy->cpuinfo.min_freq = min_freq; 993 policy->cpuinfo.max_freq = max_freq; 994 995 policy->boost_enabled = READ_ONCE(cpudata->boost_supported); 996 997 /* It will be updated by governor */ 998 policy->cur = policy->cpuinfo.min_freq; 999 1000 if (cpu_feature_enabled(X86_FEATURE_CPPC)) 1001 policy->fast_switch_possible = true; 1002 1003 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], 1004 FREQ_QOS_MIN, policy->cpuinfo.min_freq); 1005 if (ret < 0) { 1006 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); 1007 goto free_cpudata1; 1008 } 1009 1010 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1], 1011 FREQ_QOS_MAX, policy->cpuinfo.max_freq); 1012 if (ret < 0) { 1013 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); 1014 goto free_cpudata2; 1015 } 1016 1017 cpudata->max_limit_freq = max_freq; 1018 cpudata->min_limit_freq = min_freq; 1019 1020 policy->driver_data = cpudata; 1021 1022 if (!current_pstate_driver->adjust_perf) 1023 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; 1024 1025 return 0; 1026 1027 free_cpudata2: 1028 freq_qos_remove_request(&cpudata->req[0]); 1029 free_cpudata1: 1030 kfree(cpudata); 1031 return ret; 1032 } 1033 1034 static void amd_pstate_cpu_exit(struct cpufreq_policy *policy) 1035 { 1036 struct amd_cpudata *cpudata = policy->driver_data; 1037 1038 freq_qos_remove_request(&cpudata->req[1]); 1039 freq_qos_remove_request(&cpudata->req[0]); 1040 policy->fast_switch_possible = false; 1041 kfree(cpudata); 1042 } 1043 1044 static int amd_pstate_cpu_resume(struct cpufreq_policy *policy) 1045 { 1046 int ret; 1047 1048 ret = amd_pstate_enable(true); 1049 if (ret) 1050 pr_err("failed to enable amd-pstate during resume, return %d\n", ret); 1051 1052 return ret; 1053 } 1054 1055 static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy) 1056 { 1057 int ret; 1058 1059 ret = amd_pstate_enable(false); 1060 if (ret) 1061 pr_err("failed to disable amd-pstate during suspend, return %d\n", ret); 1062 1063 return ret; 1064 } 1065 1066 /* Sysfs attributes */ 1067 1068 /* 1069 * This frequency is to indicate the maximum hardware frequency. 1070 * If boost is not active but supported, the frequency will be larger than the 1071 * one in cpuinfo. 
1072 */ 1073 static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy, 1074 char *buf) 1075 { 1076 int max_freq; 1077 struct amd_cpudata *cpudata = policy->driver_data; 1078 1079 max_freq = READ_ONCE(cpudata->max_freq); 1080 if (max_freq < 0) 1081 return max_freq; 1082 1083 return sysfs_emit(buf, "%u\n", max_freq); 1084 } 1085 1086 static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy, 1087 char *buf) 1088 { 1089 int freq; 1090 struct amd_cpudata *cpudata = policy->driver_data; 1091 1092 freq = READ_ONCE(cpudata->lowest_nonlinear_freq); 1093 if (freq < 0) 1094 return freq; 1095 1096 return sysfs_emit(buf, "%u\n", freq); 1097 } 1098 1099 /* 1100 * In some of ASICs, the highest_perf is not the one in the _CPC table, so we 1101 * need to expose it to sysfs. 1102 */ 1103 static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy, 1104 char *buf) 1105 { 1106 u32 perf; 1107 struct amd_cpudata *cpudata = policy->driver_data; 1108 1109 perf = READ_ONCE(cpudata->highest_perf); 1110 1111 return sysfs_emit(buf, "%u\n", perf); 1112 } 1113 1114 static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy, 1115 char *buf) 1116 { 1117 u32 perf; 1118 struct amd_cpudata *cpudata = policy->driver_data; 1119 1120 perf = READ_ONCE(cpudata->prefcore_ranking); 1121 1122 return sysfs_emit(buf, "%u\n", perf); 1123 } 1124 1125 static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy, 1126 char *buf) 1127 { 1128 bool hw_prefcore; 1129 struct amd_cpudata *cpudata = policy->driver_data; 1130 1131 hw_prefcore = READ_ONCE(cpudata->hw_prefcore); 1132 1133 return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore)); 1134 } 1135 1136 static ssize_t show_energy_performance_available_preferences( 1137 struct cpufreq_policy *policy, char *buf) 1138 { 1139 int i = 0; 1140 int offset = 0; 1141 struct amd_cpudata *cpudata = policy->driver_data; 1142 1143 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) 1144 return sysfs_emit_at(buf, offset, "%s\n", 1145 energy_perf_strings[EPP_INDEX_PERFORMANCE]); 1146 1147 while (energy_perf_strings[i] != NULL) 1148 offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]); 1149 1150 offset += sysfs_emit_at(buf, offset, "\n"); 1151 1152 return offset; 1153 } 1154 1155 static ssize_t store_energy_performance_preference( 1156 struct cpufreq_policy *policy, const char *buf, size_t count) 1157 { 1158 struct amd_cpudata *cpudata = policy->driver_data; 1159 char str_preference[21]; 1160 ssize_t ret; 1161 1162 ret = sscanf(buf, "%20s", str_preference); 1163 if (ret != 1) 1164 return -EINVAL; 1165 1166 ret = match_string(energy_perf_strings, -1, str_preference); 1167 if (ret < 0) 1168 return -EINVAL; 1169 1170 mutex_lock(&amd_pstate_limits_lock); 1171 ret = amd_pstate_set_energy_pref_index(cpudata, ret); 1172 mutex_unlock(&amd_pstate_limits_lock); 1173 1174 return ret ?: count; 1175 } 1176 1177 static ssize_t show_energy_performance_preference( 1178 struct cpufreq_policy *policy, char *buf) 1179 { 1180 struct amd_cpudata *cpudata = policy->driver_data; 1181 int preference; 1182 1183 preference = amd_pstate_get_energy_pref_index(cpudata); 1184 if (preference < 0) 1185 return preference; 1186 1187 return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]); 1188 } 1189 1190 static void amd_pstate_driver_cleanup(void) 1191 { 1192 amd_pstate_enable(false); 1193 cppc_state = AMD_PSTATE_DISABLE; 1194 current_pstate_driver = NULL; 1195 } 1196 1197 static int amd_pstate_register_driver(int mode) 
1198 { 1199 int ret; 1200 1201 if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED) 1202 current_pstate_driver = &amd_pstate_driver; 1203 else if (mode == AMD_PSTATE_ACTIVE) 1204 current_pstate_driver = &amd_pstate_epp_driver; 1205 else 1206 return -EINVAL; 1207 1208 cppc_state = mode; 1209 1210 ret = amd_pstate_enable(true); 1211 if (ret) { 1212 pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n", 1213 ret); 1214 amd_pstate_driver_cleanup(); 1215 return ret; 1216 } 1217 1218 ret = cpufreq_register_driver(current_pstate_driver); 1219 if (ret) { 1220 amd_pstate_driver_cleanup(); 1221 return ret; 1222 } 1223 1224 return 0; 1225 } 1226 1227 static int amd_pstate_unregister_driver(int dummy) 1228 { 1229 cpufreq_unregister_driver(current_pstate_driver); 1230 amd_pstate_driver_cleanup(); 1231 return 0; 1232 } 1233 1234 static int amd_pstate_change_mode_without_dvr_change(int mode) 1235 { 1236 int cpu = 0; 1237 1238 cppc_state = mode; 1239 1240 if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE) 1241 return 0; 1242 1243 for_each_present_cpu(cpu) { 1244 cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1); 1245 } 1246 1247 return 0; 1248 } 1249 1250 static int amd_pstate_change_driver_mode(int mode) 1251 { 1252 int ret; 1253 1254 ret = amd_pstate_unregister_driver(0); 1255 if (ret) 1256 return ret; 1257 1258 ret = amd_pstate_register_driver(mode); 1259 if (ret) 1260 return ret; 1261 1262 return 0; 1263 } 1264 1265 static cppc_mode_transition_fn mode_state_machine[AMD_PSTATE_MAX][AMD_PSTATE_MAX] = { 1266 [AMD_PSTATE_DISABLE] = { 1267 [AMD_PSTATE_DISABLE] = NULL, 1268 [AMD_PSTATE_PASSIVE] = amd_pstate_register_driver, 1269 [AMD_PSTATE_ACTIVE] = amd_pstate_register_driver, 1270 [AMD_PSTATE_GUIDED] = amd_pstate_register_driver, 1271 }, 1272 [AMD_PSTATE_PASSIVE] = { 1273 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, 1274 [AMD_PSTATE_PASSIVE] = NULL, 1275 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, 1276 [AMD_PSTATE_GUIDED] = amd_pstate_change_mode_without_dvr_change, 1277 }, 1278 [AMD_PSTATE_ACTIVE] = { 1279 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, 1280 [AMD_PSTATE_PASSIVE] = amd_pstate_change_driver_mode, 1281 [AMD_PSTATE_ACTIVE] = NULL, 1282 [AMD_PSTATE_GUIDED] = amd_pstate_change_driver_mode, 1283 }, 1284 [AMD_PSTATE_GUIDED] = { 1285 [AMD_PSTATE_DISABLE] = amd_pstate_unregister_driver, 1286 [AMD_PSTATE_PASSIVE] = amd_pstate_change_mode_without_dvr_change, 1287 [AMD_PSTATE_ACTIVE] = amd_pstate_change_driver_mode, 1288 [AMD_PSTATE_GUIDED] = NULL, 1289 }, 1290 }; 1291 1292 static ssize_t amd_pstate_show_status(char *buf) 1293 { 1294 if (!current_pstate_driver) 1295 return sysfs_emit(buf, "disable\n"); 1296 1297 return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]); 1298 } 1299 1300 int amd_pstate_update_status(const char *buf, size_t size) 1301 { 1302 int mode_idx; 1303 1304 if (size > strlen("passive") || size < strlen("active")) 1305 return -EINVAL; 1306 1307 mode_idx = get_mode_idx_from_str(buf, size); 1308 1309 if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX) 1310 return -EINVAL; 1311 1312 if (mode_state_machine[cppc_state][mode_idx]) 1313 return mode_state_machine[cppc_state][mode_idx](mode_idx); 1314 1315 return 0; 1316 } 1317 EXPORT_SYMBOL_GPL(amd_pstate_update_status); 1318 1319 static ssize_t status_show(struct device *dev, 1320 struct device_attribute *attr, char *buf) 1321 { 1322 ssize_t ret; 1323 1324 mutex_lock(&amd_pstate_driver_lock); 1325 ret = 
amd_pstate_show_status(buf); 1326 mutex_unlock(&amd_pstate_driver_lock); 1327 1328 return ret; 1329 } 1330 1331 static ssize_t status_store(struct device *a, struct device_attribute *b, 1332 const char *buf, size_t count) 1333 { 1334 char *p = memchr(buf, '\n', count); 1335 int ret; 1336 1337 mutex_lock(&amd_pstate_driver_lock); 1338 ret = amd_pstate_update_status(buf, p ? p - buf : count); 1339 mutex_unlock(&amd_pstate_driver_lock); 1340 1341 return ret < 0 ? ret : count; 1342 } 1343 1344 static ssize_t prefcore_show(struct device *dev, 1345 struct device_attribute *attr, char *buf) 1346 { 1347 return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore)); 1348 } 1349 1350 cpufreq_freq_attr_ro(amd_pstate_max_freq); 1351 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq); 1352 1353 cpufreq_freq_attr_ro(amd_pstate_highest_perf); 1354 cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking); 1355 cpufreq_freq_attr_ro(amd_pstate_hw_prefcore); 1356 cpufreq_freq_attr_rw(energy_performance_preference); 1357 cpufreq_freq_attr_ro(energy_performance_available_preferences); 1358 static DEVICE_ATTR_RW(status); 1359 static DEVICE_ATTR_RO(prefcore); 1360 1361 static struct freq_attr *amd_pstate_attr[] = { 1362 &amd_pstate_max_freq, 1363 &amd_pstate_lowest_nonlinear_freq, 1364 &amd_pstate_highest_perf, 1365 &amd_pstate_prefcore_ranking, 1366 &amd_pstate_hw_prefcore, 1367 NULL, 1368 }; 1369 1370 static struct freq_attr *amd_pstate_epp_attr[] = { 1371 &amd_pstate_max_freq, 1372 &amd_pstate_lowest_nonlinear_freq, 1373 &amd_pstate_highest_perf, 1374 &amd_pstate_prefcore_ranking, 1375 &amd_pstate_hw_prefcore, 1376 &energy_performance_preference, 1377 &energy_performance_available_preferences, 1378 NULL, 1379 }; 1380 1381 static struct attribute *pstate_global_attributes[] = { 1382 &dev_attr_status.attr, 1383 &dev_attr_prefcore.attr, 1384 NULL 1385 }; 1386 1387 static const struct attribute_group amd_pstate_global_attr_group = { 1388 .name = "amd_pstate", 1389 .attrs = pstate_global_attributes, 1390 }; 1391 1392 static bool amd_pstate_acpi_pm_profile_server(void) 1393 { 1394 switch (acpi_gbl_FADT.preferred_profile) { 1395 case PM_ENTERPRISE_SERVER: 1396 case PM_SOHO_SERVER: 1397 case PM_PERFORMANCE_SERVER: 1398 return true; 1399 } 1400 return false; 1401 } 1402 1403 static bool amd_pstate_acpi_pm_profile_undefined(void) 1404 { 1405 if (acpi_gbl_FADT.preferred_profile == PM_UNSPECIFIED) 1406 return true; 1407 if (acpi_gbl_FADT.preferred_profile >= NR_PM_PROFILES) 1408 return true; 1409 return false; 1410 } 1411 1412 static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) 1413 { 1414 int min_freq, max_freq, ret; 1415 struct amd_cpudata *cpudata; 1416 struct device *dev; 1417 u64 value; 1418 1419 /* 1420 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency, 1421 * which is ideal for initialization process. 
1422 */ 1423 amd_perf_ctl_reset(policy->cpu); 1424 dev = get_cpu_device(policy->cpu); 1425 if (!dev) 1426 return -ENODEV; 1427 1428 cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL); 1429 if (!cpudata) 1430 return -ENOMEM; 1431 1432 cpudata->cpu = policy->cpu; 1433 cpudata->epp_policy = 0; 1434 1435 ret = amd_pstate_init_perf(cpudata); 1436 if (ret) 1437 goto free_cpudata1; 1438 1439 amd_pstate_init_prefcore(cpudata); 1440 1441 ret = amd_pstate_init_freq(cpudata); 1442 if (ret) 1443 goto free_cpudata1; 1444 1445 ret = amd_pstate_init_boost_support(cpudata); 1446 if (ret) 1447 goto free_cpudata1; 1448 1449 min_freq = READ_ONCE(cpudata->min_freq); 1450 max_freq = READ_ONCE(cpudata->max_freq); 1451 1452 policy->cpuinfo.min_freq = min_freq; 1453 policy->cpuinfo.max_freq = max_freq; 1454 /* It will be updated by governor */ 1455 policy->cur = policy->cpuinfo.min_freq; 1456 1457 policy->driver_data = cpudata; 1458 1459 cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0); 1460 1461 policy->min = policy->cpuinfo.min_freq; 1462 policy->max = policy->cpuinfo.max_freq; 1463 1464 policy->boost_enabled = READ_ONCE(cpudata->boost_supported); 1465 1466 /* 1467 * Set the policy to provide a valid fallback value in case 1468 * the default cpufreq governor is neither powersave nor performance. 1469 */ 1470 if (amd_pstate_acpi_pm_profile_server() || 1471 amd_pstate_acpi_pm_profile_undefined()) 1472 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 1473 else 1474 policy->policy = CPUFREQ_POLICY_POWERSAVE; 1475 1476 if (cpu_feature_enabled(X86_FEATURE_CPPC)) { 1477 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); 1478 if (ret) 1479 return ret; 1480 WRITE_ONCE(cpudata->cppc_req_cached, value); 1481 1482 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value); 1483 if (ret) 1484 return ret; 1485 WRITE_ONCE(cpudata->cppc_cap1_cached, value); 1486 } 1487 1488 return 0; 1489 1490 free_cpudata1: 1491 kfree(cpudata); 1492 return ret; 1493 } 1494 1495 static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) 1496 { 1497 struct amd_cpudata *cpudata = policy->driver_data; 1498 1499 if (cpudata) { 1500 kfree(cpudata); 1501 policy->driver_data = NULL; 1502 } 1503 1504 pr_debug("CPU %d exiting\n", policy->cpu); 1505 } 1506 1507 static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) 1508 { 1509 struct amd_cpudata *cpudata = policy->driver_data; 1510 u32 max_perf, min_perf, min_limit_perf, max_limit_perf; 1511 u64 value; 1512 s16 epp; 1513 1514 if (cpudata->boost_supported && !policy->boost_enabled) 1515 max_perf = READ_ONCE(cpudata->nominal_perf); 1516 else 1517 max_perf = READ_ONCE(cpudata->highest_perf); 1518 min_perf = READ_ONCE(cpudata->lowest_perf); 1519 max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq); 1520 min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq); 1521 1522 if (min_limit_perf < min_perf) 1523 min_limit_perf = min_perf; 1524 1525 if (max_limit_perf < min_limit_perf) 1526 max_limit_perf = min_limit_perf; 1527 1528 WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf); 1529 WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf); 1530 1531 max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf, 1532 cpudata->max_limit_perf); 1533 min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf, 1534 cpudata->max_limit_perf); 1535 value = READ_ONCE(cpudata->cppc_req_cached); 1536 1537 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) 1538 min_perf = max_perf; 1539 1540 /* Initial min/max values 
for CPPC Performance Controls Register */
	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	/* The CPPC EPP feature requires the desired perf field to be set to zero */
	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(0);

	cpudata->epp_policy = cpudata->policy;

	/* Get BIOS pre-defined epp value */
	epp = amd_pstate_get_epp(cpudata, value);
	if (epp < 0) {
		/*
		 * This return value can only be negative for shared-memory
		 * systems where EPP register read/write is not supported.
		 */
		return epp;
	}

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		epp = 0;

	/* Set initial EPP value */
	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	}

	WRITE_ONCE(cpudata->cppc_req_cached, value);
	return amd_pstate_set_epp(cpudata, epp);
}

static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpudata->policy = policy->policy;

	ret = amd_pstate_epp_update_limit(policy);
	if (ret)
		return ret;

	/*
	 * policy->cur is never updated with the amd_pstate_epp driver, but it
	 * is used as a stale frequency value. So, keep it within limits.
	 */
	policy->cur = policy->min;

	return 0;
}

static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
	struct cppc_perf_ctrls perf_ctrls;
	u64 value, max_perf;
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd pstate during resume, return %d\n", ret);

	value = READ_ONCE(cpudata->cppc_req_cached);
	max_perf = READ_ONCE(cpudata->highest_perf);

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.max_perf = max_perf;
		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
		cppc_set_perf(cpudata->cpu, &perf_ctrls);
	}
}

static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);

	if (cppc_state == AMD_PSTATE_ACTIVE) {
		amd_pstate_epp_reenable(cpudata);
		cpudata->suspended = false;
	}

	return 0;
}

static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	struct cppc_perf_ctrls perf_ctrls;
	int min_perf;
	u64 value;

	min_perf = READ_ONCE(cpudata->lowest_perf);
	value = READ_ONCE(cpudata->cppc_req_cached);

	mutex_lock(&amd_pstate_limits_lock);
	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;

		/* Set max perf same as min perf */
		value &= ~AMD_CPPC_MAX_PERF(~0L);
		value |= AMD_CPPC_MAX_PERF(min_perf);
		value &= ~AMD_CPPC_MIN_PERF(~0L);
		value |= AMD_CPPC_MIN_PERF(min_perf);
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
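		/*
		 * Shared memory designs: clear the desired perf level and cap
		 * max perf at the lowest perf, with EPP set to balance_power,
		 * so the core stays at its minimum frequency while offline.
		 */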
perf_ctrls.desired_perf = 0; 1660 perf_ctrls.max_perf = min_perf; 1661 perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE); 1662 cppc_set_perf(cpudata->cpu, &perf_ctrls); 1663 } 1664 mutex_unlock(&amd_pstate_limits_lock); 1665 } 1666 1667 static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy) 1668 { 1669 struct amd_cpudata *cpudata = policy->driver_data; 1670 1671 pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu); 1672 1673 if (cpudata->suspended) 1674 return 0; 1675 1676 if (cppc_state == AMD_PSTATE_ACTIVE) 1677 amd_pstate_epp_offline(policy); 1678 1679 return 0; 1680 } 1681 1682 static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy) 1683 { 1684 cpufreq_verify_within_cpu_limits(policy); 1685 pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min); 1686 return 0; 1687 } 1688 1689 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy) 1690 { 1691 struct amd_cpudata *cpudata = policy->driver_data; 1692 int ret; 1693 1694 /* avoid suspending when EPP is not enabled */ 1695 if (cppc_state != AMD_PSTATE_ACTIVE) 1696 return 0; 1697 1698 /* set this flag to avoid setting core offline*/ 1699 cpudata->suspended = true; 1700 1701 /* disable CPPC in lowlevel firmware */ 1702 ret = amd_pstate_enable(false); 1703 if (ret) 1704 pr_err("failed to suspend, return %d\n", ret); 1705 1706 return 0; 1707 } 1708 1709 static int amd_pstate_epp_resume(struct cpufreq_policy *policy) 1710 { 1711 struct amd_cpudata *cpudata = policy->driver_data; 1712 1713 if (cpudata->suspended) { 1714 mutex_lock(&amd_pstate_limits_lock); 1715 1716 /* enable amd pstate from suspend state*/ 1717 amd_pstate_epp_reenable(cpudata); 1718 1719 mutex_unlock(&amd_pstate_limits_lock); 1720 1721 cpudata->suspended = false; 1722 } 1723 1724 return 0; 1725 } 1726 1727 static struct cpufreq_driver amd_pstate_driver = { 1728 .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS, 1729 .verify = amd_pstate_verify, 1730 .target = amd_pstate_target, 1731 .fast_switch = amd_pstate_fast_switch, 1732 .init = amd_pstate_cpu_init, 1733 .exit = amd_pstate_cpu_exit, 1734 .suspend = amd_pstate_cpu_suspend, 1735 .resume = amd_pstate_cpu_resume, 1736 .set_boost = amd_pstate_set_boost, 1737 .update_limits = amd_pstate_update_limits, 1738 .name = "amd-pstate", 1739 .attr = amd_pstate_attr, 1740 }; 1741 1742 static struct cpufreq_driver amd_pstate_epp_driver = { 1743 .flags = CPUFREQ_CONST_LOOPS, 1744 .verify = amd_pstate_epp_verify_policy, 1745 .setpolicy = amd_pstate_epp_set_policy, 1746 .init = amd_pstate_epp_cpu_init, 1747 .exit = amd_pstate_epp_cpu_exit, 1748 .offline = amd_pstate_epp_cpu_offline, 1749 .online = amd_pstate_epp_cpu_online, 1750 .suspend = amd_pstate_epp_suspend, 1751 .resume = amd_pstate_epp_resume, 1752 .update_limits = amd_pstate_update_limits, 1753 .set_boost = amd_pstate_set_boost, 1754 .name = "amd-pstate-epp", 1755 .attr = amd_pstate_epp_attr, 1756 }; 1757 1758 static int __init amd_pstate_set_driver(int mode_idx) 1759 { 1760 if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) { 1761 cppc_state = mode_idx; 1762 if (cppc_state == AMD_PSTATE_DISABLE) 1763 pr_info("driver is explicitly disabled\n"); 1764 1765 if (cppc_state == AMD_PSTATE_ACTIVE) 1766 current_pstate_driver = &amd_pstate_epp_driver; 1767 1768 if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED) 1769 current_pstate_driver = &amd_pstate_driver; 1770 1771 return 0; 1772 } 1773 1774 return -EINVAL; 1775 } 1776 1777 /** 1778 * CPPC function is not 
supported on family 17h processors with model IDs ranging from 0x10 to 0x2F.
 * Show a debug message to help check whether the CPU supports CPPC, which is
 * useful when diagnosing driver load failures.
 */
static bool amd_cppc_supported(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	bool warn = false;

	if ((boot_cpu_data.x86 == 0x17) && (boot_cpu_data.x86_model < 0x30)) {
		pr_debug_once("CPPC feature is not supported by the processor\n");
		return false;
	}

	/*
	 * If the CPPC feature is disabled in the BIOS for processors
	 * that support MSR-based CPPC, the AMD Pstate driver may not
	 * function correctly.
	 *
	 * For such processors, check the CPPC flag and display a
	 * warning message if the platform supports CPPC.
	 *
	 * Note: The check below will not abort the driver registration
	 * process because the code is added for debugging purposes.
	 * Besides, it may still be possible for the driver to work
	 * using the shared-memory mechanism.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
		if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
			switch (c->x86_model) {
			case 0x60 ... 0x6F:
			case 0x80 ... 0xAF:
				warn = true;
				break;
			}
		} else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
			   cpu_feature_enabled(X86_FEATURE_ZEN4)) {
			switch (c->x86_model) {
			case 0x10 ... 0x1F:
			case 0x40 ... 0xAF:
				warn = true;
				break;
			}
		} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
			warn = true;
		}
	}

	if (warn)
		pr_warn_once("The CPPC feature is supported but currently disabled by the BIOS.\n"
			     "Please enable it if your BIOS has the CPPC option.\n");
	return true;
}

static int __init amd_pstate_init(void)
{
	struct device *dev_root;
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	/* show debug message only if CPPC is not supported */
	if (!amd_cppc_supported())
		return -EOPNOTSUPP;

	/* show warning message when BIOS broken or ACPI disabled */
	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	quirks = NULL;

	/* check if this machine needs CPPC quirks */
	dmi_check_system(amd_pstate_quirks_table);

	/*
	 * Determine the driver mode from the command line or kernel config.
	 * If no command line input is provided, cppc_state will be AMD_PSTATE_UNDEFINED.
	 * Command line options will override the kernel config settings.
	 */

	if (cppc_state == AMD_PSTATE_UNDEFINED) {
		/* Disable on the following configs by default:
		 * 1. Undefined platforms
		 * 2.
Server platforms 1868 */ 1869 if (amd_pstate_acpi_pm_profile_undefined() || 1870 amd_pstate_acpi_pm_profile_server()) { 1871 pr_info("driver load is disabled, boot with specific mode to enable this\n"); 1872 return -ENODEV; 1873 } 1874 /* get driver mode from kernel config option [1:4] */ 1875 cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE; 1876 } 1877 1878 switch (cppc_state) { 1879 case AMD_PSTATE_DISABLE: 1880 pr_info("driver load is disabled, boot with specific mode to enable this\n"); 1881 return -ENODEV; 1882 case AMD_PSTATE_PASSIVE: 1883 case AMD_PSTATE_ACTIVE: 1884 case AMD_PSTATE_GUIDED: 1885 ret = amd_pstate_set_driver(cppc_state); 1886 if (ret) 1887 return ret; 1888 break; 1889 default: 1890 return -EINVAL; 1891 } 1892 1893 /* capability check */ 1894 if (cpu_feature_enabled(X86_FEATURE_CPPC)) { 1895 pr_debug("AMD CPPC MSR based functionality is supported\n"); 1896 if (cppc_state != AMD_PSTATE_ACTIVE) 1897 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; 1898 } else { 1899 pr_debug("AMD CPPC shared memory based functionality is supported\n"); 1900 static_call_update(amd_pstate_enable, cppc_enable); 1901 static_call_update(amd_pstate_init_perf, cppc_init_perf); 1902 static_call_update(amd_pstate_update_perf, cppc_update_perf); 1903 } 1904 1905 if (amd_pstate_prefcore) { 1906 ret = amd_detect_prefcore(&amd_pstate_prefcore); 1907 if (ret) 1908 return ret; 1909 } 1910 1911 /* enable amd pstate feature */ 1912 ret = amd_pstate_enable(true); 1913 if (ret) { 1914 pr_err("failed to enable driver mode(%d)\n", cppc_state); 1915 return ret; 1916 } 1917 1918 ret = cpufreq_register_driver(current_pstate_driver); 1919 if (ret) { 1920 pr_err("failed to register with return %d\n", ret); 1921 goto disable_driver; 1922 } 1923 1924 dev_root = bus_get_dev_root(&cpu_subsys); 1925 if (dev_root) { 1926 ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group); 1927 put_device(dev_root); 1928 if (ret) { 1929 pr_err("sysfs attribute export failed with error %d.\n", ret); 1930 goto global_attr_free; 1931 } 1932 } 1933 1934 return ret; 1935 1936 global_attr_free: 1937 cpufreq_unregister_driver(current_pstate_driver); 1938 disable_driver: 1939 amd_pstate_enable(false); 1940 return ret; 1941 } 1942 device_initcall(amd_pstate_init); 1943 1944 static int __init amd_pstate_param(char *str) 1945 { 1946 size_t size; 1947 int mode_idx; 1948 1949 if (!str) 1950 return -EINVAL; 1951 1952 size = strlen(str); 1953 mode_idx = get_mode_idx_from_str(str, size); 1954 1955 return amd_pstate_set_driver(mode_idx); 1956 } 1957 1958 static int __init amd_prefcore_param(char *str) 1959 { 1960 if (!strcmp(str, "disable")) 1961 amd_pstate_prefcore = false; 1962 1963 return 0; 1964 } 1965 1966 early_param("amd_pstate", amd_pstate_param); 1967 early_param("amd_prefcore", amd_prefcore_param); 1968 1969 MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>"); 1970 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver"); 1971