// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to
 * communicate performance hints directly to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware implementations
 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
 * The X86_FEATURE_CPPC CPU feature flag is used to distinguish between the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"

#define AMD_PSTATE_TRANSITION_LATENCY	20000
#define AMD_PSTATE_TRANSITION_DELAY	1000

/*
 * TODO: The shared memory solution still needs more time for fine tuning
 * together with the community.
 *
 * There are some performance drops on CPU benchmarks reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is disabled
 * by default (falling back to acpi-cpufreq on these processors), and a module
 * parameter is provided to enable it manually for debugging.
 */
static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_DISABLE;

/*
 * AMD Energy Preference Performance (EPP)
 * The EPP is used in the CCLK DPM controller to drive
 * the frequency that a core is going to operate during
 * short periods of activity. EPP values are used for the
 * different OS profiles (balanced, performance, power savings).
 * The display strings corresponding to each EPP index are
 * defined in energy_perf_strings[]:
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
enum energy_perf_value_index {
	EPP_INDEX_DEFAULT = 0,
	EPP_INDEX_PERFORMANCE,
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
};

static const char * const energy_perf_strings[] = {
	[EPP_INDEX_DEFAULT] = "default",
	[EPP_INDEX_PERFORMANCE] = "performance",
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
	NULL
};

static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0,
	[EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
	[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
};

static inline int get_mode_idx_from_str(const char *str, size_t size)
{
	int i;

	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
	return -EINVAL;
}

static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
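/*
 * Read the current EPP value for a CPU: on full MSR systems from the cached
 * (or freshly read) MSR_AMD_CPPC_REQ register, on shared memory systems via
 * the ACPI CPPC interface.
 */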
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
	u64 epp;
	int ret;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		if (!cppc_req_cached) {
			epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
					    &cppc_req_cached);
			if (epp)
				return epp;
		}
		epp = (cppc_req_cached >> 24) & 0xFF;
	} else {
		ret = cppc_get_epp_perf(cpudata->cpu, &epp);
		if (ret < 0) {
			pr_debug("Could not retrieve energy perf value (%d)\n", ret);
			return -EIO;
		}
	}

	return (s16)(epp & 0xff);
}

static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
{
	s16 epp;
	int index = -EINVAL;

	epp = amd_pstate_get_epp(cpudata, 0);
	if (epp < 0)
		return epp;

	switch (epp) {
	case AMD_CPPC_EPP_PERFORMANCE:
		index = EPP_INDEX_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
		index = EPP_INDEX_BALANCE_PERFORMANCE;
		break;
	case AMD_CPPC_EPP_BALANCE_POWERSAVE:
		index = EPP_INDEX_BALANCE_POWERSAVE;
		break;
	case AMD_CPPC_EPP_POWERSAVE:
		index = EPP_INDEX_POWERSAVE;
		break;
	default:
		break;
	}

	return index;
}

static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
	int ret;
	struct cppc_perf_ctrls perf_ctrls;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		u64 value = READ_ONCE(cpudata->cppc_req_cached);

		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
		WRITE_ONCE(cpudata->cppc_req_cached, value);

		ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
		if (!ret)
			cpudata->epp_cached = epp;
	} else {
		perf_ctrls.energy_perf = epp;
		ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
		if (ret) {
			pr_debug("failed to set energy perf value (%d)\n", ret);
			return ret;
		}
		cpudata->epp_cached = epp;
	}

	return ret;
}
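/*
 * Map a sysfs preference index from energy_perf_strings[] to the raw EPP
 * value in epp_values[] and program it. A non-zero EPP is rejected while the
 * performance policy is active.
 */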
static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
					    int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index) {
		pr_debug("EPP pref_index is invalid\n");
		return -EINVAL;
	}

	if (epp == -EINVAL)
		epp = epp_values[pref_index];

	if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
		pr_debug("EPP cannot be set under performance policy\n");
		return -EBUSY;
	}

	ret = amd_pstate_set_epp(cpudata, epp);

	return ret;
}

static inline int pstate_enable(bool enable)
{
	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

static int cppc_enable(bool enable)
{
	int cpu, ret = 0;
	struct cppc_perf_ctrls perf_ctrls;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;

		/* Enable autonomous mode for EPP */
		if (cppc_state == AMD_PSTATE_ACTIVE) {
			/* Set desired perf as zero to allow EPP firmware control */
			perf_ctrls.desired_perf = 0;
			ret = cppc_set_perf(cpu, &perf_ctrls);
			if (ret)
				return ret;
		}
	}

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;
	u32 highest_perf;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	/*
	 * TODO: Introduce AMD specific power feature.
	 *
	 * CPPC entry doesn't indicate the highest performance in some ASICs.
	 */
	highest_perf = amd_get_highest_perf();
	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);

	WRITE_ONCE(cpudata->highest_perf, highest_perf);

	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

	return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 highest_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	highest_perf = amd_get_highest_perf();
	if (highest_perf > cppc_perf.highest_perf)
		highest_perf = cppc_perf.highest_perf;

	WRITE_ONCE(cpudata->highest_perf, highest_perf);

	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}
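/*
 * Sample APERF, MPERF and the TSC with interrupts disabled and compute the
 * effective frequency from the deltas since the previous sample. Returns
 * false if the counters have not advanced, so stale data is not traced.
 */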
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;

	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}

static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
			cpudata->cpu, (value != prev), fast_switch);
	}

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, false);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}
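/*
 * Fast-switch callback used by the schedutil governor: scale the scheduler's
 * utilization hints (expressed relative to 'capacity') into CPPC performance
 * values, keeping min_perf at or above the lowest nonlinear perf level.
 */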
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->highest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
	cpufreq_cpu_put(policy);
}

static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.lowest_freq * 1000;
}

static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.nominal_freq * 1000;
}

static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EINVAL;
	}

	if (state)
		policy->cpuinfo.max_freq = cpudata->max_freq;
	else
		policy->cpuinfo.max_freq = cpudata->nominal_freq;

	policy->max = policy->cpuinfo.max_freq;

	ret = freq_qos_update_request(&cpudata->req[1],
				      policy->cpuinfo.max_freq);
	if (ret < 0)
		return ret;

	return 0;
}
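/*
 * Boost is considered supported whenever the highest perf level exceeds the
 * nominal perf level reported by the CPPC capabilities.
 */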
static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
	u32 highest_perf, nominal_perf;

	highest_perf = READ_ONCE(cpudata->highest_perf);
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	if (highest_perf <= nominal_perf)
		return;

	cpudata->boost_supported = true;
	current_pstate_driver->boost_enabled = true;
}

static void amd_perf_ctl_reset(unsigned int cpu)
{
	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}

static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	amd_pstate_boost_init(cpudata);
	if (!current_pstate_driver->adjust_perf)
		current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	kfree(cpudata);

	return 0;
}

static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

	return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

	return ret;
}

/* Sysfs attributes */
/*
 * This frequency indicates the maximum hardware frequency.
 * If boost is not active but supported, the frequency will be larger than the
 * one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	int max_freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	max_freq = amd_get_max_freq(cpudata);
	if (max_freq < 0)
		return max_freq;

	return sysfs_emit(buf, "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	int freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (freq < 0)
		return freq;

	return sysfs_emit(buf, "%u\n", freq);
}

/*
 * On some ASICs the highest_perf is not the one in the _CPC table, so we
 * need to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	u32 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->highest_perf);

	return sysfs_emit(buf, "%u\n", perf);
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int offset = 0;

	while (energy_perf_strings[i] != NULL)
		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);

	sysfs_emit_at(buf, offset, "\n");

	return offset;
}

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	char str_preference[21];
	ssize_t ret;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(energy_perf_strings, -1, str_preference);
	if (ret < 0)
		return -EINVAL;

	mutex_lock(&amd_pstate_limits_lock);
	ret = amd_pstate_set_energy_pref_index(cpudata, ret);
	mutex_unlock(&amd_pstate_limits_lock);

	return ret ?: count;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int preference;

	preference = amd_pstate_get_energy_pref_index(cpudata);
	if (preference < 0)
		return preference;

	return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}

static ssize_t amd_pstate_show_status(char *buf)
{
	if (!current_pstate_driver)
		return sysfs_emit(buf, "disable\n");

	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
}

static void amd_pstate_driver_cleanup(void)
{
	current_pstate_driver = NULL;
}
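/*
 * Handle writes to the global "status" attribute: switch between the disable,
 * passive and active modes by unregistering the current cpufreq driver and
 * registering the one that matches the requested mode.
 */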
static int amd_pstate_update_status(const char *buf, size_t size)
{
	int ret = 0;
	int mode_idx;

	if (size > 7 || size < 6)
		return -EINVAL;
	mode_idx = get_mode_idx_from_str(buf, size);

	switch (mode_idx) {
	case AMD_PSTATE_DISABLE:
		if (!current_pstate_driver)
			return -EINVAL;
		if (cppc_state == AMD_PSTATE_ACTIVE)
			return -EBUSY;
		cpufreq_unregister_driver(current_pstate_driver);
		amd_pstate_driver_cleanup();
		break;
	case AMD_PSTATE_PASSIVE:
		if (current_pstate_driver) {
			if (current_pstate_driver == &amd_pstate_driver)
				return 0;
			cpufreq_unregister_driver(current_pstate_driver);
			cppc_state = AMD_PSTATE_PASSIVE;
			current_pstate_driver = &amd_pstate_driver;
		}

		ret = cpufreq_register_driver(current_pstate_driver);
		break;
	case AMD_PSTATE_ACTIVE:
		if (current_pstate_driver) {
			if (current_pstate_driver == &amd_pstate_epp_driver)
				return 0;
			cpufreq_unregister_driver(current_pstate_driver);
			current_pstate_driver = &amd_pstate_epp_driver;
			cppc_state = AMD_PSTATE_ACTIVE;
		}

		ret = cpufreq_register_driver(current_pstate_driver);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&amd_pstate_driver_lock);
	ret = amd_pstate_show_status(buf);
	mutex_unlock(&amd_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&amd_pstate_driver_lock);
	ret = amd_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&amd_pstate_driver_lock);

	return ret < 0 ? ret : count;
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);
cpufreq_freq_attr_rw(energy_performance_preference);
cpufreq_freq_attr_ro(energy_performance_available_preferences);
define_one_global_rw(status);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	NULL,
};

static struct freq_attr *amd_pstate_epp_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static struct attribute *pstate_global_attributes[] = {
	&status.attr,
	NULL
};

static const struct attribute_group amd_pstate_global_attr_group = {
	.name = "amd_pstate",
	.attrs = pstate_global_attributes,
};
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct amd_cpudata *cpudata;
	struct device *dev;
	u64 value;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;
	cpudata->epp_policy = 0;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;
	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;

	/*
	 * Set the policy to powersave to provide a valid fallback value in case
	 * the default cpufreq governor is neither powersave nor performance.
	 */
	policy->policy = CPUFREQ_POLICY_POWERSAVE;

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		policy->fast_switch_possible = true;
		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
		if (ret)
			return ret;
		WRITE_ONCE(cpudata->cppc_req_cached, value);

		ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
		if (ret)
			return ret;
		WRITE_ONCE(cpudata->cppc_cap1_cached, value);
	}
	amd_pstate_boost_init(cpudata);

	return 0;

free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);
	policy->fast_switch_possible = false;
	return 0;
}
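/*
 * Program the initial min/max performance limits and the EPP value for a CPU
 * according to the current cpufreq policy. Under the performance policy,
 * min_perf is raised to max_perf and EPP is forced to 0.
 */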
static void amd_pstate_epp_init(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;
	u32 max_perf, min_perf;
	u64 value;
	s16 epp;

	max_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);

	value = READ_ONCE(cpudata->cppc_req_cached);

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		min_perf = max_perf;

	/* Initial min/max values for CPPC Performance Controls Register */
	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	/* The CPPC EPP feature requires the desired perf field to be set to zero */
	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(0);

	if (cpudata->epp_policy == cpudata->policy)
		goto skip_epp;

	cpudata->epp_policy = cpudata->policy;

	/* Get BIOS pre-defined epp value */
	epp = amd_pstate_get_epp(cpudata, value);
	if (epp < 0) {
		/*
		 * This return value can only be negative for shared_memory
		 * systems where EPP register read/write is not supported.
		 */
		goto skip_epp;
	}

	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		epp = 0;

	/* Set initial EPP value */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		value &= ~GENMASK_ULL(31, 24);
		value |= (u64)epp << 24;
	}

	WRITE_ONCE(cpudata->cppc_req_cached, value);
	amd_pstate_set_epp(cpudata, epp);
skip_epp:
	cpufreq_cpu_put(policy);
}

static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpudata->policy = policy->policy;

	amd_pstate_epp_init(policy->cpu);

	return 0;
}
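/*
 * Re-enable CPPC and restore the cached performance request (or the max perf
 * and cached EPP value on shared memory systems) when a CPU comes back online
 * or the system resumes from suspend.
 */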
static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
	struct cppc_perf_ctrls perf_ctrls;
	u64 value, max_perf;
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd pstate during resume, return %d\n", ret);

	value = READ_ONCE(cpudata->cppc_req_cached);
	max_perf = READ_ONCE(cpudata->highest_perf);

	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.max_perf = max_perf;
		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
		cppc_set_perf(cpudata->cpu, &perf_ctrls);
	}
}

static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);

	if (cppc_state == AMD_PSTATE_ACTIVE) {
		amd_pstate_epp_reenable(cpudata);
		cpudata->suspended = false;
	}

	return 0;
}

static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	struct cppc_perf_ctrls perf_ctrls;
	int min_perf;
	u64 value;

	min_perf = READ_ONCE(cpudata->lowest_perf);
	value = READ_ONCE(cpudata->cppc_req_cached);

	mutex_lock(&amd_pstate_limits_lock);
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;

		/* Set max perf same as min perf */
		value &= ~AMD_CPPC_MAX_PERF(~0L);
		value |= AMD_CPPC_MAX_PERF(min_perf);
		value &= ~AMD_CPPC_MIN_PERF(~0L);
		value |= AMD_CPPC_MIN_PERF(min_perf);
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
	} else {
		perf_ctrls.desired_perf = 0;
		perf_ctrls.max_perf = min_perf;
		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
		cppc_set_perf(cpudata->cpu, &perf_ctrls);
	}
	mutex_unlock(&amd_pstate_limits_lock);
}

static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);

	if (cpudata->suspended)
		return 0;

	if (cppc_state == AMD_PSTATE_ACTIVE)
		amd_pstate_epp_offline(policy);

	return 0;
}

static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
	return 0;
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	/* avoid suspending when EPP is not enabled */
	if (cppc_state != AMD_PSTATE_ACTIVE)
		return 0;

	/* set this flag to avoid setting core offline */
	cpudata->suspended = true;

	/* disable CPPC in lowlevel firmware */
	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to suspend, return %d\n", ret);

	return 0;
}

static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	if (cpudata->suspended) {
		mutex_lock(&amd_pstate_limits_lock);

		/* enable amd pstate from suspend state */
		amd_pstate_epp_reenable(cpudata);

		mutex_unlock(&amd_pstate_limits_lock);

		cpudata->suspended = false;
	}

	return 0;
}

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.suspend	= amd_pstate_cpu_suspend,
	.resume		= amd_pstate_cpu_resume,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};

static struct cpufreq_driver amd_pstate_epp_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= amd_pstate_epp_verify_policy,
	.setpolicy	= amd_pstate_epp_set_policy,
	.init		= amd_pstate_epp_cpu_init,
	.exit		= amd_pstate_epp_cpu_exit,
	.offline	= amd_pstate_epp_cpu_offline,
	.online		= amd_pstate_epp_cpu_online,
	.suspend	= amd_pstate_epp_suspend,
	.resume		= amd_pstate_epp_resume,
	.name		= "amd_pstate_epp",
	.attr		= amd_pstate_epp_attr,
};

static int __init amd_pstate_init(void)
{
	struct device *dev_root;
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;
	/*
	 * By default the driver is disabled; enable the amd-pstate passive
	 * mode driver explicitly with amd_pstate=passive (or another mode)
	 * on the kernel command line.
	 */
	if (cppc_state == AMD_PSTATE_DISABLE) {
		pr_info("driver load is disabled, boot with specific mode to enable this\n");
		return -ENODEV;
	}

	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		if (cppc_state == AMD_PSTATE_PASSIVE)
			current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
	} else {
		pr_debug("AMD CPPC shared memory based functionality is supported\n");
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	}

	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(current_pstate_driver);
	if (ret)
		pr_err("failed to register with return %d\n", ret);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
		put_device(dev_root);
		if (ret) {
			pr_err("sysfs attribute export failed with error %d.\n", ret);
			goto global_attr_free;
		}
	}

	return ret;

global_attr_free:
	cpufreq_unregister_driver(current_pstate_driver);
	return ret;
}
device_initcall(amd_pstate_init);

static int __init amd_pstate_param(char *str)
{
	size_t size;
	int mode_idx;

	if (!str)
		return -EINVAL;

	size = strlen(str);
	mode_idx = get_mode_idx_from_str(str, size);

	if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
		cppc_state = mode_idx;
		if (cppc_state == AMD_PSTATE_DISABLE)
			pr_info("driver is explicitly disabled\n");

		if (cppc_state == AMD_PSTATE_ACTIVE)
			current_pstate_driver = &amd_pstate_epp_driver;

		if (cppc_state == AMD_PSTATE_PASSIVE)
			current_pstate_driver = &amd_pstate_driver;

		return 0;
	}

	return -EINVAL;
}
early_param("amd_pstate", amd_pstate_param);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");