Lines matching "retain", "state" and "suspended" in drivers/cpufreq/amd-pstate.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * amd-pstate.c - AMD Processor P-state Frequency Driver
9 * AMD P-State introduces a new CPU performance scaling design for AMD
12 * frequency control range. It is intended to replace the legacy ACPI P-States control,
13 * allowing a flexible, low-latency interface for the Linux kernel to directly
16 * AMD P-State is supported on recent AMD Zen-based CPU series, including some of
18 * P-State supported systems. There are two types of hardware implementations
19 * for AMD P-State: 1) Full MSR Solution and 2) Shared Memory Solution.
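
The comment above names the two hardware flavours the driver has to cope with: a full MSR solution, where the CPPC request and capability registers are ordinary per-CPU MSRs, and a shared memory solution, where the same registers are reached through the ACPI CPPC mailbox helpers. The driver picks one set of callbacks at init time (in the kernel this is keyed on the X86_FEATURE_CPPC CPU feature flag). Below is a minimal user-space sketch of that "two backends, one front end" idea; the struct and function names are invented for illustration and the real driver wires this up differently.

/*
 * Minimal sketch of the "two backends, one driver" idea described above.
 * All names here are illustrative; the real driver selects between its
 * msr_* and shmem_* callbacks based on the X86_FEATURE_CPPC flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct pstate_backend {
	const char *name;
	int (*set_perf)(int cpu, unsigned int min, unsigned int des, unsigned int max);
};

static int msr_set_perf(int cpu, unsigned int min, unsigned int des, unsigned int max)
{
	/* full MSR solution: would write MSR_AMD_CPPC_REQ on @cpu */
	printf("cpu%d: MSR request min=%u des=%u max=%u\n", cpu, min, des, max);
	return 0;
}

static int shmem_set_perf(int cpu, unsigned int min, unsigned int des, unsigned int max)
{
	/* shared memory solution: would go through the ACPI CPPC mailbox */
	printf("cpu%d: shared-memory request min=%u des=%u max=%u\n", cpu, min, des, max);
	return 0;
}

static const struct pstate_backend msr_backend   = { "msr",   msr_set_perf };
static const struct pstate_backend shmem_backend = { "shmem", shmem_set_perf };

int main(void)
{
	bool has_cppc_msr = true;	/* stand-in for the X86_FEATURE_CPPC check */
	const struct pstate_backend *be = has_cppc_msr ? &msr_backend : &shmem_backend;

	return be->set_perf(0, 10, 0, 255);
}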
50 #include "amd-pstate.h"
51 #include "amd-pstate-trace.h"
100 *-------------------------------------
160 quirks = dmi->driver_data; in dmi_matched_7k62_bios_bug()
161 pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); in dmi_matched_7k62_bios_bug()
190 return -EINVAL; in get_mode_idx_from_str()
200 ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); in msr_get_epp()
221 ret = cppc_get_epp_perf(cpudata->cpu, &epp); in shmem_get_epp()
233 struct amd_cpudata *cpudata = policy->driver_data; in msr_update_perf()
236 value = prev = READ_ONCE(cpudata->cppc_req_cached); in msr_update_perf()
246 union perf_cached perf = READ_ONCE(cpudata->perf); in msr_update_perf()
248 trace_amd_pstate_epp_perf(cpudata->cpu, in msr_update_perf()
253 policy->boost_enabled, in msr_update_perf()
264 int ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); in msr_update_perf()
270 WRITE_ONCE(cpudata->cppc_req_cached, value); in msr_update_perf()
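
msr_update_perf() above illustrates the full-MSR write path: the min/desired/max performance levels and the EPP hint are packed into one 64-bit CPPC request word, compared against the copy cached in cppc_req_cached, and the cross-CPU MSR write is only issued when something actually changed. A stand-alone sketch of that pack, compare and write pattern follows; the field positions (max perf in bits 7:0, min in 15:8, desired in 23:16, EPP in 31:24) are the conventional CPPC request layout and should be treated as an assumption here, and write_req_msr() is a made-up stand-in for the real MSR write.

/*
 * Stand-alone sketch of the cache-compare-write pattern used by
 * msr_update_perf(). The field layout is the conventional CPPC request
 * layout (assumed here; see the MSR_AMD_CPPC_REQ definitions in the
 * kernel headers); write_req_msr() is a hypothetical stand-in.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cppc_req_cached;

static uint64_t pack_cppc_req(uint8_t min, uint8_t des, uint8_t max, uint8_t epp)
{
	return (uint64_t)max |
	       ((uint64_t)min << 8) |
	       ((uint64_t)des << 16) |
	       ((uint64_t)epp << 24);
}

static void write_req_msr(uint64_t value)
{
	printf("wrmsr MSR_AMD_CPPC_REQ = %#llx\n", (unsigned long long)value);
}

static void update_perf(uint8_t min, uint8_t des, uint8_t max, uint8_t epp)
{
	uint64_t value = pack_cppc_req(min, des, max, epp);

	if (value == cppc_req_cached)
		return;		/* nothing changed, skip the MSR write */

	write_req_msr(value);
	cppc_req_cached = value;
}

int main(void)
{
	update_perf(10, 0, 255, 128);	/* request changed: writes the MSR   */
	update_perf(10, 0, 255, 128);	/* identical request: write skipped  */
	return 0;
}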
288 struct amd_cpudata *cpudata = policy->driver_data; in msr_set_epp()
292 value = prev = READ_ONCE(cpudata->cppc_req_cached); in msr_set_epp()
297 union perf_cached perf = cpudata->perf; in msr_set_epp()
299 trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, in msr_set_epp()
302 cpudata->cppc_req_cached), in msr_set_epp()
304 cpudata->cppc_req_cached), in msr_set_epp()
305 policy->boost_enabled, in msr_set_epp()
312 ret = wrmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); in msr_set_epp()
319 WRITE_ONCE(cpudata->cppc_req_cached, value); in msr_set_epp()
333 struct amd_cpudata *cpudata = policy->driver_data; in shmem_set_epp()
340 epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); in shmem_set_epp()
342 union perf_cached perf = cpudata->perf; in shmem_set_epp()
344 trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf, in shmem_set_epp()
347 cpudata->cppc_req_cached), in shmem_set_epp()
349 cpudata->cppc_req_cached), in shmem_set_epp()
350 policy->boost_enabled, in shmem_set_epp()
358 ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1); in shmem_set_epp()
364 value = READ_ONCE(cpudata->cppc_req_cached); in shmem_set_epp()
367 WRITE_ONCE(cpudata->cppc_req_cached, value); in shmem_set_epp()
374 return wrmsrq_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1); in msr_cppc_enable()
379 return cppc_set_enable(policy->cpu, 1); in shmem_cppc_enable()
391 union perf_cached perf = READ_ONCE(cpudata->perf); in msr_init_perf()
395 int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, in msr_init_perf()
400 ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator); in msr_init_perf()
404 ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req); in msr_init_perf()
408 WRITE_ONCE(cpudata->cppc_req_cached, cppc_req); in msr_init_perf()
426 WRITE_ONCE(cpudata->perf, perf); in msr_init_perf()
427 WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1)); in msr_init_perf()
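
msr_init_perf() reads the static capabilities once from MSR_AMD_CPPC_CAP1, caches the decoded performance levels in cpudata->perf, and records the highest-perf field as the preferred-core ranking. The sketch below decodes such a capability word; the bit positions (lowest perf in bits 7:0, lowest nonlinear in 15:8, nominal in 23:16, highest in 31:24) are assumed from the conventional CPPC layout and the sample value is invented.

/* Sketch of decoding an MSR_AMD_CPPC_CAP1-style word as done in
 * msr_init_perf(). Bit positions are an assumption here and the
 * cap1 value below is invented. */
#include <stdint.h>
#include <stdio.h>

struct perf_caps {
	uint8_t lowest;
	uint8_t lowest_nonlinear;
	uint8_t nominal;
	uint8_t highest;
};

static struct perf_caps decode_cap1(uint64_t cap1)
{
	struct perf_caps c = {
		.lowest           = cap1 & 0xff,		/* bits  7:0  */
		.lowest_nonlinear = (cap1 >> 8) & 0xff,		/* bits 15:8  */
		.nominal          = (cap1 >> 16) & 0xff,	/* bits 23:16 */
		.highest          = (cap1 >> 24) & 0xff,	/* bits 31:24 */
	};
	return c;
}

int main(void)
{
	struct perf_caps c = decode_cap1(0xff783c14ULL);

	printf("lowest=%u nonlinear=%u nominal=%u highest=%u\n",
	       c.lowest, c.lowest_nonlinear, c.nominal, c.highest);
	return 0;
}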
435 union perf_cached perf = READ_ONCE(cpudata->perf); in shmem_init_perf()
439 int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); in shmem_init_perf()
443 ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator); in shmem_init_perf()
453 WRITE_ONCE(cpudata->perf, perf); in shmem_init_perf()
454 WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf); in shmem_init_perf()
459 ret = cppc_get_auto_sel(cpudata->cpu, &auto_sel); in shmem_init_perf()
465 ret = cppc_set_auto_sel(cpudata->cpu, in shmem_init_perf()
484 struct amd_cpudata *cpudata = policy->driver_data; in shmem_update_perf()
496 value = prev = READ_ONCE(cpudata->cppc_req_cached); in shmem_update_perf()
506 union perf_cached perf = READ_ONCE(cpudata->perf); in shmem_update_perf()
508 trace_amd_pstate_epp_perf(cpudata->cpu, in shmem_update_perf()
513 policy->boost_enabled, in shmem_update_perf()
524 ret = cppc_set_perf(cpudata->cpu, &perf_ctrls); in shmem_update_perf()
528 WRITE_ONCE(cpudata->cppc_req_cached, value); in shmem_update_perf()
543 if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) { in amd_pstate_sample()
550 cpudata->cur.aperf = aperf; in amd_pstate_sample()
551 cpudata->cur.mperf = mperf; in amd_pstate_sample()
552 cpudata->cur.tsc = tsc; in amd_pstate_sample()
553 cpudata->cur.aperf -= cpudata->prev.aperf; in amd_pstate_sample()
554 cpudata->cur.mperf -= cpudata->prev.mperf; in amd_pstate_sample()
555 cpudata->cur.tsc -= cpudata->prev.tsc; in amd_pstate_sample()
557 cpudata->prev.aperf = aperf; in amd_pstate_sample()
558 cpudata->prev.mperf = mperf; in amd_pstate_sample()
559 cpudata->prev.tsc = tsc; in amd_pstate_sample()
561 cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf); in amd_pstate_sample()
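
amd_pstate_sample() keeps running APERF/MPERF/TSC deltas and derives the effective frequency as delta_aperf * cpu_khz / delta_mperf, i.e. the reference clock scaled by the ratio of actual to reference cycles, exactly as the div64_u64() line above shows. A tiny worked example with invented counter deltas:

/* Worked example of the APERF/MPERF frequency calculation in
 * amd_pstate_sample(); the counter deltas below are invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_khz = 3000000;		/* 3.0 GHz reference clock */
	uint64_t delta_aperf = 4200000;		/* actual cycles since last sample */
	uint64_t delta_mperf = 3000000;		/* reference cycles since last sample */

	/* effective frequency: reference freq scaled by actual/reference cycles */
	uint64_t freq_khz = delta_aperf * cpu_khz / delta_mperf;

	printf("effective frequency: %llu kHz (~4.2 GHz)\n",
	       (unsigned long long)freq_khz);
	return 0;
}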
569 struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu); in amd_pstate_update()
570 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_update()
576 if (!cpudata->boost_supported) in amd_pstate_update()
581 policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf); in amd_pstate_update()
589 trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq, in amd_pstate_update()
590 cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc, in amd_pstate_update()
591 cpudata->cpu, fast_switch); in amd_pstate_update()
600 * Initialize lower frequency limit (i.e. policy->min) with in amd_pstate_verify()
602 * Override the initial value set by cpufreq core and amd-pstate qos_requests. in amd_pstate_verify()
604 if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) { in amd_pstate_verify()
606 cpufreq_cpu_get(policy_data->cpu); in amd_pstate_verify()
611 return -EINVAL; in amd_pstate_verify()
613 cpudata = policy->driver_data; in amd_pstate_verify()
614 perf = READ_ONCE(cpudata->perf); in amd_pstate_verify()
617 policy_data->min = perf_to_freq(perf, cpudata->nominal_freq, in amd_pstate_verify()
620 policy_data->min = cpudata->lowest_nonlinear_freq; in amd_pstate_verify()
630 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_update_min_max_limit()
631 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_update_min_max_limit()
633 perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max); in amd_pstate_update_min_max_limit()
634 WRITE_ONCE(cpudata->max_limit_freq, policy->max); in amd_pstate_update_min_max_limit()
636 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) { in amd_pstate_update_min_max_limit()
638 WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq)); in amd_pstate_update_min_max_limit()
640 perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min); in amd_pstate_update_min_max_limit()
641 WRITE_ONCE(cpudata->min_limit_freq, policy->min); in amd_pstate_update_min_max_limit()
644 WRITE_ONCE(cpudata->perf, perf); in amd_pstate_update_min_max_limit()
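
The freq_to_perf()/perf_to_freq() helpers used in the last two functions map between abstract CPPC performance levels and kHz with a linear relation anchored at the nominal operating point: freq = perf * nominal_freq / nominal_perf, and the inverse for freq_to_perf(). The sketch below shows only that proportional mapping; the real helpers also clamp to the lowest/highest perf range and pick a rounding direction, which is omitted here, and the nominal values are invented.

/* Sketch of the linear perf <-> frequency mapping behind
 * perf_to_freq()/freq_to_perf(); clamping and rounding details of the
 * real helpers are omitted and the nominal values are made up. */
#include <stdint.h>
#include <stdio.h>

static uint32_t perf_to_freq(uint8_t perf, uint32_t nominal_freq_khz, uint8_t nominal_perf)
{
	return (uint32_t)((uint64_t)perf * nominal_freq_khz / nominal_perf);
}

static uint8_t freq_to_perf(uint32_t freq_khz, uint32_t nominal_freq_khz, uint8_t nominal_perf)
{
	uint64_t perf = (uint64_t)freq_khz * nominal_perf / nominal_freq_khz;

	return perf > 255 ? 255 : (uint8_t)perf;
}

int main(void)
{
	uint32_t nominal_freq = 3000000;	/* 3.0 GHz at the nominal level */
	uint8_t nominal_perf = 120;		/* abstract CPPC nominal level  */

	printf("perf 166 -> %u kHz\n", perf_to_freq(166, nominal_freq, nominal_perf));
	printf("2.4 GHz  -> perf %u\n", freq_to_perf(2400000, nominal_freq, nominal_perf));
	return 0;
}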
655 cpudata = policy->driver_data; in amd_pstate_update_freq()
657 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) in amd_pstate_update_freq()
660 perf = READ_ONCE(cpudata->perf); in amd_pstate_update_freq()
662 freqs.old = policy->cur; in amd_pstate_update_freq()
665 des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq); in amd_pstate_update_freq()
667 WARN_ON(fast_switch && !policy->fast_switch_enabled); in amd_pstate_update_freq()
678 policy->governor->flags); in amd_pstate_update_freq()
698 return policy->cur; in amd_pstate_fast_switch()
714 cpudata = policy->driver_data; in amd_pstate_adjust_perf()
716 if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq) in amd_pstate_adjust_perf()
719 perf = READ_ONCE(cpudata->perf); in amd_pstate_adjust_perf()
739 policy->governor->flags); in amd_pstate_adjust_perf()
744 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_cpu_boost_update()
745 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_cpu_boost_update()
749 nominal_freq = READ_ONCE(cpudata->nominal_freq); in amd_pstate_cpu_boost_update()
750 max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf); in amd_pstate_cpu_boost_update()
753 policy->cpuinfo.max_freq = max_freq; in amd_pstate_cpu_boost_update()
754 else if (policy->cpuinfo.max_freq > nominal_freq) in amd_pstate_cpu_boost_update()
755 policy->cpuinfo.max_freq = nominal_freq; in amd_pstate_cpu_boost_update()
757 policy->max = policy->cpuinfo.max_freq; in amd_pstate_cpu_boost_update()
760 ret = freq_qos_update_request(&cpudata->req[1], policy->cpuinfo.max_freq); in amd_pstate_cpu_boost_update()
762 pr_debug("Failed to update freq constraint: CPU%d\n", cpudata->cpu); in amd_pstate_cpu_boost_update()
768 static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) in amd_pstate_set_boost() argument
770 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_set_boost()
773 if (!cpudata->boost_supported) { in amd_pstate_set_boost()
775 return -EOPNOTSUPP; in amd_pstate_set_boost()
778 ret = amd_pstate_cpu_boost_update(policy, state); in amd_pstate_set_boost()
787 int ret = -1; in amd_pstate_init_boost_support()
791 * boost_enabled state to be false; this is not an error for the cpufreq core to handle. in amd_pstate_init_boost_support()
799 ret = rdmsrq_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); in amd_pstate_init_boost_support()
801 pr_err_once("failed to read initial CPU boost state!\n"); in amd_pstate_init_boost_support()
802 ret = -EIO; in amd_pstate_init_boost_support()
807 cpudata->boost_supported = true; in amd_pstate_init_boost_support()
812 cpudata->boost_supported = false; in amd_pstate_init_boost_support()
829 /* should use amd-hfi instead */ in amd_pstate_init_prefcore()
836 cpudata->hw_prefcore = true; in amd_pstate_init_prefcore()
839 sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu); in amd_pstate_init_prefcore()
847 unsigned int cpu = policy->cpu; in amd_pstate_update_limits()
855 cpudata = policy->driver_data; in amd_pstate_update_limits()
857 prev_high = READ_ONCE(cpudata->prefcore_ranking); in amd_pstate_update_limits()
860 WRITE_ONCE(cpudata->prefcore_ranking, cur_high); in amd_pstate_update_limits()
909 * Returns 0 on success, non-zero value on failure.
918 ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf); in amd_pstate_init_freq()
921 perf = READ_ONCE(cpudata->perf); in amd_pstate_init_freq()
923 if (quirks && quirks->nominal_freq) in amd_pstate_init_freq()
924 nominal_freq = quirks->nominal_freq; in amd_pstate_init_freq()
929 if (quirks && quirks->lowest_freq) { in amd_pstate_init_freq()
930 min_freq = quirks->lowest_freq; in amd_pstate_init_freq()
932 WRITE_ONCE(cpudata->perf, perf); in amd_pstate_init_freq()
938 WRITE_ONCE(cpudata->nominal_freq, nominal_freq); in amd_pstate_init_freq()
942 WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq); in amd_pstate_init_freq()
953 return -EINVAL; in amd_pstate_init_freq()
959 return -EINVAL; in amd_pstate_init_freq()
976 amd_perf_ctl_reset(policy->cpu); in amd_pstate_cpu_init()
977 dev = get_cpu_device(policy->cpu); in amd_pstate_cpu_init()
979 return -ENODEV; in amd_pstate_cpu_init()
983 return -ENOMEM; in amd_pstate_cpu_init()
985 cpudata->cpu = policy->cpu; in amd_pstate_cpu_init()
1001 policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu); in amd_pstate_cpu_init()
1002 policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu); in amd_pstate_cpu_init()
1004 perf = READ_ONCE(cpudata->perf); in amd_pstate_cpu_init()
1006 policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf, in amd_pstate_cpu_init()
1007 cpudata->nominal_freq, in amd_pstate_cpu_init()
1009 policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf, in amd_pstate_cpu_init()
1010 cpudata->nominal_freq, in amd_pstate_cpu_init()
1017 policy->boost_supported = READ_ONCE(cpudata->boost_supported); in amd_pstate_cpu_init()
1020 policy->cur = policy->cpuinfo.min_freq; in amd_pstate_cpu_init()
1023 policy->fast_switch_possible = true; in amd_pstate_cpu_init()
1025 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0], in amd_pstate_cpu_init()
1028 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret); in amd_pstate_cpu_init()
1032 ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1], in amd_pstate_cpu_init()
1033 FREQ_QOS_MAX, policy->cpuinfo.max_freq); in amd_pstate_cpu_init()
1035 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret); in amd_pstate_cpu_init()
1039 policy->driver_data = cpudata; in amd_pstate_cpu_init()
1041 if (!current_pstate_driver->adjust_perf) in amd_pstate_cpu_init()
1042 current_pstate_driver->adjust_perf = amd_pstate_adjust_perf; in amd_pstate_cpu_init()
1047 freq_qos_remove_request(&cpudata->req[0]); in amd_pstate_cpu_init()
1049 pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret); in amd_pstate_cpu_init()
1056 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_cpu_exit()
1057 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_cpu_exit()
1062 freq_qos_remove_request(&cpudata->req[1]); in amd_pstate_cpu_exit()
1063 freq_qos_remove_request(&cpudata->req[0]); in amd_pstate_cpu_exit()
1064 policy->fast_switch_possible = false; in amd_pstate_cpu_exit()
1081 cpudata = policy->driver_data; in show_amd_pstate_max_freq()
1082 perf = READ_ONCE(cpudata->perf); in show_amd_pstate_max_freq()
1085 perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf)); in show_amd_pstate_max_freq()
1094 cpudata = policy->driver_data; in show_amd_pstate_lowest_nonlinear_freq()
1095 perf = READ_ONCE(cpudata->perf); in show_amd_pstate_lowest_nonlinear_freq()
1098 perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf)); in show_amd_pstate_lowest_nonlinear_freq()
1110 cpudata = policy->driver_data; in show_amd_pstate_highest_perf()
1112 return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf); in show_amd_pstate_highest_perf()
1119 struct amd_cpudata *cpudata = policy->driver_data; in show_amd_pstate_prefcore_ranking()
1121 perf = READ_ONCE(cpudata->prefcore_ranking); in show_amd_pstate_prefcore_ranking()
1130 struct amd_cpudata *cpudata = policy->driver_data; in show_amd_pstate_hw_prefcore()
1132 hw_prefcore = READ_ONCE(cpudata->hw_prefcore); in show_amd_pstate_hw_prefcore()
1142 struct amd_cpudata *cpudata = policy->driver_data; in show_energy_performance_available_preferences()
1144 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) in show_energy_performance_available_preferences()
1159 struct amd_cpudata *cpudata = policy->driver_data; in store_energy_performance_preference()
1166 return -EINVAL; in store_energy_performance_preference()
1168 ret = match_string(energy_perf_strings, -1, str_preference); in store_energy_performance_preference()
1170 return -EINVAL; in store_energy_performance_preference()
1173 epp = cpudata->epp_default; in store_energy_performance_preference()
1177 if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) { in store_energy_performance_preference()
1179 return -EBUSY; in store_energy_performance_preference()
1190 struct amd_cpudata *cpudata = policy->driver_data; in show_energy_performance_preference()
1193 epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); in show_energy_performance_preference()
1209 return -EINVAL; in show_energy_performance_preference()
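
store_energy_performance_preference() matches the string written to the sysfs attribute against the driver's preference table and converts it to an EPP value in the 0-255 CPPC range, where 0 leans fully toward performance and 255 fully toward power saving ("default" falls back to the EPP cached at init and is left out below). A rough sketch of that lookup; the preset numbers shown are the commonly used values and are illustrative rather than authoritative.

/* Sketch of the preference-string to EPP lookup performed by
 * store_energy_performance_preference(); the numeric presets are
 * shown for illustration only. */
#include <stdio.h>
#include <string.h>

struct epp_pref {
	const char *name;
	int epp;		/* 0 = max performance ... 255 = max power saving */
};

static const struct epp_pref prefs[] = {
	{ "performance",		0x00 },
	{ "balance_performance",	0x80 },
	{ "balance_power",		0xbf },
	{ "power",			0xff },
};

static int epp_from_string(const char *str)
{
	size_t i;

	for (i = 0; i < sizeof(prefs) / sizeof(prefs[0]); i++)
		if (!strcmp(str, prefs[i].name))
			return prefs[i].epp;

	return -1;		/* unknown preference, reject the write */
}

int main(void)
{
	printf("balance_power -> %d\n", epp_from_string("balance_power"));
	printf("bogus         -> %d\n", epp_from_string("bogus"));
	return 0;
}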
1240 return -EINVAL; in amd_pstate_set_driver()
1254 current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB); in amd_pstate_register_driver()
1353 return -EINVAL; in amd_pstate_update_status()
1358 return -EINVAL; in amd_pstate_update_status()
1384 ret = amd_pstate_update_status(buf, p ? p - buf : count); in status_store()
1468 amd_perf_ctl_reset(policy->cpu); in amd_pstate_epp_cpu_init()
1469 dev = get_cpu_device(policy->cpu); in amd_pstate_epp_cpu_init()
1471 return -ENODEV; in amd_pstate_epp_cpu_init()
1475 return -ENOMEM; in amd_pstate_epp_cpu_init()
1477 cpudata->cpu = policy->cpu; in amd_pstate_epp_cpu_init()
1493 perf = READ_ONCE(cpudata->perf); in amd_pstate_epp_cpu_init()
1495 policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf, in amd_pstate_epp_cpu_init()
1496 cpudata->nominal_freq, in amd_pstate_epp_cpu_init()
1498 policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf, in amd_pstate_epp_cpu_init()
1499 cpudata->nominal_freq, in amd_pstate_epp_cpu_init()
1501 policy->driver_data = cpudata; in amd_pstate_epp_cpu_init()
1508 policy->cur = policy->cpuinfo.min_freq; in amd_pstate_epp_cpu_init()
1511 policy->boost_supported = READ_ONCE(cpudata->boost_supported); in amd_pstate_epp_cpu_init()
1519 policy->policy = CPUFREQ_POLICY_PERFORMANCE; in amd_pstate_epp_cpu_init()
1520 cpudata->epp_default = amd_pstate_get_epp(cpudata); in amd_pstate_epp_cpu_init()
1522 policy->policy = CPUFREQ_POLICY_POWERSAVE; in amd_pstate_epp_cpu_init()
1523 cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE; in amd_pstate_epp_cpu_init()
1526 ret = amd_pstate_set_epp(policy, cpudata->epp_default); in amd_pstate_epp_cpu_init()
1530 current_pstate_driver->adjust_perf = NULL; in amd_pstate_epp_cpu_init()
1535 pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret); in amd_pstate_epp_cpu_init()
1542 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_epp_cpu_exit()
1545 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_epp_cpu_exit()
1551 policy->driver_data = NULL; in amd_pstate_epp_cpu_exit()
1554 pr_debug("CPU %d exiting\n", policy->cpu); in amd_pstate_epp_cpu_exit()
1559 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_epp_update_limit()
1564 policy->min != cpudata->min_limit_freq || in amd_pstate_epp_update_limit()
1565 policy->max != cpudata->max_limit_freq) in amd_pstate_epp_update_limit()
1568 if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) in amd_pstate_epp_update_limit()
1571 epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached); in amd_pstate_epp_update_limit()
1573 perf = READ_ONCE(cpudata->perf); in amd_pstate_epp_update_limit()
1581 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_epp_set_policy()
1584 if (!policy->cpuinfo.max_freq) in amd_pstate_epp_set_policy()
1585 return -ENODEV; in amd_pstate_epp_set_policy()
1587 cpudata->policy = policy->policy; in amd_pstate_epp_set_policy()
1594 * policy->cur is never updated with the amd_pstate_epp driver, but it in amd_pstate_epp_set_policy()
1597 policy->cur = policy->min; in amd_pstate_epp_set_policy()
1609 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_cpu_offline()
1610 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_cpu_offline()
1613 * Reset CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified in amd_pstate_cpu_offline()
1618 FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_cpu_offline()
1619 FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_cpu_offline()
1620 FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_cpu_offline()
1626 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_suspend()
1627 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_suspend()
1631 * Reset CPPC_REQ MSR to the BIOS value; this allows us to retain the BIOS-specified in amd_pstate_suspend()
1636 FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_suspend()
1637 FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_suspend()
1638 FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached), in amd_pstate_suspend()
1644 cpudata->suspended = true; in amd_pstate_suspend()
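
Both amd_pstate_cpu_offline() and amd_pstate_suspend() re-issue the CPPC request with the minimum performance field put back to the BIOS-provided value while keeping the cached desired/max/EPP fields, so the firmware-specified floor is retained across offline, suspend or kexec; that is what the matched "retain the BIOS specified" comments refer to. A minimal sketch of that field surgery on the cached request word, reusing the request layout assumed earlier and a stand-in MSR write:

/* Sketch of the offline/suspend step: put the BIOS-provided minimum
 * perf back into the cached request while keeping the other fields.
 * The bit layout is the conventional CPPC_REQ layout assumed earlier;
 * write_req_msr() stands in for the real MSR or shared-memory write. */
#include <stdint.h>
#include <stdio.h>

static void write_req_msr(uint64_t value)
{
	printf("wrmsr MSR_AMD_CPPC_REQ = %#llx\n", (unsigned long long)value);
}

static void reset_min_to_bios(uint64_t cppc_req_cached, uint8_t bios_min_perf)
{
	uint64_t value = cppc_req_cached;

	value &= ~((uint64_t)0xff << 8);		/* clear min perf, bits 15:8 */
	value |= (uint64_t)bios_min_perf << 8;		/* restore the BIOS floor    */

	write_req_msr(value);
}

int main(void)
{
	/* cached request: max=0xff, min=0x30, des=0x00, epp=0x80 (invented) */
	reset_min_to_bios(0x800030ffULL, 0x10);
	return 0;
}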
1651 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_resume()
1652 union perf_cached perf = READ_ONCE(cpudata->perf); in amd_pstate_resume()
1653 int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur); in amd_pstate_resume()
1662 struct amd_cpudata *cpudata = policy->driver_data; in amd_pstate_epp_resume()
1664 if (cpudata->suspended) { in amd_pstate_epp_resume()
1667 /* enable amd pstate from suspend state */ in amd_pstate_epp_resume()
1672 cpudata->suspended = false; in amd_pstate_epp_resume()
1691 .name = "amd-pstate",
1707 .name = "amd-pstate-epp",
1727 * that support MSR-based CPPC, the AMD Pstate driver may not in amd_cppc_supported()
1736 * the driver to work using the shared-memory mechanism. in amd_cppc_supported()
1740 switch (c->x86_model) { in amd_cppc_supported()
1748 switch (c->x86_model) { in amd_cppc_supported()
1771 return -ENODEV; in amd_pstate_init()
1775 return -EOPNOTSUPP; in amd_pstate_init()
1780 return -ENODEV; in amd_pstate_init()
1785 return -EEXIST; in amd_pstate_init()
1806 return -ENODEV; in amd_pstate_init()
1814 return -ENODEV; in amd_pstate_init()
1843 ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group); in amd_pstate_init()
1865 return -EINVAL; in amd_pstate_param()
1885 MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");