// SPDX-License-Identifier: GPL-2.0-only
/*
 * cppc.c: CPPC Interface for x86
 * Copyright (c) 2016, Intel Corporation.
 */

#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/topology.h>

#define CPPC_HIGHEST_PERF_PERFORMANCE	196
#define CPPC_HIGHEST_PERF_PREFCORE	166

enum amd_pref_core {
	AMD_PREF_CORE_UNKNOWN = 0,
	AMD_PREF_CORE_SUPPORTED,
	AMD_PREF_CORE_UNSUPPORTED,
};
static enum amd_pref_core amd_pref_core_detected;
static u64 boost_numerator;

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
			return true;
		else if (boot_cpu_data.x86 == 0x17 &&
			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
			return true;
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

bool cpc_ffh_supported(void)
{
	return true;
}

int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		*val &= mask;
		*val >>= reg->bit_offset;
	}
	return err;
}

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	u64 rd_val;
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		val <<= reg->bit_offset;
		val &= mask;
		rd_val &= ~mask;
		rd_val |= val;
		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
	}
	return err;
}
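
/*
 * Illustrative arithmetic for the "midpoint" ratio computed below (the input
 * values are assumed for the example, not read from real hardware): with
 * numerator == 196 and nominal_perf == 166,
 *
 *	div_u64(196 * SCHED_CAPACITY_SCALE, 166)	-> 1209
 *	(1209 + SCHED_CAPACITY_SCALE) >> 1		-> 1116
 *
 * so the frequency-invariance ratio lands halfway between the boost and
 * nominal performance levels, slightly above SCHED_CAPACITY_SCALE (1024).
 */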
static void amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 numerator, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_warn("Could not retrieve perf counters (%d)\n", rc);
		return;
	}

	rc = amd_get_boost_ratio_numerator(0, &numerator);
	if (rc) {
		pr_warn("Could not retrieve highest performance (%d)\n", rc);
		return;
	}
	nominal_perf = perf_caps.nominal_perf;

	if (!nominal_perf) {
		pr_warn("Could not retrieve nominal performance\n");
		return;
	}

	/* midpoint between max_boost and max_P */
	perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;

	freq_invariance_set_perf_ratio(perf_ratio, false);
}

static DEFINE_MUTEX(freq_invariance_lock);

void init_freq_invariance_cppc(void)
{
	static bool init_done;

	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	mutex_lock(&freq_invariance_lock);
	if (!init_done)
		amd_set_max_freq_ratio();
	init_done = true;
	mutex_unlock(&freq_invariance_lock);
}

/**
 * amd_get_highest_perf: Get the highest performance register value
 * @cpu: CPU from which to get highest performance.
 * @highest_perf: Return address for highest performance value.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
	u64 val;
	int ret;

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
		if (ret)
			goto out;

		val = AMD_CPPC_HIGHEST_PERF(val);
	} else {
		ret = cppc_get_highest_perf(cpu, &val);
		if (ret)
			goto out;
	}

	WRITE_ONCE(*highest_perf, (u32)val);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

/**
 * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
 * @detected: Output variable for the result of the detection.
 *
 * Determine whether CPUs in the system support preferred cores. On systems
 * that support preferred cores, different highest perf values will be found
 * on different cores. On other systems, the highest perf value will be the
 * same on all cores.
 *
 * The result of the detection will be stored in the 'detected' parameter.
 *
 * Return: 0 for success, negative error code otherwise
 */
int amd_detect_prefcore(bool *detected)
{
	int cpu, count = 0;
	u64 highest_perf[2] = {0};

	if (WARN_ON(!detected))
		return -EINVAL;

	switch (amd_pref_core_detected) {
	case AMD_PREF_CORE_SUPPORTED:
		*detected = true;
		return 0;
	case AMD_PREF_CORE_UNSUPPORTED:
		*detected = false;
		return 0;
	default:
		break;
	}

	for_each_present_cpu(cpu) {
		u32 tmp;
		int ret;

		ret = amd_get_highest_perf(cpu, &tmp);
		if (ret)
			return ret;

		if (!count || (count == 1 && tmp != highest_perf[0]))
			highest_perf[count++] = tmp;

		if (count == 2)
			break;
	}

	*detected = (count == 2);
	boost_numerator = highest_perf[0];

	amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
					     AMD_PREF_CORE_UNSUPPORTED;

	pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
		 *detected ? "" : "un", highest_perf[0]);

	return 0;
}
EXPORT_SYMBOL_GPL(amd_detect_prefcore);
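
/*
 * Usage sketch (illustrative only; the actual consumers live in
 * drivers/cpufreq/amd-pstate.c): a cpufreq driver would typically gate its
 * preferred-core ranking on the detection result, e.g.
 *
 *	bool prefcore;
 *
 *	if (!amd_detect_prefcore(&prefcore) && prefcore)
 *		enable_prefcore_ranking();	(hypothetical helper)
 *
 * where the ranking step feeds each core's highest perf value to the
 * scheduler, for example through ITMT core priorities.
 */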

/**
 * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
 * @cpu: CPU to get numerator for.
 * @numerator: Output variable for numerator.
 *
 * Determine the numerator to use for calculating the boost ratio on
 * a CPU. On systems that support preferred cores, this will be a hardcoded
 * value. On other systems this will be the highest performance register value.
 *
 * If the system boots with amd-pstate enabled but preferred cores disabled,
 * the correct boost numerator is still returned to match the hardware
 * capabilities, even if the preferred cores scheduling hints are not enabled.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
	bool prefcore;
	int ret;

	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	/* without preferred cores, return the highest perf register value */
	if (!prefcore) {
		*numerator = boost_numerator;
		return 0;
	}

	/*
	 * For AMD CPUs with Family ID 19H and Model ID range 0x70 to 0x7f,
	 * the highest performance level is set to 196.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=218759
	 */
	if (cpu_feature_enabled(X86_FEATURE_ZEN4)) {
		switch (boot_cpu_data.x86_model) {
		case 0x70 ... 0x7f:
			*numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
			return 0;
		default:
			break;
		}
	}
	*numerator = CPPC_HIGHEST_PERF_PREFCORE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
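
/*
 * Consumer-side arithmetic (a sketch under assumed values, not code from this
 * file): a driver such as amd-pstate derives the maximum boost frequency from
 * the numerator returned above, roughly as
 *
 *	boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
 *	max_freq    = (nominal_freq * boost_ratio) >> SCHED_CAPACITY_SHIFT;
 *
 * e.g. numerator == 166, nominal_perf == 120 and nominal_freq == 3000 MHz
 * give max_freq ~ 4148 MHz. See drivers/cpufreq/amd-pstate.c for the actual
 * consumers.
 */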