1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * CPPC (Collaborative Processor Performance Control) driver for 4 * interfacing with the CPUfreq layer and governors. See 5 * cppc_acpi.c for CPPC specific methods. 6 * 7 * (C) Copyright 2014, 2015 Linaro Ltd. 8 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> 9 */ 10 11 #define pr_fmt(fmt) "CPPC Cpufreq:" fmt 12 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/delay.h> 16 #include <linux/cpu.h> 17 #include <linux/cpufreq.h> 18 #include <linux/dmi.h> 19 #include <linux/time.h> 20 #include <linux/vmalloc.h> 21 22 #include <asm/unaligned.h> 23 24 #include <acpi/cppc_acpi.h> 25 26 /* Minimum struct length needed for the DMI processor entry we want */ 27 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48 28 29 /* Offset in the DMI processor structure for the max frequency */ 30 #define DMI_PROCESSOR_MAX_SPEED 0x14 31 32 /* 33 * This list contains information parsed from per CPU ACPI _CPC and _PSD 34 * structures: e.g. the highest and lowest supported performance, capabilities, 35 * desired performance, level requested etc. Depending on the share_type, not 36 * all CPUs will have an entry in the list. 37 */ 38 static LIST_HEAD(cpu_data_list); 39 40 static bool boost_supported; 41 42 struct cppc_workaround_oem_info { 43 char oem_id[ACPI_OEM_ID_SIZE + 1]; 44 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 45 u32 oem_revision; 46 }; 47 48 static struct cppc_workaround_oem_info wa_info[] = { 49 { 50 .oem_id = "HISI ", 51 .oem_table_id = "HIP07 ", 52 .oem_revision = 0, 53 }, { 54 .oem_id = "HISI ", 55 .oem_table_id = "HIP08 ", 56 .oem_revision = 0, 57 } 58 }; 59 60 /* Callback function used to retrieve the max frequency from DMI */ 61 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) 62 { 63 const u8 *dmi_data = (const u8 *)dm; 64 u16 *mhz = (u16 *)private; 65 66 if (dm->type == DMI_ENTRY_PROCESSOR && 67 dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) { 68 u16 val = (u16)get_unaligned((const u16 *) 69 (dmi_data + DMI_PROCESSOR_MAX_SPEED)); 70 *mhz = val > *mhz ? val : *mhz; 71 } 72 } 73 74 /* Look up the max frequency in DMI */ 75 static u64 cppc_get_dmi_max_khz(void) 76 { 77 u16 mhz = 0; 78 79 dmi_walk(cppc_find_dmi_mhz, &mhz); 80 81 /* 82 * Real stupid fallback value, just in case there is no 83 * actual value set. 84 */ 85 mhz = mhz ? mhz : 1; 86 87 return (1000 * mhz); 88 } 89 90 /* 91 * If CPPC lowest_freq and nominal_freq registers are exposed then we can 92 * use them to convert perf to freq and vice versa 93 * 94 * If the perf/freq point lies between Nominal and Lowest, we can treat 95 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line 96 * and extrapolate the rest 97 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion 98 */ 99 static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data, 100 unsigned int perf) 101 { 102 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 103 static u64 max_khz; 104 u64 mul, div; 105 106 if (caps->lowest_freq && caps->nominal_freq) { 107 if (perf >= caps->nominal_perf) { 108 mul = caps->nominal_freq; 109 div = caps->nominal_perf; 110 } else { 111 mul = caps->nominal_freq - caps->lowest_freq; 112 div = caps->nominal_perf - caps->lowest_perf; 113 } 114 } else { 115 if (!max_khz) 116 max_khz = cppc_get_dmi_max_khz(); 117 mul = max_khz; 118 div = caps->highest_perf; 119 } 120 return (u64)perf * mul / div; 121 } 122 123 static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data, 124 unsigned int freq) 125 { 126 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 127 static u64 max_khz; 128 u64 mul, div; 129 130 if (caps->lowest_freq && caps->nominal_freq) { 131 if (freq >= caps->nominal_freq) { 132 mul = caps->nominal_perf; 133 div = caps->nominal_freq; 134 } else { 135 mul = caps->lowest_perf; 136 div = caps->lowest_freq; 137 } 138 } else { 139 if (!max_khz) 140 max_khz = cppc_get_dmi_max_khz(); 141 mul = caps->highest_perf; 142 div = max_khz; 143 } 144 145 return (u64)freq * mul / div; 146 } 147 148 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, 149 unsigned int target_freq, 150 unsigned int relation) 151 152 { 153 struct cppc_cpudata *cpu_data = policy->driver_data; 154 unsigned int cpu = policy->cpu; 155 struct cpufreq_freqs freqs; 156 u32 desired_perf; 157 int ret = 0; 158 159 desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq); 160 /* Return if it is exactly the same perf */ 161 if (desired_perf == cpu_data->perf_ctrls.desired_perf) 162 return ret; 163 164 cpu_data->perf_ctrls.desired_perf = desired_perf; 165 freqs.old = policy->cur; 166 freqs.new = target_freq; 167 168 cpufreq_freq_transition_begin(policy, &freqs); 169 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); 170 cpufreq_freq_transition_end(policy, &freqs, ret != 0); 171 172 if (ret) 173 pr_debug("Failed to set target on CPU:%d. ret:%d\n", 174 cpu, ret); 175 176 return ret; 177 } 178 179 static int cppc_verify_policy(struct cpufreq_policy_data *policy) 180 { 181 cpufreq_verify_within_cpu_limits(policy); 182 return 0; 183 } 184 185 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) 186 { 187 struct cppc_cpudata *cpu_data = policy->driver_data; 188 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 189 unsigned int cpu = policy->cpu; 190 int ret; 191 192 cpu_data->perf_ctrls.desired_perf = caps->lowest_perf; 193 194 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); 195 if (ret) 196 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 197 caps->lowest_perf, cpu, ret); 198 199 /* Remove CPU node from list and free driver data for policy */ 200 free_cpumask_var(cpu_data->shared_cpu_map); 201 list_del(&cpu_data->node); 202 kfree(policy->driver_data); 203 policy->driver_data = NULL; 204 } 205 206 /* 207 * The PCC subspace describes the rate at which platform can accept commands 208 * on the shared PCC channel (including READs which do not count towards freq 209 * transition requests), so ideally we need to use the PCC values as a fallback 210 * if we don't have a platform specific transition_delay_us 211 */ 212 #ifdef CONFIG_ARM64 213 #include <asm/cputype.h> 214 215 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) 216 { 217 unsigned long implementor = read_cpuid_implementor(); 218 unsigned long part_num = read_cpuid_part_number(); 219 unsigned int delay_us = 0; 220 221 switch (implementor) { 222 case ARM_CPU_IMP_QCOM: 223 switch (part_num) { 224 case QCOM_CPU_PART_FALKOR_V1: 225 case QCOM_CPU_PART_FALKOR: 226 delay_us = 10000; 227 break; 228 default: 229 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 230 break; 231 } 232 break; 233 default: 234 delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 235 break; 236 } 237 238 return delay_us; 239 } 240 241 #else 242 243 static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) 244 { 245 return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; 246 } 247 #endif 248 249 250 static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu) 251 { 252 struct cppc_cpudata *cpu_data; 253 int ret; 254 255 cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); 256 if (!cpu_data) 257 goto out; 258 259 if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) 260 goto free_cpu; 261 262 ret = acpi_get_psd_map(cpu, cpu_data); 263 if (ret) { 264 pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret); 265 goto free_mask; 266 } 267 268 ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps); 269 if (ret) { 270 pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret); 271 goto free_mask; 272 } 273 274 /* Convert the lowest and nominal freq from MHz to KHz */ 275 cpu_data->perf_caps.lowest_freq *= 1000; 276 cpu_data->perf_caps.nominal_freq *= 1000; 277 278 list_add(&cpu_data->node, &cpu_data_list); 279 280 return cpu_data; 281 282 free_mask: 283 free_cpumask_var(cpu_data->shared_cpu_map); 284 free_cpu: 285 kfree(cpu_data); 286 out: 287 return NULL; 288 } 289 290 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) 291 { 292 unsigned int cpu = policy->cpu; 293 struct cppc_cpudata *cpu_data; 294 struct cppc_perf_caps *caps; 295 int ret; 296 297 cpu_data = cppc_cpufreq_get_cpu_data(cpu); 298 if (!cpu_data) { 299 pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu); 300 return -ENODEV; 301 } 302 caps = &cpu_data->perf_caps; 303 policy->driver_data = cpu_data; 304 305 /* 306 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see 307 * Section 8.4.7.1.1.5 of ACPI 6.1 spec) 308 */ 309 policy->min = cppc_cpufreq_perf_to_khz(cpu_data, 310 caps->lowest_nonlinear_perf); 311 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 312 caps->nominal_perf); 313 314 /* 315 * Set cpuinfo.min_freq to Lowest to make the full range of performance 316 * available if userspace wants to use any perf between lowest & lowest 317 * nonlinear perf 318 */ 319 policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data, 320 caps->lowest_perf); 321 policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data, 322 caps->nominal_perf); 323 324 policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu); 325 policy->shared_type = cpu_data->shared_type; 326 327 switch (policy->shared_type) { 328 case CPUFREQ_SHARED_TYPE_HW: 329 case CPUFREQ_SHARED_TYPE_NONE: 330 /* Nothing to be done - we'll have a policy for each CPU */ 331 break; 332 case CPUFREQ_SHARED_TYPE_ANY: 333 /* 334 * All CPUs in the domain will share a policy and all cpufreq 335 * operations will use a single cppc_cpudata structure stored 336 * in policy->driver_data. 337 */ 338 cpumask_copy(policy->cpus, cpu_data->shared_cpu_map); 339 break; 340 default: 341 pr_debug("Unsupported CPU co-ord type: %d\n", 342 policy->shared_type); 343 return -EFAULT; 344 } 345 346 /* 347 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost 348 * is supported. 349 */ 350 if (caps->highest_perf > caps->nominal_perf) 351 boost_supported = true; 352 353 /* Set policy->cur to max now. The governors will adjust later. */ 354 policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf); 355 cpu_data->perf_ctrls.desired_perf = caps->highest_perf; 356 357 ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls); 358 if (ret) 359 pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n", 360 caps->highest_perf, cpu, ret); 361 362 return ret; 363 } 364 365 static inline u64 get_delta(u64 t1, u64 t0) 366 { 367 if (t1 > t0 || t0 > ~(u32)0) 368 return t1 - t0; 369 370 return (u32)t1 - (u32)t0; 371 } 372 373 static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data, 374 struct cppc_perf_fb_ctrs fb_ctrs_t0, 375 struct cppc_perf_fb_ctrs fb_ctrs_t1) 376 { 377 u64 delta_reference, delta_delivered; 378 u64 reference_perf, delivered_perf; 379 380 reference_perf = fb_ctrs_t0.reference_perf; 381 382 delta_reference = get_delta(fb_ctrs_t1.reference, 383 fb_ctrs_t0.reference); 384 delta_delivered = get_delta(fb_ctrs_t1.delivered, 385 fb_ctrs_t0.delivered); 386 387 /* Check to avoid divide-by zero */ 388 if (delta_reference || delta_delivered) 389 delivered_perf = (reference_perf * delta_delivered) / 390 delta_reference; 391 else 392 delivered_perf = cpu_data->perf_ctrls.desired_perf; 393 394 return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf); 395 } 396 397 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) 398 { 399 struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0}; 400 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 401 struct cppc_cpudata *cpu_data = policy->driver_data; 402 int ret; 403 404 cpufreq_cpu_put(policy); 405 406 ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0); 407 if (ret) 408 return ret; 409 410 udelay(2); /* 2usec delay between sampling */ 411 412 ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1); 413 if (ret) 414 return ret; 415 416 return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1); 417 } 418 419 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state) 420 { 421 struct cppc_cpudata *cpu_data = policy->driver_data; 422 struct cppc_perf_caps *caps = &cpu_data->perf_caps; 423 int ret; 424 425 if (!boost_supported) { 426 pr_err("BOOST not supported by CPU or firmware\n"); 427 return -EINVAL; 428 } 429 430 if (state) 431 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 432 caps->highest_perf); 433 else 434 policy->max = cppc_cpufreq_perf_to_khz(cpu_data, 435 caps->nominal_perf); 436 policy->cpuinfo.max_freq = policy->max; 437 438 ret = freq_qos_update_request(policy->max_freq_req, policy->max); 439 if (ret < 0) 440 return ret; 441 442 return 0; 443 } 444 445 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) 446 { 447 struct cppc_cpudata *cpu_data = policy->driver_data; 448 449 return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf); 450 } 451 cpufreq_freq_attr_ro(freqdomain_cpus); 452 453 static struct freq_attr *cppc_cpufreq_attr[] = { 454 &freqdomain_cpus, 455 NULL, 456 }; 457 458 static struct cpufreq_driver cppc_cpufreq_driver = { 459 .flags = CPUFREQ_CONST_LOOPS, 460 .verify = cppc_verify_policy, 461 .target = cppc_cpufreq_set_target, 462 .get = cppc_cpufreq_get_rate, 463 .init = cppc_cpufreq_cpu_init, 464 .stop_cpu = cppc_cpufreq_stop_cpu, 465 .set_boost = cppc_cpufreq_set_boost, 466 .attr = cppc_cpufreq_attr, 467 .name = "cppc_cpufreq", 468 }; 469 470 /* 471 * HISI platform does not support delivered performance counter and 472 * reference performance counter. It can calculate the performance using the 473 * platform specific mechanism. We reuse the desired performance register to 474 * store the real performance calculated by the platform. 475 */ 476 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu) 477 { 478 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 479 struct cppc_cpudata *cpu_data = policy->driver_data; 480 u64 desired_perf; 481 int ret; 482 483 cpufreq_cpu_put(policy); 484 485 ret = cppc_get_desired_perf(cpu, &desired_perf); 486 if (ret < 0) 487 return -EIO; 488 489 return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf); 490 } 491 492 static void cppc_check_hisi_workaround(void) 493 { 494 struct acpi_table_header *tbl; 495 acpi_status status = AE_OK; 496 int i; 497 498 status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); 499 if (ACPI_FAILURE(status) || !tbl) 500 return; 501 502 for (i = 0; i < ARRAY_SIZE(wa_info); i++) { 503 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && 504 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 505 wa_info[i].oem_revision == tbl->oem_revision) { 506 /* Overwrite the get() callback */ 507 cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate; 508 break; 509 } 510 } 511 512 acpi_put_table(tbl); 513 } 514 515 static int __init cppc_cpufreq_init(void) 516 { 517 if ((acpi_disabled) || !acpi_cpc_valid()) 518 return -ENODEV; 519 520 INIT_LIST_HEAD(&cpu_data_list); 521 522 cppc_check_hisi_workaround(); 523 524 return cpufreq_register_driver(&cppc_cpufreq_driver); 525 } 526 527 static inline void free_cpu_data(void) 528 { 529 struct cppc_cpudata *iter, *tmp; 530 531 list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) { 532 free_cpumask_var(iter->shared_cpu_map); 533 list_del(&iter->node); 534 kfree(iter); 535 } 536 537 } 538 539 static void __exit cppc_cpufreq_exit(void) 540 { 541 cpufreq_unregister_driver(&cppc_cpufreq_driver); 542 543 free_cpu_data(); 544 } 545 546 module_exit(cppc_cpufreq_exit); 547 MODULE_AUTHOR("Ashwin Chaugule"); 548 MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec"); 549 MODULE_LICENSE("GPL"); 550 551 late_initcall(cppc_cpufreq_init); 552 553 static const struct acpi_device_id cppc_acpi_ids[] __used = { 554 {ACPI_PROCESSOR_DEVICE_HID, }, 555 {} 556 }; 557 558 MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids); 559