// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/platform_device.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %s.\n",
		 cpumask_pr_args(policy->cpus), str_enabled_disabled(val));

	return 0;
}
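
/*
 * The "freqdomain_cpus" sysfs attribute exposes the full set of CPUs in
 * this policy's ACPI (_PSD) frequency domain, which can be wider than
 * policy->cpus when the hardware coordinates P-states itself.
 */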
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	cpus_read_lock();
	set_boost(policy, val);
	cpus_read_unlock();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}
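
/*
 * Low-level P-state accessors.  The MSR variants run on the CPU whose
 * register is being touched (see the SMP calls below); the I/O variants
 * use the port described by the _PCT control register.
 */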
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}
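
/*
 * ->get() callback, installed only for the FIXED_HARDWARE (MSR) case:
 * read the current P-state selection of @cpu and map it back to a
 * frequency from the policy's table.
 */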
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data))
		return -ENODEV;

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				 next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				 next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}
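
/*
 * Expected to be invoked from the scheduler's frequency-update path on a
 * CPU of the policy, so the control register can be written directly
 * without cross-CPU calls; ->init only enables fast switching when
 * programming the local CPU is sufficient for the whole domain.
 */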
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq,
						    false);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}
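
/*
 * Used only for SYSTEM_IO policies, where the initial speed is not
 * detectable via the I/O ports: guess it by picking the _PSS entry
 * closest to the measured TSC frequency (cpu_khz).
 */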
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up in
 * hardware or handling it in firmware, without informing the OS. If this
 * goes undetected, the CPU may end up running at a different speed than
 * the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/*
	 * Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif
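
/*
 * Return the CPPC highest-to-nominal performance ratio, shifted left by
 * SCHED_CAPACITY_SHIFT, or 0 when it cannot be determined.  ->init uses
 * it to scale cpuinfo.max_freq above the maximum _PSS frequency.
 */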
#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;

	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}
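
	/*
	 * Build the cpufreq table from the _PSS states, allocating one
	 * extra slot for the CPUFREQ_TABLE_END terminator.
	 */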
	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu);
	if (max_boost_ratio) {
		unsigned int freq = freq_table[0].frequency;

		/*
		 * Because the loop above sorts the freq_table entries in the
		 * descending order, freq is the maximum frequency in the table.
		 * Assume that it corresponds to the CPPC nominal frequency and
		 * use it to set cpuinfo.max_freq.
		 */
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			 (i == perf->state ? '*' : ' '), i,
			 (u32) perf->states[i].core_frequency,
			 (u32) perf->states[i].power,
			 (u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	if (acpi_cpufreq_driver.set_boost) {
		set_boost(policy, acpi_cpufreq_driver.boost_enabled);
		policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
	}

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static void acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	cpufreq_boost_down_prep(policy->cpu);
	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);
}
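
/*
 * After system resume the P-state registers may no longer match the
 * cached state; setting data->resume forces the next ->target() call to
 * reprogram them even if the requested state appears unchanged.
 */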
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};
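
/*
 * Expose boost control only if the CPU advertises it: AMD Core
 * Performance Boost (CPB) or Intel Dynamic Acceleration (IDA).
 */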
static void __init acpi_cpufreq_boost_init(void)
{
	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);
}

static int __init acpi_cpufreq_probe(struct platform_device *pdev)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -ENODEV;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/*
	 * This is a sysfs file with a strange name and even stranger
	 * semantics - per-CPU instantiation, but system-global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}

static void acpi_cpufreq_remove(struct platform_device *pdev)
{
	pr_debug("%s\n", __func__);

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

static struct platform_driver acpi_cpufreq_platdrv = {
	.driver = {
		.name	= "acpi-cpufreq",
	},
	.remove_new	= acpi_cpufreq_remove,
};

static int __init acpi_cpufreq_init(void)
{
	return platform_driver_probe(&acpi_cpufreq_platdrv, acpi_cpufreq_probe);
}

static void __exit acpi_cpufreq_exit(void)
{
	platform_driver_unregister(&acpi_cpufreq_platdrv);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("platform:acpi-cpufreq");