/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
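/*
 * A quick numeric illustration of the fixed point helpers above: with
 * FRAC_BITS == 8, int_tofp(5) == 1280 and mul_fp(int_tofp(5), int_tofp(3))
 * == int_tofp(15).  With EXT_FRAC_BITS == 14, div_ext_fp(1, 2) == 8192
 * represents 0.5 and mul_ext_fp(8192, 200) recovers 100.  The extended
 * format trades integer headroom for the extra fractional precision
 * needed by the APERF/MPERF ratios below.
 */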
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for CPU idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical: This is the physical Max P state for a processor.
 *			This can be higher than the max_pstate, which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};
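/*
 * E.g. if the VID MSRs reported vid.min == int_tofp(20) and vid.max ==
 * int_tofp(54) for min_pstate == 6 and max_pstate == 23 (made-up values,
 * purely for illustration), the ratio would be int_tofp(2): the VID must
 * rise by two steps per P state step when atom_get_val() interpolates a
 * voltage for a requested P state.
 */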
/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, the
 *			minimum of the limit enforced by the cpufreq policy
 *			and the user-set limit from intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, the
 *			maximum of the limit enforced by the cpufreq policy
 *			and the user-set limit from intel_pstate sysfs
 * @max_perf:		Scaled fixed-point value corresponding to max_perf_pct,
 *			used to limit the max pstate
 * @min_perf:		Scaled fixed-point value corresponding to min_perf_pct,
 *			used to limit the min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			the intel_pstate sysfs interface, unused when per-CPU
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			the cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			the intel_pstate sysfs interface, unused when per-CPU
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};
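/*
 * E.g. if the cpufreq policy allows 20%..90% and the sysfs knobs are set
 * to 40%..80%, the effective limits become min_perf_pct == max(20, 40)
 * == 40 and max_perf_pct == min(90, 80) == 80; max_perf and min_perf
 * hold the same bounds as EXT_FRAC_BITS fixed point fractions of 1
 * (via div_ext_fp(pct, 100)).
 */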
/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @perf_limits:	Pointer to the perf_limits unique to this CPU.
 *			Not all fields in the structure are applicable
 *			when per-CPU controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64 last_update;
	u64 last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	u64 prev_tsc;
	u64 prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
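/*
 * sample_rate_ns is pre-computed from sample_rate_ms (e.g. 10 ms ->
 * 10,000,000 ns, or 50 ms when HWP is active) so that the utilization
 * update callback can gate sampling with a plain nanosecond comparison
 * on its hot path.
 */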
/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:	Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

static bool driver_registered __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

#ifdef CONFIG_ACPI_CPPC_LIB

/* The work item is needed to avoid CPU hotplug locking issues */
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);

static void intel_pstate_set_itmt_prio(int cpu)
{
	struct cppc_perf_caps cppc_perf;
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int ret;

	ret = cppc_get_perf_caps(cpu, &cppc_perf);
	if (ret)
		return;

	/*
	 * The priorities can be set regardless of whether or not
	 * sched_set_itmt_support() has been called and it is valid to
	 * update them at any time after it has been called.
	 */
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);

	if (max_highest_perf <= min_highest_perf) {
		if (cppc_perf.highest_perf > max_highest_perf)
			max_highest_perf = cppc_perf.highest_perf;

		if (cppc_perf.highest_perf < min_highest_perf)
			min_highest_perf = cppc_perf.highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			schedule_work(&sched_itmt_work);
		}
	}
}
#else
static void intel_pstate_set_itmt_prio(int cpu)
{
}
#endif
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active) {
		intel_pstate_set_itmt_prio(policy->cpu);
		return;
	}

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking it into account.
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non turbo frequency,
	 * with a control value corresponding to the max turbo ratio. But
	 * when cpufreq set_policy is called, it will be called with this
	 * max frequency, which will cause reduced performance, because
	 * this driver uses the real max turbo frequency as the max
	 * frequency. So fix up the frequency in the _PSS table to the
	 * real max turbo frequency based on the turbo state.
	 * Also need to convert to MHz, as _PSS frequencies are in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}
#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband = int_tofp(deadband);
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}
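/*
 * A worked example of the controller below, using the Core PID defaults
 * (setpoint 97, p_gain_pct 20, zero i/d gains): a scaled busy value of
 * 92% gives fp_error == int_tofp(5), a proportional term of roughly
 * int_tofp(1), and pid_calc() returns 1 after rounding, which the
 * performance policy then subtracts from the current P state.
 */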
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, the caller has not read
		 * MSR_HWP_REQUEST yet, so read it here to get the EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}
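/*
 * The EPP/EPB helpers above and the preference table below map raw
 * register values to named preferences using only the top two bits of
 * the field.  E.g. an EPP of 0x80 yields index (0x80 >> 6) + 1 == 3,
 * "balance_power", and an EPB of 0x06 yields index (6 >> 2) + 1 == 2,
 * "balance_performance".
 */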
/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8-bit value, but our ranges restrict the
		 * value which can be set.  Here we only use the top two
		 * bits effectively.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4-bit value, but our ranges restrict the
		 * value which can be set.  Here we only use the top two
		 * bits effectively.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not default, convert from the index into
		 * energy_perf_strings to an epp value by shifting 6
		 * bits left, so that only the top two bits of epp are
		 * used.  The resulting epp then needs to be shifted by
		 * 24 bits to the EPP position in MSR_HWP_REQUEST.
		 */
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 6;

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret, i = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	while (energy_perf_strings[i] != NULL) {
		if (!strcmp(str_preference, energy_perf_strings[i])) {
			intel_pstate_set_energy_pref_index(cpu_data, i);
			return count;
		}
		++i;
	}

	return -EINVAL;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};
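/*
 * intel_pstate_hwp_set() below maps the percentage limits onto the
 * hardware range reported by MSR_HWP_CAPABILITIES.  E.g. if the MSR
 * reported hw_min == 8 and hw_max == 40 (range == 32, made-up
 * capabilities purely for illustration), min_perf_pct == 25 would yield
 * adj_range == 8 and a HWP_MIN_PERF of 16, while max_perf_pct == 100
 * keeps HWP_MAX_PERF at 40.
 */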
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, policy->cpus) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->epp_saved >= 0) {
			epp = cpu_data->epp_saved;
			cpu_data->epp_saved = -EINVAL;
			goto update_epp;
		}

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			cpu_data->epp_powersave = epp;
			/* If the EPP read failed, don't try to write it */
			if (epp < 0)
				goto skip_epp;

			epp = 0;
		} else {
			/* skip setting EPP, when saved value is invalid */
			if (cpu_data->epp_powersave < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero, which
			 * means one of:
			 *  - the policy has not changed,
			 *  - the user has changed it manually,
			 *  - reading the EPB failed.
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_powersave;
		}
update_epp:
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy);

	return 0;
}

static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	int ret;

	if (!hwp_active)
		return 0;

	mutex_lock(&intel_pstate_limits_lock);

	all_cpu_data[policy->cpu]->epp_policy = 0;

	ret = intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static void intel_pstate_update_policies(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpufreq_update_policy(cpu);
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

static struct dentry *debugfs_parent;

struct pid_param {
	char *name;
	void *value;
	struct dentry *dentry;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms, },
	{"d_gain_pct", &pid_params.d_gain_pct, },
	{"i_gain_pct", &pid_params.i_gain_pct, },
	{"deadband", &pid_params.deadband, },
	{"setpoint", &pid_params.setpoint, },
	{"p_gain_pct", &pid_params.p_gain_pct, },
	{NULL, NULL, }
};

static void intel_pstate_debug_expose_params(void)
{
	int i;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;

	for (i = 0; pid_files[i].name; i++) {
		struct dentry *dentry;

		dentry = debugfs_create_file(pid_files[i].name, 0660,
					     debugfs_parent, pid_files[i].value,
					     &fops_pid_param);
		if (!IS_ERR(dentry))
			pid_files[i].dentry = dentry;
	}
}

static void intel_pstate_debug_hide_params(void)
{
	int i;

	if (IS_ERR_OR_NULL(debugfs_parent))
		return;

	for (i = 0; pid_files[i].name; i++) {
		debugfs_remove(pid_files[i].dentry);
		pid_files[i].dentry = NULL;
	}

	debugfs_remove(debugfs_parent);
	debugfs_parent = NULL;
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
			   struct attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_show_status(buf);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_status(struct kobject *a, struct attribute *b,
			    const char *buf, size_t count)
{
	char *p = memchr(buf, '\n', count);
	int ret;

	mutex_lock(&intel_pstate_driver_lock);
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);

	return ret < 0 ? ret : count;
}
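/*
 * show_turbo_pct() below reports which fraction of the supported P states
 * is turbo-only.  For example (hypothetical ratios), min_pstate == 8,
 * max_pstate == 24 and turbo_pstate == 32 give total == 25 states,
 * no_turbo == 17 of them non-turbo, and thus turbo_pct == 33.
 */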
static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!driver_registered) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
	}

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();

	mutex_unlock(&intel_pstate_driver_lock);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&status.attr,
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per-CPU limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes.
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}
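/*
 * The ATOM_RATIOS/ATOM_TURBO_RATIOS MSRs pack the ratios into 7-bit
 * fields: the minimum ratio sits at bits 14:8 and the maximum at bits
 * 22:16 of ATOM_RATIOS, and the turbo ratio at bits 6:0 of the turbo
 * MSR, which is what the shift-and-mask decoding above and below
 * implements.
 */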
static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
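/*
 * core_get_scaling() below returns the P state multiplier in kHz: Core
 * ratios step in 100 MHz increments, so e.g. a ratio of 24 corresponds
 * to 24 * 100000 kHz == 2.4 GHz when converted to cpufreq units.
 */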
static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
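/*
 * Example of the math above: APERF/MPERF deltas of 3,000,000/2,000,000
 * give core_avg_perf == div_ext_fp(3, 2), i.e. 1.5 in EXT_FRAC_BITS
 * fixed point.  With max_pstate_physical == 24 and scaling == 100000
 * (hypothetical values), get_avg_frequency() (defined below) would then
 * report 3,600,000 kHz.
 */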
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
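/*
 * Worked example for the load-based policy above (made-up numbers): with
 * turbo enabled, turbo_pstate == 32 and busy_frac == 0.5, the boosted
 * target is (32 + 32/4) * 0.5 == 20; if the average P state of the
 * previous cycle was 24, half the difference is added back, giving 22.
 */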
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
				 div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}
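/*
 * intel_pstate_update_util() below runs on every scheduler utilization
 * update, but a new sample is only taken once per sample interval
 * (pid_params.sample_rate_ns, e.g. 10 ms for the Core PID defaults and
 * 50 ms when HWP is active, as set in intel_pstate_init_cpu()).  The
 * iowait boost requested via SCHED_CPUFREQ_IOWAIT decays by half on
 * each pass through get_target_pstate_use_cpu_load().
 */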
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_ext_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

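/*
 * Worked example for the percent arithmetic below (illustrative numbers,
 * not taken from the original source): with policy->min = 800000,
 * policy->max = 2400000 and cpuinfo.max_freq = 3200000 (kHz),
 *
 *	max_policy_pct = DIV_ROUND_UP(2400000 * 100, 3200000) = 75
 *	min_policy_pct = DIV_ROUND_UP(800000 * 100, 3200000)  = 25
 *
 * and with min_sysfs_pct = 30, max_sysfs_pct = 100 the normalized result
 * is min_perf_pct = 30 and max_perf_pct = 75, which is then converted to
 * EXT_FRAC_BITS fixed point via div_ext_fp(pct, 100).
 */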
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq &&
		    !limits->no_turbo) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	/* When per-CPU limits are used, sysfs limits are not used */
	if (!per_cpu_limits) {
		unsigned int max_freq, min_freq;

		max_freq = policy->cpuinfo.max_freq *
			   limits->max_sysfs_pct / 100;
		min_freq = policy->cpuinfo.max_freq *
			   limits->min_sysfs_pct / 100;
		cpufreq_verify_within_limits(policy, min_freq, max_freq);
	}

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active)
		intel_pstate_hwp_save_state(policy);
	else
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

/*
 * Common policy setup shared by the active (intel_pstate) and passive
 * (intel_cpufreq) drivers: initialize the per-CPU data, seed any per-CPU
 * limits from the global ones and derive the policy and cpuinfo frequency
 * ranges from the hardware min/max/turbo P-states.
 */
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	/*
	 * We need a sane value in cpu->perf_limits, so inherit the global
	 * perf_limits, which are seeded at boot based on
	 * CONFIG_CPU_FREQ_DEFAULT_GOV_*.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits = limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	intel_pstate_update_perf_limits(policy, perf_limits);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

/*
 * Illustrative example for the relation handling below (assuming the
 * common core scaling factor of 100000, i.e. 100 MHz per P-state step):
 * for target_freq = 1550000 kHz, CPUFREQ_RELATION_L (lowest frequency at
 * or above the target) rounds up to P-state 16, CPUFREQ_RELATION_H
 * (highest frequency at or below the target) rounds down to 15 and any
 * other relation picks the closest step.
 */
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

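/*
 * Passive-mode driver: instead of selecting P-states itself through
 * ->setpolicy, intel_cpufreq exposes ->target and ->fast_switch so that
 * P-state requests come from whichever generic cpufreq governor (e.g.
 * schedutil) is attached to the policy.
 */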
static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
}

static int intel_pstate_register_driver(void)
{
	int ret;

	ret = cpufreq_register_driver(intel_pstate_driver);
	if (ret) {
		intel_pstate_driver_cleanup();
		return ret;
	}

	mutex_lock(&intel_pstate_limits_lock);
	driver_registered = true;
	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_expose_params();

	return 0;
}

static int intel_pstate_unregister_driver(void)
{
	if (hwp_active)
		return -EBUSY;

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_hide_params();

	mutex_lock(&intel_pstate_limits_lock);
	driver_registered = false;
	mutex_unlock(&intel_pstate_limits_lock);

	cpufreq_unregister_driver(intel_pstate_driver);
	intel_pstate_driver_cleanup();

	return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
	if (!driver_registered)
		return sprintf(buf, "off\n");

	return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
					"active" : "passive");
}

/*
 * Parse a write to the driver status attribute (typically exposed as
 * "status" in the intel_pstate sysfs directory, assuming the usual layout
 * under /sys/devices/system/cpu/intel_pstate/). Accepted values are
 * "off", "active" and "passive"; switching between the two modes
 * unregisters the current driver and registers the other one.
 */
static int intel_pstate_update_status(const char *buf, size_t size)
{
	int ret;

	if (size == 3 && !strncmp(buf, "off", size))
		return driver_registered ?
			intel_pstate_unregister_driver() : -EINVAL;

	if (size == 6 && !strncmp(buf, "active", size)) {
		if (driver_registered) {
			if (intel_pstate_driver == &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		intel_pstate_driver = &intel_pstate;
		return intel_pstate_register_driver();
	}

	if (size == 7 && !strncmp(buf, "passive", size)) {
		if (driver_registered) {
			if (intel_pstate_driver != &intel_pstate)
				return 0;

			ret = intel_pstate_unregister_driver();
			if (ret)
				return ret;
		}

		intel_pstate_driver = &intel_cpufreq;
		return intel_pstate_register_driver();
	}

	return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
			get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val = funcs->get_val;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

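/*
 * Firmware coordination helpers: intel_pstate_no_acpi_pss() reports
 * whether no CPU provides a _PSS package and intel_pstate_has_acpi_ppc()
 * whether any CPU has a _PPC method. They are consulted by
 * intel_pstate_platform_pwr_mgmt_exists() for the platforms in the vendor
 * table below, whose firmware may expect to keep P-state control.
 */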
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16 valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

/*
 * Driver entry point. HWP-capable processors are matched first (unless
 * disabled with intel_pstate=no_hwp) and skip the MSR sanity check; other
 * processors must match the CPU model table and report valid
 * min/max/turbo ratios. Even then the driver backs off if the platform
 * firmware provides its own P-state management.
 */
static int __init intel_pstate_init(void)
{
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;
	int rc = 0;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		intel_pstate.attr = hwp_cpufreq_attrs;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	if (!hwp_active && hwp_only)
		return -ENOTSUPP;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	intel_pstate_request_control_from_smm();

	intel_pstate_sysfs_expose_params();

	mutex_lock(&intel_pstate_driver_lock);
	rc = intel_pstate_register_driver();
	mutex_unlock(&intel_pstate_driver_lock);
	if (rc)
		return rc;

	if (hwp_active)
		pr_info("HWP enabled\n");

	return 0;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		intel_pstate_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P-state driver for Intel Core processors");
MODULE_LICENSE("GPL");