/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
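
/*
 * One step of the busy-percentage PID controller.  All arithmetic below is
 * done in FRAC_BITS (24.8) fixed point, e.g. int_tofp(1) == 256.  Errors
 * inside the deadband are ignored, the integral term is clamped to +/-30 to
 * limit wind-up, the derivative term acts on the change of the error, and
 * the result is rounded to the nearest whole number of P-states by adding
 * 1 << (FRAC_BITS - 1) before truncation.
 */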
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
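
/*
 * Apply the current percentage limits when HWP (hardware-managed P-states)
 * is in use: map min_perf_pct/max_perf_pct onto the performance range
 * advertised in MSR_HWP_CAPABILITIES and program the result into
 * MSR_HWP_REQUEST on every online CPU.  With turbo disabled, the ceiling is
 * additionally capped at the guaranteed performance level.
 */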
static void intel_pstate_hwp_set(void)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	u64 value, cap;

	rdmsrl(MSR_HWP_CAPABILITIES, cap);
	hw_min = HWP_LOWEST_PERF(cap);
	hw_max = HWP_HIGHEST_PERF(cap);
	range = hw_max - hw_min;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = limits->min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = limits->max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}
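
/*
 * turbo_pct reports, based on CPU 0's limits, the percentage of the driver's
 * total P-state range that is only reachable with turbo, i.e. 100 minus the
 * share covered by [min_pstate, max_pstate].
 */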
static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits->min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
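
/*
 * Atom cores expose their ratio limits through dedicated MSRs rather than
 * MSR_PLATFORM_INFO: the minimum ratio is in bits 14:8 and the maximum
 * non-turbo ratio in bits 22:16 of ATOM_RATIOS, and the turbo ratio is in
 * bits 6:0 of ATOM_TURBO_RATIOS.
 */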
static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void atom_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
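
/*
 * core_get_max_pstate() returns the maximum non-turbo ratio.  It starts from
 * MSR_PLATFORM_INFO, but on parts that advertise configurable TDP (bits
 * 34:33 of MSR_PLATFORM_INFO) it prefers MSR_TURBO_ACTIVATION_RATIO when
 * that value is consistent with the currently selected config-TDP level,
 * since the effective sustained ratio may then differ from the one reported
 * in MSR_PLATFORM_INFO.
 */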
static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.set = atom_set_pstate,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};
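
/*
 * Translate the global percentage limits into the [min, max] P-state window
 * allowed for this CPU.  The percentages apply to the turbo ratio (or to the
 * maximum non-turbo ratio when turbo is off or unavailable) and the results
 * are clamped to the hardware range.
 */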
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
{
	int max_perf, min_perf;

	if (force) {
		update_turbo_state();

		intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

		pstate = clamp_t(int, pstate, min_perf, max_perf);

		if (pstate == cpu->pstate.current_pstate)
			return;
	}
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate_physical *
			cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) {
		local_irq_restore(flags);
		return;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc = tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
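
/*
 * Target selection used on Atom: estimate the load as the share of TSC
 * cycles spent in C0 (mperf/tsc), credit iowait time as busy time, and feed
 * the result to the PID controller; the controller's output is subtracted
 * from the currently requested P-state.
 */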
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	u64 cummulative_iowait, delta_iowait_us;
	u64 delta_iowait_mperf;
	u64 mperf, now;
	int32_t cpu_load;

	cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);

	/*
	 * Convert iowait time into number of IO cycles spent at max_freq.
	 * IO is considered as busy only for the cpu_load algorithm. For
	 * performance this is not needed since we always try to reach the
	 * maximum P-State, so we are already boosting the IOs.
	 */
	delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
	delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
		cpu->pstate.max_pstate, MSEC_PER_SEC);

	mperf = cpu->sample.mperf + delta_iowait_mperf;
	cpu->prev_cummulative_iowait = cummulative_iowait;

	/*
	 * The load can be estimated as the ratio of the mperf counter
	 * running at a constant frequency during active periods
	 * (C0) and the time stamp counter running at the same frequency
	 * also during C-states.
	 */
	cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
	cpu->sample.busy_scaled = cpu_load;

	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
}

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	s64 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = ktime_us_delta(cpu->sample.time,
				     cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	cpu->sample.busy_scaled = core_busy;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = pstate_funcs.get_target_pstate(cpu);

	intel_pstate_set_pstate(cpu, target_pstate, true);

	sample = &cpu->sample;
	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(sample->busy_scaled),
			    from,
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->tsc,
			    sample->freq);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);

	intel_pstate_adjust_busy_pstate(cpu);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, silvermont_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, airmont_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x5e, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active)
		intel_pstate_hwp_enable(cpu);

	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
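
/*
 * cpufreq ->setpolicy callback.  A "performance" policy with an unrestricted
 * maximum frequency switches to the performance limits set; anything else
 * selects the powersave limits and converts the policy's min/max frequencies
 * into percentages of cpuinfo.max_freq, merged with any sysfs overrides.
 */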
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		pr_debug("intel_pstate: set performance\n");
		limits = &performance_limits;
		if (hwp_active)
			intel_pstate_hwp_set();
		return 0;
	}

	pr_debug("intel_pstate: set powersave\n");
	limits = &powersave_limits;
	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0, 100);
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
				  int_tofp(100));
	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
				  int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
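
/*
 * Options parsed from the "intel_pstate=" early parameter: "disable" keeps
 * the driver from loading, "no_hwp" turns off hardware-managed P-states,
 * "force" loads the driver even when the platform firmware advertises its
 * own P-state control, and "hwp_only" registers the driver only when HWP is
 * available.
 */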
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};
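
/*
 * Return true when the platform firmware is expected to manage P-states
 * itself: either the CPU is in intel_pstate_cpu_oob_ids and bit 8 of
 * MSR_MISC_PWR_MGMT is set (out-of-band control, presumably by a BMC/node
 * manager), or the FADT OEM ids match an entry in vendor_info and the
 * corresponding _PSS/_PPC check indicates firmware-provided management
 * (unless "force" was passed on the command line for the _PPC case).
 */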
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
		pr_info("intel_pstate: HWP enabled\n");
		hwp_active++;
	}

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp")) {
		pr_info("intel_pstate: HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");