/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int current_pstate;
	int min_pstate;
	int max_pstate;
	int scaling;
	int turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64 prev_aperf;
	u64 prev_mperf;
	struct sample sample;
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;

struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};
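/*
 * P states are steered with a simple PID controller: the busy estimate
 * derived from the APERF/MPERF deltas is the process variable,
 * pid_params.setpoint is the target, and the controller output is
 * applied as a signed delta to the current P state request. Gains and
 * errors are kept in the 8.8 fixed-point format defined by FRAC_BITS.
 */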
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30. This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
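/*
 * When HWP is not active, the PID tunables are exposed in a debugfs
 * directory named pstate_snb so the gains, deadband, setpoint and
 * sample rate can be changed at run time. Every write resets the
 * per-CPU controllers via intel_pstate_reset_all_pid().
 */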
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}
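/*
 * The effective performance limits can be narrowed from two sides: the
 * sysfs stores below and the cpufreq policy in intel_pstate_set_policy().
 * The stricter of the two always wins (min() for the ceiling, max() for
 * the floor), and the result is cached in fixed point for use by
 * intel_pstate_get_min_max().
 */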
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate HWP enabled\n");

	wrmsrl(MSR_PM_ENABLE, 0x1);
}

static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};

static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x3;

	BUG_ON(i >= BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}
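/*
 * Bay Trail needs a voltage ID (VID) programmed along with the ratio in
 * MSR_IA32_PERF_CTL. byt_get_vid() reads the min/max VID pair and
 * precomputes a fixed-point VID-per-pstate slope; byt_set_pstate() then
 * interpolates linearly between the two, rounding up with ceiling_fp()
 * so the requested voltage is never below what the ratio needs, and
 * switches to the dedicated turbo VID for turbo ratios.
 */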
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value >> 8) & 0xFF;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};

static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
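/*
 * A cpu_defaults table above is selected per CPU model: "core" parts
 * run an almost purely proportional controller with a setpoint of 97,
 * Bay Trail uses a lower setpoint (60) plus an integral term, and
 * Knights Landing reuses the core callbacks except for its different
 * turbo-ratio MSR layout.
 */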
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * Performance can be limited by the user through sysfs, by the
	 * cpufreq policy, or by cpu specific default values determined
	 * through experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}

static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
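/*
 * Worked example for the normalization below: if the last sample saw
 * the core 50% busy (APERF/MPERF) while the requested P state was 16 on
 * a part whose max non-turbo P state is 24, the scaled busy value is
 * 50 * 24/16 = 75, i.e. this load would keep the core about 75% busy at
 * the maximum non-turbo frequency.
 */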
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferrable timer, it will not fire unless
	 * we are in C0. So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval. If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}

static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(intel_pstate_get_scaled_busy(cpu)),
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->freq);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
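/*
 * Each CPU gets its own deferrable timer, pinned to that CPU by
 * add_timer_on()/mod_timer_pinned(). A deferrable timer only fires when
 * the CPU is already out of idle, so idle CPUs are never woken just to
 * take a sample; the 3x elapsed-time check in
 * intel_pstate_get_scaled_busy() compensates for the stretched sample
 * intervals this causes.
 */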
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		limits.min_policy_pct = 100;
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
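/*
 * intel_pstate is a setpolicy driver: it provides ->setpolicy rather
 * than ->target, so the cpufreq core only hands it a policy
 * (performance or powersave) and min/max bounds, and the driver's own
 * sampling timer chooses the actual P state within those bounds.
 */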
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;

static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{0, "", ""},
};
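/*
 * Keep intel_pstate from fighting platform-managed power control: bail
 * out when the out-of-band bit (bit 8) is set in MSR_MISC_PWR_MGMT, and
 * on the boards above defer to the firmware when _PSS is missing (PSS
 * entries) or _PPC is implemented (PPC entries), unless loading is
 * forced with intel_pstate=force.
 */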
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
			     ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
		intel_pstate_hwp_enable();

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");