/*
 * processor_throttling.c - Throttling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *	- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");

/* ignore_tpc:
 *  0 -> acpi processor driver doesn't ignore _TPC values
 *  1 -> acpi processor driver ignores _TPC values
 */
static int ignore_tpc;
module_param(ignore_tpc, int, 0644);
MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");

struct throttling_tstate {
	unsigned int cpu;		/* cpu nr */
	int target_state;		/* target T-state */
};

struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

#define THROTTLING_PRECHANGE      (1)
#define THROTTLING_POSTCHANGE     (2)
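/*
 * Event codes for the internal throttling notifier below:
 * THROTTLING_PRECHANGE lets the notifier raise the requested T-state so
 * that it also satisfies the thermal, user and _TPC limits before the
 * switch is performed; THROTTLING_POSTCHANGE merely records the T-state
 * that was finally programmed.
 */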
static int acpi_processor_get_throttling(struct acpi_processor *pr);
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct);

static int acpi_processor_update_tsd_coord(void)
{
	int count, count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr, *match_pr;
	struct acpi_tsd_package *pdomain, *match_pdomain;
	struct acpi_processor_throttling *pthrottling, *match_pthrottling;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _TSD data from all CPUs, let's set up T-state
	 * coordination between all CPUs.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/* Basic validity check for domain info */
		pthrottling = &(pr->throttling);

		/*
		 * If the _TSD package of any CPU is invalid, the
		 * coordination among all CPUs is treated as invalid.
		 */
		if (!pthrottling->tsd_valid_flag) {
			retval = -EINVAL;
			break;
		}
	}
	if (retval)
		goto err_ret;

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;
		pthrottling = &pr->throttling;

		pdomain = &(pthrottling->domain_info);
		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		/*
		 * If the number of processors in the TSD domain is 1, it is
		 * unnecessary to parse the coordination for this CPU.
		 */
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		count = 1;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain.
			 * If two TSD packages have the same domain, they
			 * should have the same num_processors and
			 * coordination type. Otherwise it will be regarded
			 * as illegal.
			 */
			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
			count++;
		}
		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pthrottling = &(match_pr->throttling);
			match_pdomain = &(match_pthrottling->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/*
			 * If some CPUs have the same domain, they
			 * will have the same shared_cpu_map.
			 */
			cpumask_copy(match_pthrottling->shared_cpu_map,
				     pthrottling->shared_cpu_map);
		}
	}

err_ret:
	free_cpumask_var(covered_cpus);

	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		/*
		 * Assume no coordination on any error parsing domain info.
		 * The coordination type will be forced to SW_ALL.
		 */
		if (retval) {
			pthrottling = &(pr->throttling);
			cpumask_clear(pthrottling->shared_cpu_map);
			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
		}
	}

	return retval;
}

/*
 * Update the T-state coordination after the _TSD
 * data for all cpus is obtained.
 */
void acpi_processor_throttling_init(void)
{
	if (acpi_processor_update_tsd_coord()) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Assume no T-state coordination\n"));
	}

	return;
}

static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
	struct throttling_tstate *p_tstate = data;
	struct acpi_processor *pr;
	unsigned int cpu;
	int target_state;
	struct acpi_processor_limit *p_limit;
	struct acpi_processor_throttling *p_throttling;

	cpu = p_tstate->cpu;
	pr = per_cpu(processors, cpu);
	if (!pr) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
		return 0;
	}
	if (!pr->flags.throttling) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
				"unsupported on CPU %d\n", cpu));
		return 0;
	}
	target_state = p_tstate->target_state;
	p_throttling = &(pr->throttling);
	switch (event) {
	case THROTTLING_PRECHANGE:
		/*
		 * Prechange event is used to choose one proper T-state,
		 * which meets the limits of thermal, user and _TPC.
		 */
		p_limit = &pr->limit;
		if (p_limit->thermal.tx > target_state)
			target_state = p_limit->thermal.tx;
		if (p_limit->user.tx > target_state)
			target_state = p_limit->user.tx;
		if (pr->throttling_platform_limit > target_state)
			target_state = pr->throttling_platform_limit;
		if (target_state >= p_throttling->state_count) {
			printk(KERN_WARNING
				"Exceed the limit of T-state\n");
			target_state = p_throttling->state_count - 1;
		}
		p_tstate->target_state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event: "
				"target T-state of CPU %d is T%d\n",
				cpu, target_state));
		break;
	case THROTTLING_POSTCHANGE:
		/*
		 * Postchange event is only used to update the
		 * T-state flag of acpi_processor_throttling.
		 */
		p_throttling->state = target_state;
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event: "
				"CPU %d is switched to T%d\n",
				cpu, target_state));
		break;
	default:
		printk(KERN_WARNING
			"Unsupported Throttling notifier event\n");
		break;
	}

	return 0;
}

/*
 * _TPC - Throttling Present Capabilities
 */
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long tpc = 0;

	if (!pr)
		return -EINVAL;

	if (ignore_tpc)
		goto end;

	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
		}
		return -ENODEV;
	}

end:
	pr->throttling_platform_limit = (int)tpc;
	return 0;
}
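/*
 * Example (illustrative values, not from any particular platform): with
 * eight T-states (T0..T7), a _TPC return value of 2 means the platform
 * currently allows only T2 and deeper throttling states; requests for
 * T0 or T1 are rejected with -EPERM by the set_throttling helpers below.
 */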
int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
{
	int result = 0;
	int throttling_limit;
	int current_state;
	struct acpi_processor_limit *limit;
	int target_state;

	if (ignore_tpc)
		return 0;

	result = acpi_processor_get_platform_limit(pr);
	if (result) {
		/* Throttling Limit is unsupported */
		return result;
	}

	throttling_limit = pr->throttling_platform_limit;
	if (throttling_limit >= pr->throttling.state_count) {
		/* Incorrect Throttling Limit */
		return -EINVAL;
	}

	current_state = pr->throttling.state;
	if (current_state > throttling_limit) {
		/*
		 * The current state already meets the requirement of the
		 * _TPC limit, but it is reasonable for OSPM to change
		 * T-states from high to low for better performance.
		 * Of course the thermal and user limits still have to be
		 * honoured.
		 */
		limit = &pr->limit;
		target_state = throttling_limit;
		if (limit->thermal.tx > target_state)
			target_state = limit->thermal.tx;
		if (limit->user.tx > target_state)
			target_state = limit->user.tx;
	} else if (current_state == throttling_limit) {
		/*
		 * Unnecessary to change the throttling state
		 */
		return 0;
	} else {
		/*
		 * If the current state is lower than the limit of _TPC, it
		 * will be forced to switch to the throttling state defined
		 * by throttling_platform_limit.
		 * Because the previous state already meets the thermal and
		 * user limits, it is unnecessary to check them again.
		 */
		target_state = throttling_limit;
	}
	return acpi_processor_set_throttling(pr, target_state, false);
}
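/*
 * Worked example for the selection above (hypothetical numbers): if _TPC
 * now reports 1 while the CPU sits at T3, and the thermal limit is T2 and
 * the user limit is T0, the deepest of the three limits wins, i.e.
 * target_state = max(1, 2, 0) = T2, and acpi_processor_set_throttling()
 * is called with that state.
 */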
/*
 * This function is used to reevaluate whether the T-state is valid
 * after one CPU is onlined/offlined.
 * It is noted that it won't reevaluate the following properties for
 * the T-state.
 *	1. Control method.
 *	2. The number of supported T-states.
 *	3. The TSD domain.
 */
void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
					bool is_dead)
{
	int result = 0;

	if (is_dead) {
		/* When one CPU is offline, the T-state throttling
		 * will be invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	/* the following is to recheck whether the T-state is valid for
	 * the online CPU
	 */
	if (!pr->throttling.state_count) {
		/* If the number of T-states is invalid, throttling is
		 * invalidated.
		 */
		pr->flags.throttling = 0;
		return;
	}
	pr->flags.throttling = 1;

	/* Disable throttling (if enabled).  We'll let subsequent
	 * policy (e.g. thermal) decide to lower performance if it
	 * so chooses, but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;
}

/*
 * _PTC - Processor Throttling Control (and status) register location
 */
static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *ptc = NULL;
	union acpi_object obj = { 0 };
	struct acpi_processor_throttling *throttling;

	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
		}
		return -ENODEV;
	}

	ptc = (union acpi_object *)buffer.pointer;
	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
	    || (ptc->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = ptc->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX
		       "Invalid _PTC data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	/*
	 * status_register
	 */

	obj = ptc->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
	       sizeof(struct acpi_ptc_register));

	throttling = &pr->throttling;

	if ((throttling->control_register.bit_width +
	     throttling->control_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
		result = -EFAULT;
		goto end;
	}

	if ((throttling->status_register.bit_width +
	     throttling->status_register.bit_offset) > 32) {
		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
		result = -EFAULT;
		goto end;
	}

end:
	kfree(buffer.pointer);

	return result;
}
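/*
 * Note on the 32-bit checks above: the SYSTEM_IO accessors further down
 * (acpi_read_throttling_status()/acpi_write_throttling_state()) go through
 * acpi_os_read_port()/acpi_os_write_port() with a u32 value, so a _PTC
 * register whose bit field does not fit in 32 bits cannot be handled and
 * is rejected here.
 */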
/*
 * _TSS - Throttling Supported States
 */
static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tss = NULL;
	int i;

	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
		}
		return -ENODEV;
	}

	tss = buffer.pointer;
	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  tss->package.count));

	pr->throttling.state_count = tss->package.count;
	pr->throttling.states_tss =
	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
		    GFP_KERNEL);
	if (!pr->throttling.states_tss) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->throttling.state_count; i++) {

		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);

		state.length = sizeof(struct acpi_processor_tx_tss);
		state.pointer = tx;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(tss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}

		if (!tx->freqpercentage) {
			printk(KERN_ERR PREFIX
			       "Invalid _TSS data: freq is zero\n");
			result = -EFAULT;
			kfree(pr->throttling.states_tss);
			goto end;
		}
	}

end:
	kfree(buffer.pointer);

	return result;
}
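/*
 * For reference (per the ACPI specification): each _TSS entry decoded
 * above is itself a package of five integers, matching the "NNNNN"
 * format string - the throttled CPU frequency as a percentage of the
 * unthrottled frequency, the typical power dissipation, the transition
 * latency, and the control and status values used with the _PTC
 * registers.
 */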
/*
 * _TSD - T-State Dependencies
 */
static int acpi_processor_get_tsd(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *tsd = NULL;
	struct acpi_tsd_package *pdomain;
	struct acpi_processor_throttling *pthrottling;

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 0;

	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
		}
		return -ENODEV;
	}

	tsd = buffer.pointer;
	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (tsd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	pdomain = &(pr->throttling.domain_info);

	state.length = sizeof(struct acpi_tsd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(tsd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	pthrottling = &pr->throttling;
	pthrottling->tsd_valid_flag = 1;
	pthrottling->shared_type = pdomain->coord_type;
	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
	/*
	 * If the coordination type is not defined in the ACPI spec,
	 * the tsd_valid_flag will be cleared and the coordination type
	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
	 */
	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pthrottling->tsd_valid_flag = 0;
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

end:
	kfree(buffer.pointer);
	return result;
}

/* --------------------------------------------------------------------------
                              Throttling Control
   -------------------------------------------------------------------------- */
static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
	int state = 0;
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * We don't care about error returns - we just try to mark
	 * these reserved so that nobody else is confused into thinking
	 * that this region might be unused.
	 *
	 * (In particular, allocating the IO range for Cardbus)
	 */
	request_region(pr->throttling.address, 6, "ACPI CPU throttle");

	pr->throttling.state = 0;

	duty_mask = pr->throttling.state_count - 1;

	duty_mask <<= pr->throttling.duty_offset;

	local_irq_disable();

	value = inl(pr->throttling.address);

	/*
	 * Compute the current throttling state when throttling is enabled
	 * (bit 4 is on).
	 */
	if (value & 0x10) {
		duty_value = value & duty_mask;
		duty_value >>= pr->throttling.duty_offset;

		if (duty_value)
			state = pr->throttling.state_count - duty_value;
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state is T%d (%d%% throttling applied)\n",
			  state, pr->throttling.states[state].performance));

	return 0;
}

#ifdef CONFIG_X86
static int acpi_throttling_rdmsr(u64 *value)
{
	u64 msr_high, msr_low;
	u64 msr = 0;
	int ret = -1;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space, NOT supported yet\n");
	} else {
		msr_low = 0;
		msr_high = 0;
		rdmsr_safe(MSR_IA32_THERM_CONTROL,
			(u32 *)&msr_low, (u32 *)&msr_high);
		msr = (msr_high << 32) | msr_low;
		*value = (u64)msr;
		ret = 0;
	}
	return ret;
}

static int acpi_throttling_wrmsr(u64 value)
{
	int ret = -1;
	u64 msr;

	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
		!this_cpu_has(X86_FEATURE_ACPI)) {
		printk(KERN_ERR PREFIX
			"HARDWARE addr space, NOT supported yet\n");
	} else {
		msr = value;
		wrmsr_safe(MSR_IA32_THERM_CONTROL,
			msr & 0xffffffff, msr >> 32);
		ret = 0;
	}
	return ret;
}
#else
static int acpi_throttling_rdmsr(u64 *value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space, NOT supported yet\n");
	return -1;
}

static int acpi_throttling_wrmsr(u64 value)
{
	printk(KERN_ERR PREFIX
		"HARDWARE addr space, NOT supported yet\n");
	return -1;
}
#endif

static int acpi_read_throttling_status(struct acpi_processor *pr,
					u64 *value)
{
	u32 bit_width, bit_offset;
	u32 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->status_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->status_register.bit_width;
		bit_offset = throttling->status_register.bit_offset;

		acpi_os_read_port((acpi_io_address) throttling->status_register.
				  address, &ptc_value,
				  (u32) (bit_width + bit_offset));
		ptc_mask = (1 << bit_width) - 1;
		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_rdmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->status_register.space_id));
	}
	return ret;
}

static int acpi_write_throttling_state(struct acpi_processor *pr,
				       u64 value)
{
	u32 bit_width, bit_offset;
	u64 ptc_value;
	u64 ptc_mask;
	struct acpi_processor_throttling *throttling;
	int ret = -1;

	throttling = &pr->throttling;
	switch (throttling->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		bit_width = throttling->control_register.bit_width;
		bit_offset = throttling->control_register.bit_offset;
		ptc_mask = (1 << bit_width) - 1;
		ptc_value = value & ptc_mask;

		acpi_os_write_port((acpi_io_address) throttling->
				   control_register.address,
				   (u32) (ptc_value << bit_offset),
				   (u32) (bit_width + bit_offset));
		ret = 0;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		ret = acpi_throttling_wrmsr(value);
		break;
	default:
		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
		       (u32) (throttling->control_register.space_id));
	}
	return ret;
}
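/*
 * Illustration of the SYSTEM_IO bit-field handling above (made-up numbers):
 * with bit_offset = 1 and bit_width = 4, a raw port value of 0x1e yields
 * ptc_mask = 0xf and a status value of (0x1e >> 1) & 0xf = 0xf; writes go
 * the other way, shifting the _TSS control value left by bit_offset before
 * it is sent to the port.
 */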
static int acpi_get_throttling_state(struct acpi_processor *pr,
				u64 value)
{
	int i;

	for (i = 0; i < pr->throttling.state_count; i++) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[i]);
		if (tx->control == value)
			return i;
	}
	return -1;
}

static int acpi_get_throttling_value(struct acpi_processor *pr,
			int state, u64 *value)
{
	int ret = -1;

	if (state >= 0 && state <= pr->throttling.state_count) {
		struct acpi_processor_tx_tss *tx =
		    (struct acpi_processor_tx_tss *)&(pr->throttling.
						      states_tss[state]);
		*value = tx->control;
		ret = 0;
	}
	return ret;
}

static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
{
	int state = 0;
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	pr->throttling.state = 0;

	value = 0;
	ret = acpi_read_throttling_status(pr, &value);
	if (ret >= 0) {
		state = acpi_get_throttling_state(pr, value);
		if (state == -1) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				"Invalid throttling state, reset\n"));
			state = 0;
			ret = __acpi_processor_set_throttling(pr, state, true,
							      true);
			if (ret)
				return ret;
		}
		pr->throttling.state = state;
	}

	return 0;
}

static long __acpi_processor_get_throttling(void *data)
{
	struct acpi_processor *pr = data;

	return pr->throttling.acpi_processor_get_throttling(pr);
}

static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	/*
	 * This is either called from the CPU hotplug callback of
	 * processor_driver or via the ACPI probe function. In the latter
	 * case the CPU is not guaranteed to be online. Both call sites are
	 * protected against CPU hotplug.
	 */
	if (!cpu_online(pr->id))
		return -ENODEV;

	return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
}

static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
{
	int i, step;

	if (!pr->throttling.address) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
		return -EINVAL;
	} else if (!pr->throttling.duty_width) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
		return -EINVAL;
	}
	/* TBD: Support duty_cycle values that span bit 4. */
	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
		return -EINVAL;
	}

	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;

	/*
	 * Compute state values. Note that throttling displays a linear power
	 * performance relationship (at 50% performance the CPU will consume
	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
	 */

	step = (1000 / pr->throttling.state_count);

	for (i = 0; i < pr->throttling.state_count; i++) {
		pr->throttling.states[i].performance = 1000 - step * i;
		pr->throttling.states[i].power = 1000 - step * i;
	}
	return 0;
}
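/*
 * Worked example for the table above (illustrative FADT values): with
 * duty_width = 3 the driver gets state_count = 8 and step = 125, so T0 is
 * stored as 1000 (100.0%), T1 as 875 (87.5%), down to T7 as 125 (12.5%) -
 * all in tenths of a percent, as noted in the comment.
 */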
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
					      int state, bool force)
{
	u32 value = 0;
	u32 duty_mask = 0;
	u32 duty_value = 0;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;
	/*
	 * Calculate the duty_value and duty_mask.
	 */
	if (state) {
		duty_value = pr->throttling.state_count - state;

		duty_value <<= pr->throttling.duty_offset;

		/* Used to clear all duty_value bits */
		duty_mask = pr->throttling.state_count - 1;

		duty_mask <<= acpi_gbl_FADT.duty_offset;
		duty_mask = ~duty_mask;
	}

	local_irq_disable();

	/*
	 * Disable throttling by writing a 0 to bit 4.  Note that we must
	 * turn it off before we can change the duty_value.
	 */
	value = inl(pr->throttling.address);
	if (value & 0x10) {
		value &= 0xFFFFFFEF;
		outl(value, pr->throttling.address);
	}

	/*
	 * Write the new duty_value and then enable throttling.  Note
	 * that a state value of 0 leaves throttling disabled.
	 */
	if (state) {
		value &= duty_mask;
		value |= duty_value;
		outl(value, pr->throttling.address);

		value |= 0x00000010;
		outl(value, pr->throttling.address);
	}

	pr->throttling.state = state;

	local_irq_enable();

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Throttling state set to T%d (%d%%)\n", state,
			  (pr->throttling.states[state].performance ? pr->
			   throttling.states[state].performance / 10 : 0)));

	return 0;
}

static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
					     int state, bool force)
{
	int ret;
	u64 value;

	if (!pr)
		return -EINVAL;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if (!force && (state == pr->throttling.state))
		return 0;

	if (state < pr->throttling_platform_limit)
		return -EPERM;

	value = 0;
	ret = acpi_get_throttling_value(pr, state, &value);
	if (ret >= 0) {
		acpi_write_throttling_state(pr, value);
		pr->throttling.state = state;
	}

	return 0;
}

static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}

static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
{
	if (direct)
		return fn(arg);
	return work_on_cpu(cpu, fn, arg);
}
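/*
 * The "direct" argument below selects between running the throttling
 * callback through work_on_cpu() and calling it directly.  The direct
 * path is used by acpi_processor_get_throttling_ptc() when it resets an
 * invalid T-state: that code already runs via work_on_cpu() on the
 * target CPU, so scheduling another work item there is unnecessary.
 */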
static int __acpi_processor_set_throttling(struct acpi_processor *pr,
					   int state, bool force, bool direct)
{
	int ret = 0;
	unsigned int i;
	struct acpi_processor *match_pr;
	struct acpi_processor_throttling *p_throttling;
	struct acpi_processor_throttling_arg arg;
	struct throttling_tstate t_state;

	if (!pr)
		return -EINVAL;

	if (!pr->flags.throttling)
		return -ENODEV;

	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
		return -EINVAL;

	if (cpu_is_offline(pr->id)) {
		/*
		 * The CPU pointed to by pr->id is offline, so there is no
		 * need to change the throttling state any more.
		 */
		return -ENODEV;
	}

	t_state.target_state = state;
	p_throttling = &(pr->throttling);

	/*
	 * The throttling notifier will be called for every
	 * affected cpu in order to get one proper T-state.
	 * The notifier event is THROTTLING_PRECHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
							&t_state);
	}
	/*
	 * The function of acpi_processor_set_throttling will be called
	 * to switch the T-state. If the coordination type is SW_ALL or
	 * HW_ALL, it is necessary to call it for every affected cpu.
	 * Otherwise it can be called only for the cpu pointed to by pr.
	 */
	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
		arg.pr = pr;
		arg.target_state = state;
		arg.force = force;
		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
				  direct);
	} else {
		/*
		 * When the T-state coordination is SW_ALL or HW_ALL,
		 * it is necessary to set the T-state for every affected
		 * CPU.
		 */
		for_each_cpu_and(i, cpu_online_mask,
		    p_throttling->shared_cpu_map) {
			match_pr = per_cpu(processors, i);
			/*
			 * If the pointer is invalid, we will report the
			 * error message and continue.
			 */
			if (!match_pr) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Invalid Pointer for CPU %d\n", i));
				continue;
			}
			/*
			 * If the throttling control is unsupported on CPU i,
			 * we will report the error message and continue.
			 */
			if (!match_pr->flags.throttling) {
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"Throttling Control is unsupported "
					"on CPU %d\n", i));
				continue;
			}

			arg.pr = match_pr;
			arg.target_state = state;
			arg.force = force;
			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
					  &arg, direct);
		}
	}
	/*
	 * After the set_throttling is called, the
	 * throttling notifier is called for every
	 * affected cpu to update the T-states.
	 * The notifier event is THROTTLING_POSTCHANGE.
	 */
	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
		t_state.cpu = i;
		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
							&t_state);
	}

	return ret;
}

int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
				  bool force)
{
	return __acpi_processor_set_throttling(pr, state, force, false);
}

int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
	int result = 0;
	struct acpi_processor_throttling *pthrottling;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
			  pr->throttling.address,
			  pr->throttling.duty_offset,
			  pr->throttling.duty_width));

	/*
	 * Evaluate _PTC, _TSS and _TPC
	 * They must all be present or none of them can be used.
	 */
	if (acpi_processor_get_throttling_control(pr) ||
		acpi_processor_get_throttling_states(pr) ||
		acpi_processor_get_platform_limit(pr)) {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_fadt;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_fadt;
		if (acpi_processor_get_fadt_info(pr))
			return 0;
	} else {
		pr->throttling.acpi_processor_get_throttling =
		    &acpi_processor_get_throttling_ptc;
		pr->throttling.acpi_processor_set_throttling =
		    &acpi_processor_set_throttling_ptc;
	}

	/*
	 * If the TSD package for one CPU can't be parsed successfully, it
	 * means that this CPU will have no coordination with other CPUs.
	 */
	if (acpi_processor_get_tsd(pr)) {
		pthrottling = &pr->throttling;
		pthrottling->tsd_valid_flag = 0;
		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
	}

	/*
	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
	 * This shouldn't be an issue as few (if any) mobile systems ever
	 * used this part.
	 */
	if (errata.piix4.throttle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Throttling not supported on PIIX4 A- or B-step\n"));
		return 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
			  pr->throttling.state_count));

	pr->flags.throttling = 1;

	/*
	 * Disable throttling (if enabled).  We'll let subsequent policy
	 * (e.g. thermal) decide to lower performance if it so chooses,
	 * but for now we'll crank up the speed.
	 */

	result = acpi_processor_get_throttling(pr);
	if (result)
		goto end;

	if (pr->throttling.state) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Disabling throttling (was T%d)\n",
				  pr->throttling.state));
		result = acpi_processor_set_throttling(pr, 0, false);
		if (result)
			goto end;
	}

end:
	if (result)
		pr->flags.throttling = 0;

	return result;
}