/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_FORCE_MWAIT ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

static u32 saved_bm_rld;

static void acpi_idle_bm_rld_save(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
}
static void acpi_idle_bm_rld_restore(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);

	if (resumed_bm_rld != saved_bm_rld)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

int acpi_processor_suspend(struct device *dev)
{
	acpi_idle_bm_rld_save();
	return 0;
}

int acpi_processor_resume(struct device *dev)
{
	acpi_idle_bm_rld_restore();
	return 0;
}

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	u64 count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
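		/*
		 * Per the ACPI specification, each _CST entry is a
		 * 4-element package: {Register (buffer), Type (integer),
		 * Latency (integer), Power (integer)}.  Elements 1-3 are
		 * picked out below.
		 */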
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
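
/*
 * Note: acpi_idle_play_dead() below is registered as the cpuidle
 * ->enter_dead() callback (see acpi_processor_setup_cpuidle_states()),
 * so it is reached from the CPU offline path rather than the normal
 * idle loop.
 */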
/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
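
/*
 * c3_cpu_count and c3_lock serialize the ARB_DIS handling in
 * acpi_idle_enter_bm(): the last CPU to enter C3 disables bus-master
 * arbitration, and the first CPU to leave C3 re-enables it.
 */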
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control determines whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
	.en_core_tk_irqen = 1,
};

/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state_usage *state_usage;
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state_usage = &dev->states_usage[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		cpuidle_set_statedata(state_usage, cx);

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		cpuidle_pause_and_lock();
		/* Protect against cpu-hotplug */
		get_online_cpus();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				acpi_processor_setup_cpuidle_cx(_pr);
				dev = per_cpu(acpi_cpuidle_device, cpu);
				cpuidle_enable_device(dev);
			}
		}
		put_online_cpus();
		cpuidle_resume_and_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status = 0;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr);

		/* Register per-cpu cpuidle_device. Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}