1 /* 2 * processor_idle - idle state submodule to the ACPI processor driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 10 * - Added support for C3 on SMP 11 * 12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or (at 17 * your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, but 20 * WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License along 25 * with this program; if not, write to the Free Software Foundation, Inc., 26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 27 * 28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 29 */ 30 31 #include <linux/kernel.h> 32 #include <linux/module.h> 33 #include <linux/init.h> 34 #include <linux/cpufreq.h> 35 #include <linux/proc_fs.h> 36 #include <linux/seq_file.h> 37 #include <linux/acpi.h> 38 #include <linux/dmi.h> 39 #include <linux/moduleparam.h> 40 #include <linux/sched.h> /* need_resched() */ 41 #include <linux/latency.h> 42 #include <linux/clockchips.h> 43 #include <linux/cpuidle.h> 44 45 /* 46 * Include the apic definitions for x86 to have the APIC timer related defines 47 * available also for UP (on SMP it gets magically included via linux/smp.h). 48 * asm/acpi.h is not an option, as it would require more include magic. Also 49 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 50 */ 51 #ifdef CONFIG_X86 52 #include <asm/apic.h> 53 #endif 54 55 #include <asm/io.h> 56 #include <asm/uaccess.h> 57 58 #include <acpi/acpi_bus.h> 59 #include <acpi/processor.h> 60 61 #define ACPI_PROCESSOR_COMPONENT 0x01000000 62 #define ACPI_PROCESSOR_CLASS "processor" 63 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 64 ACPI_MODULE_NAME("processor_idle"); 65 #define ACPI_PROCESSOR_FILE_POWER "power" 66 #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 67 #define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY) 68 #ifndef CONFIG_CPU_IDLE 69 #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 70 #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 71 static void (*pm_idle_save) (void) __read_mostly; 72 #else 73 #define C2_OVERHEAD 1 /* 1us */ 74 #define C3_OVERHEAD 1 /* 1us */ 75 #endif 76 #define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000)) 77 78 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 79 #ifdef CONFIG_CPU_IDLE 80 module_param(max_cstate, uint, 0000); 81 #else 82 module_param(max_cstate, uint, 0644); 83 #endif 84 static unsigned int nocst __read_mostly; 85 module_param(nocst, uint, 0000); 86 87 #ifndef CONFIG_CPU_IDLE 88 /* 89 * bm_history -- bit-mask with a bit per jiffy of bus-master activity 90 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms 91 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms 92 * 100 HZ: 0x0000000F: 4 jiffies = 40ms 93 * reduce history for more aggressive entry into C3 94 */ 95 static unsigned int bm_history __read_mostly = 96 (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); 97 module_param(bm_history, uint, 0644); 98 99 static int acpi_processor_set_power_policy(struct acpi_processor *pr); 100 101 #endif 102 103 /* 104 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 105 * For now disable this. Probably a bug somewhere else. 106 * 107 * To skip this limit, boot/load with a large max_cstate limit. 108 */ 109 static int set_max_cstate(const struct dmi_system_id *id) 110 { 111 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 112 return 0; 113 114 printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 115 " Override with \"processor.max_cstate=%d\"\n", id->ident, 116 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 117 118 max_cstate = (long)id->driver_data; 119 120 return 0; 121 } 122 123 /* Actually this shouldn't be __cpuinitdata, would be better to fix the 124 callers to only run once -AK */ 125 static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { 126 { set_max_cstate, "IBM ThinkPad R40e", { 127 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 128 DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1}, 129 { set_max_cstate, "IBM ThinkPad R40e", { 130 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 131 DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1}, 132 { set_max_cstate, "IBM ThinkPad R40e", { 133 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 134 DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1}, 135 { set_max_cstate, "IBM ThinkPad R40e", { 136 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 137 DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1}, 138 { set_max_cstate, "IBM ThinkPad R40e", { 139 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 140 DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1}, 141 { set_max_cstate, "IBM ThinkPad R40e", { 142 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 143 DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1}, 144 { set_max_cstate, "IBM ThinkPad R40e", { 145 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 146 DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1}, 147 { set_max_cstate, "IBM ThinkPad R40e", { 148 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 149 DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1}, 150 { set_max_cstate, "IBM ThinkPad R40e", { 151 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 152 DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1}, 153 { set_max_cstate, "IBM ThinkPad R40e", { 154 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 155 DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1}, 156 { set_max_cstate, "IBM ThinkPad R40e", { 157 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 158 DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 159 { set_max_cstate, "IBM ThinkPad R40e", { 160 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 161 DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1}, 162 { set_max_cstate, "IBM ThinkPad R40e", { 163 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 164 DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1}, 165 { set_max_cstate, "IBM ThinkPad R40e", { 166 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 167 DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1}, 168 { set_max_cstate, "IBM ThinkPad R40e", { 169 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 170 DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1}, 171 { set_max_cstate, "IBM ThinkPad R40e", { 172 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 173 DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1}, 174 { set_max_cstate, "Medion 41700", { 175 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 176 DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1}, 177 { set_max_cstate, "Clevo 5600D", { 178 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 179 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 180 (void *)2}, 181 {}, 182 }; 183 184 static inline u32 ticks_elapsed(u32 t1, u32 t2) 185 { 186 if (t2 >= t1) 187 return (t2 - t1); 188 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 189 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 190 else 191 return ((0xFFFFFFFF - t1) + t2); 192 } 193 194 static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) 195 { 196 if (t2 >= t1) 197 return PM_TIMER_TICKS_TO_US(t2 - t1); 198 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) 199 return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 200 else 201 return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); 202 } 203 204 static void acpi_safe_halt(void) 205 { 206 current_thread_info()->status &= ~TS_POLLING; 207 /* 208 * TS_POLLING-cleared state must be visible before we 209 * test NEED_RESCHED: 210 */ 211 smp_mb(); 212 if (!need_resched()) 213 safe_halt(); 214 current_thread_info()->status |= TS_POLLING; 215 } 216 217 #ifndef CONFIG_CPU_IDLE 218 219 static void 220 acpi_processor_power_activate(struct acpi_processor *pr, 221 struct acpi_processor_cx *new) 222 { 223 struct acpi_processor_cx *old; 224 225 if (!pr || !new) 226 return; 227 228 old = pr->power.state; 229 230 if (old) 231 old->promotion.count = 0; 232 new->demotion.count = 0; 233 234 /* Cleanup from old state. */ 235 if (old) { 236 switch (old->type) { 237 case ACPI_STATE_C3: 238 /* Disable bus master reload */ 239 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 240 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 241 break; 242 } 243 } 244 245 /* Prepare to use new state. */ 246 switch (new->type) { 247 case ACPI_STATE_C3: 248 /* Enable bus master reload */ 249 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 250 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 251 break; 252 } 253 254 pr->power.state = new; 255 256 return; 257 } 258 259 static atomic_t c3_cpu_count; 260 261 /* Common C-state entry for C2, C3, .. */ 262 static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 263 { 264 if (cstate->space_id == ACPI_CSTATE_FFH) { 265 /* Call into architectural FFH based C-state */ 266 acpi_processor_ffh_cstate_enter(cstate); 267 } else { 268 int unused; 269 /* IO port based C-state */ 270 inb(cstate->address); 271 /* Dummy wait op - must do something useless after P_LVL2 read 272 because chipsets cannot guarantee that STPCLK# signal 273 gets asserted in time to freeze execution properly. */ 274 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 275 } 276 } 277 #endif /* !CONFIG_CPU_IDLE */ 278 279 #ifdef ARCH_APICTIMER_STOPS_ON_C3 280 281 /* 282 * Some BIOS implementations switch to C3 in the published C2 state. 283 * This seems to be a common problem on AMD boxen, but other vendors 284 * are affected too. We pick the most conservative approach: we assume 285 * that the local APIC stops in both C2 and C3. 286 */ 287 static void acpi_timer_check_state(int state, struct acpi_processor *pr, 288 struct acpi_processor_cx *cx) 289 { 290 struct acpi_processor_power *pwr = &pr->power; 291 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 292 293 /* 294 * Check, if one of the previous states already marked the lapic 295 * unstable 296 */ 297 if (pwr->timer_broadcast_on_state < state) 298 return; 299 300 if (cx->type >= type) 301 pr->power.timer_broadcast_on_state = state; 302 } 303 304 static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) 305 { 306 unsigned long reason; 307 308 reason = pr->power.timer_broadcast_on_state < INT_MAX ? 309 CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; 310 311 clockevents_notify(reason, &pr->id); 312 } 313 314 /* Power(C) State timer broadcast control */ 315 static void acpi_state_timer_broadcast(struct acpi_processor *pr, 316 struct acpi_processor_cx *cx, 317 int broadcast) 318 { 319 int state = cx - pr->power.states; 320 321 if (state >= pr->power.timer_broadcast_on_state) { 322 unsigned long reason; 323 324 reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER : 325 CLOCK_EVT_NOTIFY_BROADCAST_EXIT; 326 clockevents_notify(reason, &pr->id); 327 } 328 } 329 330 #else 331 332 static void acpi_timer_check_state(int state, struct acpi_processor *pr, 333 struct acpi_processor_cx *cstate) { } 334 static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } 335 static void acpi_state_timer_broadcast(struct acpi_processor *pr, 336 struct acpi_processor_cx *cx, 337 int broadcast) 338 { 339 } 340 341 #endif 342 343 /* 344 * Suspend / resume control 345 */ 346 static int acpi_idle_suspend; 347 348 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) 349 { 350 acpi_idle_suspend = 1; 351 return 0; 352 } 353 354 int acpi_processor_resume(struct acpi_device * device) 355 { 356 acpi_idle_suspend = 0; 357 return 0; 358 } 359 360 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 361 static int tsc_halts_in_c(int state) 362 { 363 switch (boot_cpu_data.x86_vendor) { 364 case X86_VENDOR_AMD: 365 /* 366 * AMD Fam10h TSC will tick in all 367 * C/P/S0/S1 states when this bit is set. 368 */ 369 if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 370 return 0; 371 /*FALL THROUGH*/ 372 case X86_VENDOR_INTEL: 373 /* Several cases known where TSC halts in C2 too */ 374 default: 375 return state > ACPI_STATE_C1; 376 } 377 } 378 #endif 379 380 #ifndef CONFIG_CPU_IDLE 381 static void acpi_processor_idle(void) 382 { 383 struct acpi_processor *pr = NULL; 384 struct acpi_processor_cx *cx = NULL; 385 struct acpi_processor_cx *next_state = NULL; 386 int sleep_ticks = 0; 387 u32 t1, t2 = 0; 388 389 /* 390 * Interrupts must be disabled during bus mastering calculations and 391 * for C2/C3 transitions. 392 */ 393 local_irq_disable(); 394 395 pr = processors[smp_processor_id()]; 396 if (!pr) { 397 local_irq_enable(); 398 return; 399 } 400 401 /* 402 * Check whether we truly need to go idle, or should 403 * reschedule: 404 */ 405 if (unlikely(need_resched())) { 406 local_irq_enable(); 407 return; 408 } 409 410 cx = pr->power.state; 411 if (!cx || acpi_idle_suspend) { 412 if (pm_idle_save) 413 pm_idle_save(); 414 else 415 acpi_safe_halt(); 416 return; 417 } 418 419 /* 420 * Check BM Activity 421 * ----------------- 422 * Check for bus mastering activity (if required), record, and check 423 * for demotion. 424 */ 425 if (pr->flags.bm_check) { 426 u32 bm_status = 0; 427 unsigned long diff = jiffies - pr->power.bm_check_timestamp; 428 429 if (diff > 31) 430 diff = 31; 431 432 pr->power.bm_activity <<= diff; 433 434 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 435 if (bm_status) { 436 pr->power.bm_activity |= 0x1; 437 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 438 } 439 /* 440 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 441 * the true state of bus mastering activity; forcing us to 442 * manually check the BMIDEA bit of each IDE channel. 443 */ 444 else if (errata.piix4.bmisx) { 445 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 446 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 447 pr->power.bm_activity |= 0x1; 448 } 449 450 pr->power.bm_check_timestamp = jiffies; 451 452 /* 453 * If bus mastering is or was active this jiffy, demote 454 * to avoid a faulty transition. Note that the processor 455 * won't enter a low-power state during this call (to this 456 * function) but should upon the next. 457 * 458 * TBD: A better policy might be to fallback to the demotion 459 * state (use it for this quantum only) istead of 460 * demoting -- and rely on duration as our sole demotion 461 * qualification. This may, however, introduce DMA 462 * issues (e.g. floppy DMA transfer overrun/underrun). 463 */ 464 if ((pr->power.bm_activity & 0x1) && 465 cx->demotion.threshold.bm) { 466 local_irq_enable(); 467 next_state = cx->demotion.state; 468 goto end; 469 } 470 } 471 472 #ifdef CONFIG_HOTPLUG_CPU 473 /* 474 * Check for P_LVL2_UP flag before entering C2 and above on 475 * an SMP system. We do it here instead of doing it at _CST/P_LVL 476 * detection phase, to work cleanly with logical CPU hotplug. 477 */ 478 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 479 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 480 cx = &pr->power.states[ACPI_STATE_C1]; 481 #endif 482 483 /* 484 * Sleep: 485 * ------ 486 * Invoke the current Cx state to put the processor to sleep. 487 */ 488 if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { 489 current_thread_info()->status &= ~TS_POLLING; 490 /* 491 * TS_POLLING-cleared state must be visible before we 492 * test NEED_RESCHED: 493 */ 494 smp_mb(); 495 if (need_resched()) { 496 current_thread_info()->status |= TS_POLLING; 497 local_irq_enable(); 498 return; 499 } 500 } 501 502 switch (cx->type) { 503 504 case ACPI_STATE_C1: 505 /* 506 * Invoke C1. 507 * Use the appropriate idle routine, the one that would 508 * be used without acpi C-states. 509 */ 510 if (pm_idle_save) 511 pm_idle_save(); 512 else 513 acpi_safe_halt(); 514 515 /* 516 * TBD: Can't get time duration while in C1, as resumes 517 * go to an ISR rather than here. Need to instrument 518 * base interrupt handler. 519 * 520 * Note: the TSC better not stop in C1, sched_clock() will 521 * skew otherwise. 522 */ 523 sleep_ticks = 0xFFFFFFFF; 524 break; 525 526 case ACPI_STATE_C2: 527 /* Get start time (ticks) */ 528 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 529 /* Tell the scheduler that we are going deep-idle: */ 530 sched_clock_idle_sleep_event(); 531 /* Invoke C2 */ 532 acpi_state_timer_broadcast(pr, cx, 1); 533 acpi_cstate_enter(cx); 534 /* Get end time (ticks) */ 535 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 536 537 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 538 /* TSC halts in C2, so notify users */ 539 if (tsc_halts_in_c(ACPI_STATE_C2)) 540 mark_tsc_unstable("possible TSC halt in C2"); 541 #endif 542 /* Compute time (ticks) that we were actually asleep */ 543 sleep_ticks = ticks_elapsed(t1, t2); 544 545 /* Tell the scheduler how much we idled: */ 546 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 547 548 /* Re-enable interrupts */ 549 local_irq_enable(); 550 /* Do not account our idle-switching overhead: */ 551 sleep_ticks -= cx->latency_ticks + C2_OVERHEAD; 552 553 current_thread_info()->status |= TS_POLLING; 554 acpi_state_timer_broadcast(pr, cx, 0); 555 break; 556 557 case ACPI_STATE_C3: 558 acpi_unlazy_tlb(smp_processor_id()); 559 /* 560 * Must be done before busmaster disable as we might 561 * need to access HPET ! 562 */ 563 acpi_state_timer_broadcast(pr, cx, 1); 564 /* 565 * disable bus master 566 * bm_check implies we need ARB_DIS 567 * !bm_check implies we need cache flush 568 * bm_control implies whether we can do ARB_DIS 569 * 570 * That leaves a case where bm_check is set and bm_control is 571 * not set. In that case we cannot do much, we enter C3 572 * without doing anything. 573 */ 574 if (pr->flags.bm_check && pr->flags.bm_control) { 575 if (atomic_inc_return(&c3_cpu_count) == 576 num_online_cpus()) { 577 /* 578 * All CPUs are trying to go to C3 579 * Disable bus master arbitration 580 */ 581 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 582 } 583 } else if (!pr->flags.bm_check) { 584 /* SMP with no shared cache... Invalidate cache */ 585 ACPI_FLUSH_CPU_CACHE(); 586 } 587 588 /* Get start time (ticks) */ 589 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 590 /* Invoke C3 */ 591 /* Tell the scheduler that we are going deep-idle: */ 592 sched_clock_idle_sleep_event(); 593 acpi_cstate_enter(cx); 594 /* Get end time (ticks) */ 595 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 596 if (pr->flags.bm_check && pr->flags.bm_control) { 597 /* Enable bus master arbitration */ 598 atomic_dec(&c3_cpu_count); 599 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 600 } 601 602 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 603 /* TSC halts in C3, so notify users */ 604 if (tsc_halts_in_c(ACPI_STATE_C3)) 605 mark_tsc_unstable("TSC halts in C3"); 606 #endif 607 /* Compute time (ticks) that we were actually asleep */ 608 sleep_ticks = ticks_elapsed(t1, t2); 609 /* Tell the scheduler how much we idled: */ 610 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 611 612 /* Re-enable interrupts */ 613 local_irq_enable(); 614 /* Do not account our idle-switching overhead: */ 615 sleep_ticks -= cx->latency_ticks + C3_OVERHEAD; 616 617 current_thread_info()->status |= TS_POLLING; 618 acpi_state_timer_broadcast(pr, cx, 0); 619 break; 620 621 default: 622 local_irq_enable(); 623 return; 624 } 625 cx->usage++; 626 if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0)) 627 cx->time += sleep_ticks; 628 629 next_state = pr->power.state; 630 631 #ifdef CONFIG_HOTPLUG_CPU 632 /* Don't do promotion/demotion */ 633 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 634 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { 635 next_state = cx; 636 goto end; 637 } 638 #endif 639 640 /* 641 * Promotion? 642 * ---------- 643 * Track the number of longs (time asleep is greater than threshold) 644 * and promote when the count threshold is reached. Note that bus 645 * mastering activity may prevent promotions. 646 * Do not promote above max_cstate. 647 */ 648 if (cx->promotion.state && 649 ((cx->promotion.state - pr->power.states) <= max_cstate)) { 650 if (sleep_ticks > cx->promotion.threshold.ticks && 651 cx->promotion.state->latency <= system_latency_constraint()) { 652 cx->promotion.count++; 653 cx->demotion.count = 0; 654 if (cx->promotion.count >= 655 cx->promotion.threshold.count) { 656 if (pr->flags.bm_check) { 657 if (! 658 (pr->power.bm_activity & cx-> 659 promotion.threshold.bm)) { 660 next_state = 661 cx->promotion.state; 662 goto end; 663 } 664 } else { 665 next_state = cx->promotion.state; 666 goto end; 667 } 668 } 669 } 670 } 671 672 /* 673 * Demotion? 674 * --------- 675 * Track the number of shorts (time asleep is less than time threshold) 676 * and demote when the usage threshold is reached. 677 */ 678 if (cx->demotion.state) { 679 if (sleep_ticks < cx->demotion.threshold.ticks) { 680 cx->demotion.count++; 681 cx->promotion.count = 0; 682 if (cx->demotion.count >= cx->demotion.threshold.count) { 683 next_state = cx->demotion.state; 684 goto end; 685 } 686 } 687 } 688 689 end: 690 /* 691 * Demote if current state exceeds max_cstate 692 * or if the latency of the current state is unacceptable 693 */ 694 if ((pr->power.state - pr->power.states) > max_cstate || 695 pr->power.state->latency > system_latency_constraint()) { 696 if (cx->demotion.state) 697 next_state = cx->demotion.state; 698 } 699 700 /* 701 * New Cx State? 702 * ------------- 703 * If we're going to start using a new Cx state we must clean up 704 * from the previous and prepare to use the new. 705 */ 706 if (next_state != pr->power.state) 707 acpi_processor_power_activate(pr, next_state); 708 } 709 710 static int acpi_processor_set_power_policy(struct acpi_processor *pr) 711 { 712 unsigned int i; 713 unsigned int state_is_set = 0; 714 struct acpi_processor_cx *lower = NULL; 715 struct acpi_processor_cx *higher = NULL; 716 struct acpi_processor_cx *cx; 717 718 719 if (!pr) 720 return -EINVAL; 721 722 /* 723 * This function sets the default Cx state policy (OS idle handler). 724 * Our scheme is to promote quickly to C2 but more conservatively 725 * to C3. We're favoring C2 for its characteristics of low latency 726 * (quick response), good power savings, and ability to allow bus 727 * mastering activity. Note that the Cx state policy is completely 728 * customizable and can be altered dynamically. 729 */ 730 731 /* startup state */ 732 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 733 cx = &pr->power.states[i]; 734 if (!cx->valid) 735 continue; 736 737 if (!state_is_set) 738 pr->power.state = cx; 739 state_is_set++; 740 break; 741 } 742 743 if (!state_is_set) 744 return -ENODEV; 745 746 /* demotion */ 747 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 748 cx = &pr->power.states[i]; 749 if (!cx->valid) 750 continue; 751 752 if (lower) { 753 cx->demotion.state = lower; 754 cx->demotion.threshold.ticks = cx->latency_ticks; 755 cx->demotion.threshold.count = 1; 756 if (cx->type == ACPI_STATE_C3) 757 cx->demotion.threshold.bm = bm_history; 758 } 759 760 lower = cx; 761 } 762 763 /* promotion */ 764 for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { 765 cx = &pr->power.states[i]; 766 if (!cx->valid) 767 continue; 768 769 if (higher) { 770 cx->promotion.state = higher; 771 cx->promotion.threshold.ticks = cx->latency_ticks; 772 if (cx->type >= ACPI_STATE_C2) 773 cx->promotion.threshold.count = 4; 774 else 775 cx->promotion.threshold.count = 10; 776 if (higher->type == ACPI_STATE_C3) 777 cx->promotion.threshold.bm = bm_history; 778 } 779 780 higher = cx; 781 } 782 783 return 0; 784 } 785 #endif /* !CONFIG_CPU_IDLE */ 786 787 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 788 { 789 790 if (!pr) 791 return -EINVAL; 792 793 if (!pr->pblk) 794 return -ENODEV; 795 796 /* if info is obtained from pblk/fadt, type equals state */ 797 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 798 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 799 800 #ifndef CONFIG_HOTPLUG_CPU 801 /* 802 * Check for P_LVL2_UP flag before entering C2 and above on 803 * an SMP system. 804 */ 805 if ((num_online_cpus() > 1) && 806 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 807 return -ENODEV; 808 #endif 809 810 /* determine C2 and C3 address from pblk */ 811 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 812 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 813 814 /* determine latencies from FADT */ 815 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; 816 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; 817 818 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 819 "lvl2[0x%08x] lvl3[0x%08x]\n", 820 pr->power.states[ACPI_STATE_C2].address, 821 pr->power.states[ACPI_STATE_C3].address)); 822 823 return 0; 824 } 825 826 static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 827 { 828 if (!pr->power.states[ACPI_STATE_C1].valid) { 829 /* set the first C-State to C1 */ 830 /* all processors need to support C1 */ 831 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 832 pr->power.states[ACPI_STATE_C1].valid = 1; 833 } 834 /* the C0 state only exists as a filler in our array */ 835 pr->power.states[ACPI_STATE_C0].valid = 1; 836 return 0; 837 } 838 839 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 840 { 841 acpi_status status = 0; 842 acpi_integer count; 843 int current_count; 844 int i; 845 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 846 union acpi_object *cst; 847 848 849 if (nocst) 850 return -ENODEV; 851 852 current_count = 0; 853 854 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 855 if (ACPI_FAILURE(status)) { 856 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 857 return -ENODEV; 858 } 859 860 cst = buffer.pointer; 861 862 /* There must be at least 2 elements */ 863 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 864 printk(KERN_ERR PREFIX "not enough elements in _CST\n"); 865 status = -EFAULT; 866 goto end; 867 } 868 869 count = cst->package.elements[0].integer.value; 870 871 /* Validate number of power states. */ 872 if (count < 1 || count != cst->package.count - 1) { 873 printk(KERN_ERR PREFIX "count given by _CST is not valid\n"); 874 status = -EFAULT; 875 goto end; 876 } 877 878 /* Tell driver that at least _CST is supported. */ 879 pr->flags.has_cst = 1; 880 881 for (i = 1; i <= count; i++) { 882 union acpi_object *element; 883 union acpi_object *obj; 884 struct acpi_power_register *reg; 885 struct acpi_processor_cx cx; 886 887 memset(&cx, 0, sizeof(cx)); 888 889 element = &(cst->package.elements[i]); 890 if (element->type != ACPI_TYPE_PACKAGE) 891 continue; 892 893 if (element->package.count != 4) 894 continue; 895 896 obj = &(element->package.elements[0]); 897 898 if (obj->type != ACPI_TYPE_BUFFER) 899 continue; 900 901 reg = (struct acpi_power_register *)obj->buffer.pointer; 902 903 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 904 (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 905 continue; 906 907 /* There should be an easy way to extract an integer... */ 908 obj = &(element->package.elements[1]); 909 if (obj->type != ACPI_TYPE_INTEGER) 910 continue; 911 912 cx.type = obj->integer.value; 913 /* 914 * Some buggy BIOSes won't list C1 in _CST - 915 * Let acpi_processor_get_power_info_default() handle them later 916 */ 917 if (i == 1 && cx.type != ACPI_STATE_C1) 918 current_count++; 919 920 cx.address = reg->address; 921 cx.index = current_count + 1; 922 923 cx.space_id = ACPI_CSTATE_SYSTEMIO; 924 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 925 if (acpi_processor_ffh_cstate_probe 926 (pr->id, &cx, reg) == 0) { 927 cx.space_id = ACPI_CSTATE_FFH; 928 } else if (cx.type != ACPI_STATE_C1) { 929 /* 930 * C1 is a special case where FIXED_HARDWARE 931 * can be handled in non-MWAIT way as well. 932 * In that case, save this _CST entry info. 933 * That is, we retain space_id of SYSTEM_IO for 934 * halt based C1. 935 * Otherwise, ignore this info and continue. 936 */ 937 continue; 938 } 939 } 940 941 obj = &(element->package.elements[2]); 942 if (obj->type != ACPI_TYPE_INTEGER) 943 continue; 944 945 cx.latency = obj->integer.value; 946 947 obj = &(element->package.elements[3]); 948 if (obj->type != ACPI_TYPE_INTEGER) 949 continue; 950 951 cx.power = obj->integer.value; 952 953 current_count++; 954 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx)); 955 956 /* 957 * We support total ACPI_PROCESSOR_MAX_POWER - 1 958 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1) 959 */ 960 if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) { 961 printk(KERN_WARNING 962 "Limiting number of power states to max (%d)\n", 963 ACPI_PROCESSOR_MAX_POWER); 964 printk(KERN_WARNING 965 "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 966 break; 967 } 968 } 969 970 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", 971 current_count)); 972 973 /* Validate number of power states discovered */ 974 if (current_count < 2) 975 status = -EFAULT; 976 977 end: 978 kfree(buffer.pointer); 979 980 return status; 981 } 982 983 static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) 984 { 985 986 if (!cx->address) 987 return; 988 989 /* 990 * C2 latency must be less than or equal to 100 991 * microseconds. 992 */ 993 else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 994 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 995 "latency too large [%d]\n", cx->latency)); 996 return; 997 } 998 999 /* 1000 * Otherwise we've met all of our C2 requirements. 1001 * Normalize the C2 latency to expidite policy 1002 */ 1003 cx->valid = 1; 1004 1005 #ifndef CONFIG_CPU_IDLE 1006 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 1007 #else 1008 cx->latency_ticks = cx->latency; 1009 #endif 1010 1011 return; 1012 } 1013 1014 static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 1015 struct acpi_processor_cx *cx) 1016 { 1017 static int bm_check_flag; 1018 1019 1020 if (!cx->address) 1021 return; 1022 1023 /* 1024 * C3 latency must be less than or equal to 1000 1025 * microseconds. 1026 */ 1027 else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 1028 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1029 "latency too large [%d]\n", cx->latency)); 1030 return; 1031 } 1032 1033 /* 1034 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 1035 * DMA transfers are used by any ISA device to avoid livelock. 1036 * Note that we could disable Type-F DMA (as recommended by 1037 * the erratum), but this is known to disrupt certain ISA 1038 * devices thus we take the conservative approach. 1039 */ 1040 else if (errata.piix4.fdma) { 1041 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1042 "C3 not supported on PIIX4 with Type-F DMA\n")); 1043 return; 1044 } 1045 1046 /* All the logic here assumes flags.bm_check is same across all CPUs */ 1047 if (!bm_check_flag) { 1048 /* Determine whether bm_check is needed based on CPU */ 1049 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 1050 bm_check_flag = pr->flags.bm_check; 1051 } else { 1052 pr->flags.bm_check = bm_check_flag; 1053 } 1054 1055 if (pr->flags.bm_check) { 1056 if (!pr->flags.bm_control) { 1057 if (pr->flags.has_cst != 1) { 1058 /* bus mastering control is necessary */ 1059 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1060 "C3 support requires BM control\n")); 1061 return; 1062 } else { 1063 /* Here we enter C3 without bus mastering */ 1064 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1065 "C3 support without BM control\n")); 1066 } 1067 } 1068 } else { 1069 /* 1070 * WBINVD should be set in fadt, for C3 state to be 1071 * supported on when bm_check is not required. 1072 */ 1073 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 1074 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1075 "Cache invalidation should work properly" 1076 " for C3 to be enabled on SMP systems\n")); 1077 return; 1078 } 1079 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 1080 } 1081 1082 /* 1083 * Otherwise we've met all of our C3 requirements. 1084 * Normalize the C3 latency to expidite policy. Enable 1085 * checking of bus mastering status (bm_check) so we can 1086 * use this in our C3 policy 1087 */ 1088 cx->valid = 1; 1089 1090 #ifndef CONFIG_CPU_IDLE 1091 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 1092 #else 1093 cx->latency_ticks = cx->latency; 1094 #endif 1095 1096 return; 1097 } 1098 1099 static int acpi_processor_power_verify(struct acpi_processor *pr) 1100 { 1101 unsigned int i; 1102 unsigned int working = 0; 1103 1104 pr->power.timer_broadcast_on_state = INT_MAX; 1105 1106 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 1107 struct acpi_processor_cx *cx = &pr->power.states[i]; 1108 1109 switch (cx->type) { 1110 case ACPI_STATE_C1: 1111 cx->valid = 1; 1112 break; 1113 1114 case ACPI_STATE_C2: 1115 acpi_processor_power_verify_c2(cx); 1116 if (cx->valid) 1117 acpi_timer_check_state(i, pr, cx); 1118 break; 1119 1120 case ACPI_STATE_C3: 1121 acpi_processor_power_verify_c3(pr, cx); 1122 if (cx->valid) 1123 acpi_timer_check_state(i, pr, cx); 1124 break; 1125 } 1126 1127 if (cx->valid) 1128 working++; 1129 } 1130 1131 acpi_propagate_timer_broadcast(pr); 1132 1133 return (working); 1134 } 1135 1136 static int acpi_processor_get_power_info(struct acpi_processor *pr) 1137 { 1138 unsigned int i; 1139 int result; 1140 1141 1142 /* NOTE: the idle thread may not be running while calling 1143 * this function */ 1144 1145 /* Zero initialize all the C-states info. */ 1146 memset(pr->power.states, 0, sizeof(pr->power.states)); 1147 1148 result = acpi_processor_get_power_info_cst(pr); 1149 if (result == -ENODEV) 1150 result = acpi_processor_get_power_info_fadt(pr); 1151 1152 if (result) 1153 return result; 1154 1155 acpi_processor_get_power_info_default(pr); 1156 1157 pr->power.count = acpi_processor_power_verify(pr); 1158 1159 #ifndef CONFIG_CPU_IDLE 1160 /* 1161 * Set Default Policy 1162 * ------------------ 1163 * Now that we know which states are supported, set the default 1164 * policy. Note that this policy can be changed dynamically 1165 * (e.g. encourage deeper sleeps to conserve battery life when 1166 * not on AC). 1167 */ 1168 result = acpi_processor_set_power_policy(pr); 1169 if (result) 1170 return result; 1171 #endif 1172 1173 /* 1174 * if one state of type C2 or C3 is available, mark this 1175 * CPU as being "idle manageable" 1176 */ 1177 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 1178 if (pr->power.states[i].valid) { 1179 pr->power.count = i; 1180 if (pr->power.states[i].type >= ACPI_STATE_C2) 1181 pr->flags.power = 1; 1182 } 1183 } 1184 1185 return 0; 1186 } 1187 1188 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) 1189 { 1190 struct acpi_processor *pr = seq->private; 1191 unsigned int i; 1192 1193 1194 if (!pr) 1195 goto end; 1196 1197 seq_printf(seq, "active state: C%zd\n" 1198 "max_cstate: C%d\n" 1199 "bus master activity: %08x\n" 1200 "maximum allowed latency: %d usec\n", 1201 pr->power.state ? pr->power.state - pr->power.states : 0, 1202 max_cstate, (unsigned)pr->power.bm_activity, 1203 system_latency_constraint()); 1204 1205 seq_puts(seq, "states:\n"); 1206 1207 for (i = 1; i <= pr->power.count; i++) { 1208 seq_printf(seq, " %cC%d: ", 1209 (&pr->power.states[i] == 1210 pr->power.state ? '*' : ' '), i); 1211 1212 if (!pr->power.states[i].valid) { 1213 seq_puts(seq, "<not supported>\n"); 1214 continue; 1215 } 1216 1217 switch (pr->power.states[i].type) { 1218 case ACPI_STATE_C1: 1219 seq_printf(seq, "type[C1] "); 1220 break; 1221 case ACPI_STATE_C2: 1222 seq_printf(seq, "type[C2] "); 1223 break; 1224 case ACPI_STATE_C3: 1225 seq_printf(seq, "type[C3] "); 1226 break; 1227 default: 1228 seq_printf(seq, "type[--] "); 1229 break; 1230 } 1231 1232 if (pr->power.states[i].promotion.state) 1233 seq_printf(seq, "promotion[C%zd] ", 1234 (pr->power.states[i].promotion.state - 1235 pr->power.states)); 1236 else 1237 seq_puts(seq, "promotion[--] "); 1238 1239 if (pr->power.states[i].demotion.state) 1240 seq_printf(seq, "demotion[C%zd] ", 1241 (pr->power.states[i].demotion.state - 1242 pr->power.states)); 1243 else 1244 seq_puts(seq, "demotion[--] "); 1245 1246 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 1247 pr->power.states[i].latency, 1248 pr->power.states[i].usage, 1249 (unsigned long long)pr->power.states[i].time); 1250 } 1251 1252 end: 1253 return 0; 1254 } 1255 1256 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) 1257 { 1258 return single_open(file, acpi_processor_power_seq_show, 1259 PDE(inode)->data); 1260 } 1261 1262 static const struct file_operations acpi_processor_power_fops = { 1263 .open = acpi_processor_power_open_fs, 1264 .read = seq_read, 1265 .llseek = seq_lseek, 1266 .release = single_release, 1267 }; 1268 1269 #ifndef CONFIG_CPU_IDLE 1270 1271 int acpi_processor_cst_has_changed(struct acpi_processor *pr) 1272 { 1273 int result = 0; 1274 1275 1276 if (!pr) 1277 return -EINVAL; 1278 1279 if (nocst) { 1280 return -ENODEV; 1281 } 1282 1283 if (!pr->flags.power_setup_done) 1284 return -ENODEV; 1285 1286 /* Fall back to the default idle loop */ 1287 pm_idle = pm_idle_save; 1288 synchronize_sched(); /* Relies on interrupts forcing exit from idle. */ 1289 1290 pr->flags.power = 0; 1291 result = acpi_processor_get_power_info(pr); 1292 if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) 1293 pm_idle = acpi_processor_idle; 1294 1295 return result; 1296 } 1297 1298 #ifdef CONFIG_SMP 1299 static void smp_callback(void *v) 1300 { 1301 /* we already woke the CPU up, nothing more to do */ 1302 } 1303 1304 /* 1305 * This function gets called when a part of the kernel has a new latency 1306 * requirement. This means we need to get all processors out of their C-state, 1307 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that 1308 * wakes them all right up. 1309 */ 1310 static int acpi_processor_latency_notify(struct notifier_block *b, 1311 unsigned long l, void *v) 1312 { 1313 smp_call_function(smp_callback, NULL, 0, 1); 1314 return NOTIFY_OK; 1315 } 1316 1317 static struct notifier_block acpi_processor_latency_notifier = { 1318 .notifier_call = acpi_processor_latency_notify, 1319 }; 1320 1321 #endif 1322 1323 #else /* CONFIG_CPU_IDLE */ 1324 1325 /** 1326 * acpi_idle_bm_check - checks if bus master activity was detected 1327 */ 1328 static int acpi_idle_bm_check(void) 1329 { 1330 u32 bm_status = 0; 1331 1332 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 1333 if (bm_status) 1334 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 1335 /* 1336 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 1337 * the true state of bus mastering activity; forcing us to 1338 * manually check the BMIDEA bit of each IDE channel. 1339 */ 1340 else if (errata.piix4.bmisx) { 1341 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 1342 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 1343 bm_status = 1; 1344 } 1345 return bm_status; 1346 } 1347 1348 /** 1349 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state 1350 * @pr: the processor 1351 * @target: the new target state 1352 */ 1353 static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, 1354 struct acpi_processor_cx *target) 1355 { 1356 if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) { 1357 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); 1358 pr->flags.bm_rld_set = 0; 1359 } 1360 1361 if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) { 1362 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 1363 pr->flags.bm_rld_set = 1; 1364 } 1365 } 1366 1367 /** 1368 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry 1369 * @cx: cstate data 1370 */ 1371 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 1372 { 1373 if (cx->space_id == ACPI_CSTATE_FFH) { 1374 /* Call into architectural FFH based C-state */ 1375 acpi_processor_ffh_cstate_enter(cx); 1376 } else { 1377 int unused; 1378 /* IO port based C-state */ 1379 inb(cx->address); 1380 /* Dummy wait op - must do something useless after P_LVL2 read 1381 because chipsets cannot guarantee that STPCLK# signal 1382 gets asserted in time to freeze execution properly. */ 1383 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 1384 } 1385 } 1386 1387 /** 1388 * acpi_idle_enter_c1 - enters an ACPI C1 state-type 1389 * @dev: the target CPU 1390 * @state: the state data 1391 * 1392 * This is equivalent to the HALT instruction. 1393 */ 1394 static int acpi_idle_enter_c1(struct cpuidle_device *dev, 1395 struct cpuidle_state *state) 1396 { 1397 struct acpi_processor *pr; 1398 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1399 pr = processors[smp_processor_id()]; 1400 1401 if (unlikely(!pr)) 1402 return 0; 1403 1404 if (pr->flags.bm_check) 1405 acpi_idle_update_bm_rld(pr, cx); 1406 1407 acpi_safe_halt(); 1408 1409 cx->usage++; 1410 1411 return 0; 1412 } 1413 1414 /** 1415 * acpi_idle_enter_simple - enters an ACPI state without BM handling 1416 * @dev: the target CPU 1417 * @state: the state data 1418 */ 1419 static int acpi_idle_enter_simple(struct cpuidle_device *dev, 1420 struct cpuidle_state *state) 1421 { 1422 struct acpi_processor *pr; 1423 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1424 u32 t1, t2; 1425 int sleep_ticks = 0; 1426 1427 pr = processors[smp_processor_id()]; 1428 1429 if (unlikely(!pr)) 1430 return 0; 1431 1432 if (acpi_idle_suspend) 1433 return(acpi_idle_enter_c1(dev, state)); 1434 1435 local_irq_disable(); 1436 current_thread_info()->status &= ~TS_POLLING; 1437 /* 1438 * TS_POLLING-cleared state must be visible before we test 1439 * NEED_RESCHED: 1440 */ 1441 smp_mb(); 1442 1443 if (unlikely(need_resched())) { 1444 current_thread_info()->status |= TS_POLLING; 1445 local_irq_enable(); 1446 return 0; 1447 } 1448 1449 acpi_unlazy_tlb(smp_processor_id()); 1450 /* 1451 * Must be done before busmaster disable as we might need to 1452 * access HPET ! 1453 */ 1454 acpi_state_timer_broadcast(pr, cx, 1); 1455 1456 if (pr->flags.bm_check) 1457 acpi_idle_update_bm_rld(pr, cx); 1458 1459 if (cx->type == ACPI_STATE_C3) 1460 ACPI_FLUSH_CPU_CACHE(); 1461 1462 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1463 /* Tell the scheduler that we are going deep-idle: */ 1464 sched_clock_idle_sleep_event(); 1465 acpi_idle_do_entry(cx); 1466 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1467 1468 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 1469 /* TSC could halt in idle, so notify users */ 1470 if (tsc_halts_in_c(cx->type)) 1471 mark_tsc_unstable("TSC halts in idle");; 1472 #endif 1473 sleep_ticks = ticks_elapsed(t1, t2); 1474 1475 /* Tell the scheduler how much we idled: */ 1476 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1477 1478 local_irq_enable(); 1479 current_thread_info()->status |= TS_POLLING; 1480 1481 cx->usage++; 1482 1483 acpi_state_timer_broadcast(pr, cx, 0); 1484 cx->time += sleep_ticks; 1485 return ticks_elapsed_in_us(t1, t2); 1486 } 1487 1488 static int c3_cpu_count; 1489 static DEFINE_SPINLOCK(c3_lock); 1490 1491 /** 1492 * acpi_idle_enter_bm - enters C3 with proper BM handling 1493 * @dev: the target CPU 1494 * @state: the state data 1495 * 1496 * If BM is detected, the deepest non-C3 idle state is entered instead. 1497 */ 1498 static int acpi_idle_enter_bm(struct cpuidle_device *dev, 1499 struct cpuidle_state *state) 1500 { 1501 struct acpi_processor *pr; 1502 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 1503 u32 t1, t2; 1504 int sleep_ticks = 0; 1505 1506 pr = processors[smp_processor_id()]; 1507 1508 if (unlikely(!pr)) 1509 return 0; 1510 1511 if (acpi_idle_suspend) 1512 return(acpi_idle_enter_c1(dev, state)); 1513 1514 if (acpi_idle_bm_check()) { 1515 if (dev->safe_state) { 1516 return dev->safe_state->enter(dev, dev->safe_state); 1517 } else { 1518 acpi_safe_halt(); 1519 return 0; 1520 } 1521 } 1522 1523 local_irq_disable(); 1524 current_thread_info()->status &= ~TS_POLLING; 1525 /* 1526 * TS_POLLING-cleared state must be visible before we test 1527 * NEED_RESCHED: 1528 */ 1529 smp_mb(); 1530 1531 if (unlikely(need_resched())) { 1532 current_thread_info()->status |= TS_POLLING; 1533 local_irq_enable(); 1534 return 0; 1535 } 1536 1537 /* Tell the scheduler that we are going deep-idle: */ 1538 sched_clock_idle_sleep_event(); 1539 /* 1540 * Must be done before busmaster disable as we might need to 1541 * access HPET ! 1542 */ 1543 acpi_state_timer_broadcast(pr, cx, 1); 1544 1545 acpi_idle_update_bm_rld(pr, cx); 1546 1547 /* 1548 * disable bus master 1549 * bm_check implies we need ARB_DIS 1550 * !bm_check implies we need cache flush 1551 * bm_control implies whether we can do ARB_DIS 1552 * 1553 * That leaves a case where bm_check is set and bm_control is 1554 * not set. In that case we cannot do much, we enter C3 1555 * without doing anything. 1556 */ 1557 if (pr->flags.bm_check && pr->flags.bm_control) { 1558 spin_lock(&c3_lock); 1559 c3_cpu_count++; 1560 /* Disable bus master arbitration when all CPUs are in C3 */ 1561 if (c3_cpu_count == num_online_cpus()) 1562 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); 1563 spin_unlock(&c3_lock); 1564 } else if (!pr->flags.bm_check) { 1565 ACPI_FLUSH_CPU_CACHE(); 1566 } 1567 1568 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1569 acpi_idle_do_entry(cx); 1570 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); 1571 1572 /* Re-enable bus master arbitration */ 1573 if (pr->flags.bm_check && pr->flags.bm_control) { 1574 spin_lock(&c3_lock); 1575 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); 1576 c3_cpu_count--; 1577 spin_unlock(&c3_lock); 1578 } 1579 1580 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) 1581 /* TSC could halt in idle, so notify users */ 1582 if (tsc_halts_in_c(ACPI_STATE_C3)) 1583 mark_tsc_unstable("TSC halts in idle"); 1584 #endif 1585 sleep_ticks = ticks_elapsed(t1, t2); 1586 /* Tell the scheduler how much we idled: */ 1587 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1588 1589 local_irq_enable(); 1590 current_thread_info()->status |= TS_POLLING; 1591 1592 cx->usage++; 1593 1594 acpi_state_timer_broadcast(pr, cx, 0); 1595 cx->time += sleep_ticks; 1596 return ticks_elapsed_in_us(t1, t2); 1597 } 1598 1599 struct cpuidle_driver acpi_idle_driver = { 1600 .name = "acpi_idle", 1601 .owner = THIS_MODULE, 1602 }; 1603 1604 /** 1605 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE 1606 * @pr: the ACPI processor 1607 */ 1608 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) 1609 { 1610 int i, count = 0; 1611 struct acpi_processor_cx *cx; 1612 struct cpuidle_state *state; 1613 struct cpuidle_device *dev = &pr->power.dev; 1614 1615 if (!pr->flags.power_setup_done) 1616 return -EINVAL; 1617 1618 if (pr->flags.power == 0) { 1619 return -EINVAL; 1620 } 1621 1622 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 1623 cx = &pr->power.states[i]; 1624 state = &dev->states[count]; 1625 1626 if (!cx->valid) 1627 continue; 1628 1629 #ifdef CONFIG_HOTPLUG_CPU 1630 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 1631 !pr->flags.has_cst && 1632 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 1633 continue; 1634 #endif 1635 cpuidle_set_statedata(state, cx); 1636 1637 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 1638 state->exit_latency = cx->latency; 1639 state->target_residency = cx->latency * 6; 1640 state->power_usage = cx->power; 1641 1642 state->flags = 0; 1643 switch (cx->type) { 1644 case ACPI_STATE_C1: 1645 state->flags |= CPUIDLE_FLAG_SHALLOW; 1646 state->enter = acpi_idle_enter_c1; 1647 dev->safe_state = state; 1648 break; 1649 1650 case ACPI_STATE_C2: 1651 state->flags |= CPUIDLE_FLAG_BALANCED; 1652 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1653 state->enter = acpi_idle_enter_simple; 1654 dev->safe_state = state; 1655 break; 1656 1657 case ACPI_STATE_C3: 1658 state->flags |= CPUIDLE_FLAG_DEEP; 1659 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1660 state->flags |= CPUIDLE_FLAG_CHECK_BM; 1661 state->enter = pr->flags.bm_check ? 1662 acpi_idle_enter_bm : 1663 acpi_idle_enter_simple; 1664 break; 1665 } 1666 1667 count++; 1668 } 1669 1670 dev->state_count = count; 1671 1672 if (!count) 1673 return -EINVAL; 1674 1675 return 0; 1676 } 1677 1678 int acpi_processor_cst_has_changed(struct acpi_processor *pr) 1679 { 1680 int ret; 1681 1682 if (!pr) 1683 return -EINVAL; 1684 1685 if (nocst) { 1686 return -ENODEV; 1687 } 1688 1689 if (!pr->flags.power_setup_done) 1690 return -ENODEV; 1691 1692 cpuidle_pause_and_lock(); 1693 cpuidle_disable_device(&pr->power.dev); 1694 acpi_processor_get_power_info(pr); 1695 acpi_processor_setup_cpuidle(pr); 1696 ret = cpuidle_enable_device(&pr->power.dev); 1697 cpuidle_resume_and_unlock(); 1698 1699 return ret; 1700 } 1701 1702 #endif /* CONFIG_CPU_IDLE */ 1703 1704 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, 1705 struct acpi_device *device) 1706 { 1707 acpi_status status = 0; 1708 static int first_run; 1709 struct proc_dir_entry *entry = NULL; 1710 unsigned int i; 1711 1712 1713 if (!first_run) { 1714 dmi_check_system(processor_power_dmi_table); 1715 max_cstate = acpi_processor_cstate_check(max_cstate); 1716 if (max_cstate < ACPI_C_STATES_MAX) 1717 printk(KERN_NOTICE 1718 "ACPI: processor limited to max C-state %d\n", 1719 max_cstate); 1720 first_run++; 1721 #if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP) 1722 register_latency_notifier(&acpi_processor_latency_notifier); 1723 #endif 1724 } 1725 1726 if (!pr) 1727 return -EINVAL; 1728 1729 if (acpi_gbl_FADT.cst_control && !nocst) { 1730 status = 1731 acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8); 1732 if (ACPI_FAILURE(status)) { 1733 ACPI_EXCEPTION((AE_INFO, status, 1734 "Notifying BIOS of _CST ability failed")); 1735 } 1736 } 1737 1738 acpi_processor_get_power_info(pr); 1739 pr->flags.power_setup_done = 1; 1740 1741 /* 1742 * Install the idle handler if processor power management is supported. 1743 * Note that we use previously set idle handler will be used on 1744 * platforms that only support C1. 1745 */ 1746 if ((pr->flags.power) && (!boot_option_idle_override)) { 1747 #ifdef CONFIG_CPU_IDLE 1748 acpi_processor_setup_cpuidle(pr); 1749 pr->power.dev.cpu = pr->id; 1750 if (cpuidle_register_device(&pr->power.dev)) 1751 return -EIO; 1752 #endif 1753 1754 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); 1755 for (i = 1; i <= pr->power.count; i++) 1756 if (pr->power.states[i].valid) 1757 printk(" C%d[C%d]", i, 1758 pr->power.states[i].type); 1759 printk(")\n"); 1760 1761 #ifndef CONFIG_CPU_IDLE 1762 if (pr->id == 0) { 1763 pm_idle_save = pm_idle; 1764 pm_idle = acpi_processor_idle; 1765 } 1766 #endif 1767 } 1768 1769 /* 'power' [R] */ 1770 entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, 1771 S_IRUGO, acpi_device_dir(device)); 1772 if (!entry) 1773 return -EIO; 1774 else { 1775 entry->proc_fops = &acpi_processor_power_fops; 1776 entry->data = acpi_driver_data(device); 1777 entry->owner = THIS_MODULE; 1778 } 1779 1780 return 0; 1781 } 1782 1783 int acpi_processor_power_exit(struct acpi_processor *pr, 1784 struct acpi_device *device) 1785 { 1786 #ifdef CONFIG_CPU_IDLE 1787 if ((pr->flags.power) && (!boot_option_idle_override)) 1788 cpuidle_unregister_device(&pr->power.dev); 1789 #endif 1790 pr->flags.power_setup_done = 0; 1791 1792 if (acpi_device_dir(device)) 1793 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, 1794 acpi_device_dir(device)); 1795 1796 #ifndef CONFIG_CPU_IDLE 1797 1798 /* Unregister the idle handler when processor #0 is removed. */ 1799 if (pr->id == 0) { 1800 pm_idle = pm_idle_save; 1801 1802 /* 1803 * We are about to unload the current idle thread pm callback 1804 * (pm_idle), Wait for all processors to update cached/local 1805 * copies of pm_idle before proceeding. 1806 */ 1807 cpu_idle_wait(); 1808 #ifdef CONFIG_SMP 1809 unregister_latency_notifier(&acpi_processor_latency_notifier); 1810 #endif 1811 } 1812 #endif 1813 1814 return 0; 1815 } 1816