1 /* 2 * processor_idle - idle state submodule to the ACPI processor driver 3 * 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 * - Added processor hotplug support 9 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 10 * - Added support for C3 on SMP 11 * 12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or (at 17 * your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, but 20 * WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 * General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License along 25 * with this program; if not, write to the Free Software Foundation, Inc., 26 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 27 * 28 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 29 */ 30 31 #include <linux/kernel.h> 32 #include <linux/module.h> 33 #include <linux/init.h> 34 #include <linux/cpufreq.h> 35 #include <linux/proc_fs.h> 36 #include <linux/seq_file.h> 37 #include <linux/acpi.h> 38 #include <linux/dmi.h> 39 #include <linux/moduleparam.h> 40 41 #include <asm/io.h> 42 #include <asm/uaccess.h> 43 44 #include <acpi/acpi_bus.h> 45 #include <acpi/processor.h> 46 47 #define ACPI_PROCESSOR_COMPONENT 0x01000000 48 #define ACPI_PROCESSOR_CLASS "processor" 49 #define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver" 50 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 51 ACPI_MODULE_NAME ("acpi_processor") 52 53 #define ACPI_PROCESSOR_FILE_POWER "power" 54 55 #define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000) 56 #define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 57 #define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */ 58 59 static void (*pm_idle_save)(void); 60 module_param(max_cstate, uint, 0644); 61 62 static unsigned int nocst = 0; 63 module_param(nocst, uint, 0000); 64 65 /* 66 * bm_history -- bit-mask with a bit per jiffy of bus-master activity 67 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms 68 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms 69 * 100 HZ: 0x0000000F: 4 jiffies = 40ms 70 * reduce history for more aggressive entry into C3 71 */ 72 static unsigned int bm_history = (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1)); 73 module_param(bm_history, uint, 0644); 74 /* -------------------------------------------------------------------------- 75 Power Management 76 -------------------------------------------------------------------------- */ 77 78 /* 79 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 80 * For now disable this. Probably a bug somewhere else. 81 * 82 * To skip this limit, boot/load with a large max_cstate limit. 83 */ 84 static int set_max_cstate(struct dmi_system_id *id) 85 { 86 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 87 return 0; 88 89 printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate." 90 " Override with \"processor.max_cstate=%d\"\n", id->ident, 91 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 92 93 max_cstate = (long)id->driver_data; 94 95 return 0; 96 } 97 98 99 static struct dmi_system_id __initdata processor_power_dmi_table[] = { 100 { set_max_cstate, "IBM ThinkPad R40e", { 101 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 102 DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 103 { set_max_cstate, "Medion 41700", { 104 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 105 DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }, (void*)1}, 106 { set_max_cstate, "Clevo 5600D", { 107 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 108 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307") }, 109 (void*)2}, 110 {}, 111 }; 112 113 114 static inline u32 115 ticks_elapsed ( 116 u32 t1, 117 u32 t2) 118 { 119 if (t2 >= t1) 120 return (t2 - t1); 121 else if (!acpi_fadt.tmr_val_ext) 122 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 123 else 124 return ((0xFFFFFFFF - t1) + t2); 125 } 126 127 128 static void 129 acpi_processor_power_activate ( 130 struct acpi_processor *pr, 131 struct acpi_processor_cx *new) 132 { 133 struct acpi_processor_cx *old; 134 135 if (!pr || !new) 136 return; 137 138 old = pr->power.state; 139 140 if (old) 141 old->promotion.count = 0; 142 new->demotion.count = 0; 143 144 /* Cleanup from old state. */ 145 if (old) { 146 switch (old->type) { 147 case ACPI_STATE_C3: 148 /* Disable bus master reload */ 149 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 150 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK); 151 break; 152 } 153 } 154 155 /* Prepare to use new state. */ 156 switch (new->type) { 157 case ACPI_STATE_C3: 158 /* Enable bus master reload */ 159 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 160 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK); 161 break; 162 } 163 164 pr->power.state = new; 165 166 return; 167 } 168 169 170 static atomic_t c3_cpu_count; 171 172 173 static void acpi_processor_idle (void) 174 { 175 struct acpi_processor *pr = NULL; 176 struct acpi_processor_cx *cx = NULL; 177 struct acpi_processor_cx *next_state = NULL; 178 int sleep_ticks = 0; 179 u32 t1, t2 = 0; 180 181 pr = processors[raw_smp_processor_id()]; 182 if (!pr) 183 return; 184 185 /* 186 * Interrupts must be disabled during bus mastering calculations and 187 * for C2/C3 transitions. 188 */ 189 local_irq_disable(); 190 191 /* 192 * Check whether we truly need to go idle, or should 193 * reschedule: 194 */ 195 if (unlikely(need_resched())) { 196 local_irq_enable(); 197 return; 198 } 199 200 cx = pr->power.state; 201 if (!cx) 202 goto easy_out; 203 204 /* 205 * Check BM Activity 206 * ----------------- 207 * Check for bus mastering activity (if required), record, and check 208 * for demotion. 209 */ 210 if (pr->flags.bm_check) { 211 u32 bm_status = 0; 212 unsigned long diff = jiffies - pr->power.bm_check_timestamp; 213 214 if (diff > 32) 215 diff = 32; 216 217 while (diff) { 218 /* if we didn't get called, assume there was busmaster activity */ 219 diff--; 220 if (diff) 221 pr->power.bm_activity |= 0x1; 222 pr->power.bm_activity <<= 1; 223 } 224 225 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, 226 &bm_status, ACPI_MTX_DO_NOT_LOCK); 227 if (bm_status) { 228 pr->power.bm_activity++; 229 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 230 1, ACPI_MTX_DO_NOT_LOCK); 231 } 232 /* 233 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 234 * the true state of bus mastering activity; forcing us to 235 * manually check the BMIDEA bit of each IDE channel. 236 */ 237 else if (errata.piix4.bmisx) { 238 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 239 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 240 pr->power.bm_activity++; 241 } 242 243 pr->power.bm_check_timestamp = jiffies; 244 245 /* 246 * Apply bus mastering demotion policy. Automatically demote 247 * to avoid a faulty transition. Note that the processor 248 * won't enter a low-power state during this call (to this 249 * funciton) but should upon the next. 250 * 251 * TBD: A better policy might be to fallback to the demotion 252 * state (use it for this quantum only) istead of 253 * demoting -- and rely on duration as our sole demotion 254 * qualification. This may, however, introduce DMA 255 * issues (e.g. floppy DMA transfer overrun/underrun). 256 */ 257 if (pr->power.bm_activity & cx->demotion.threshold.bm) { 258 local_irq_enable(); 259 next_state = cx->demotion.state; 260 goto end; 261 } 262 } 263 264 cx->usage++; 265 266 /* 267 * Sleep: 268 * ------ 269 * Invoke the current Cx state to put the processor to sleep. 270 */ 271 switch (cx->type) { 272 273 case ACPI_STATE_C1: 274 /* 275 * Invoke C1. 276 * Use the appropriate idle routine, the one that would 277 * be used without acpi C-states. 278 */ 279 if (pm_idle_save) 280 pm_idle_save(); 281 else 282 safe_halt(); 283 /* 284 * TBD: Can't get time duration while in C1, as resumes 285 * go to an ISR rather than here. Need to instrument 286 * base interrupt handler. 287 */ 288 sleep_ticks = 0xFFFFFFFF; 289 break; 290 291 case ACPI_STATE_C2: 292 /* Get start time (ticks) */ 293 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 294 /* Invoke C2 */ 295 inb(cx->address); 296 /* Dummy op - must do something useless after P_LVL2 read */ 297 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 298 /* Get end time (ticks) */ 299 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 300 /* Re-enable interrupts */ 301 local_irq_enable(); 302 /* Compute time (ticks) that we were actually asleep */ 303 sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; 304 break; 305 306 case ACPI_STATE_C3: 307 308 if (pr->flags.bm_check) { 309 if (atomic_inc_return(&c3_cpu_count) == 310 num_online_cpus()) { 311 /* 312 * All CPUs are trying to go to C3 313 * Disable bus master arbitration 314 */ 315 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 316 ACPI_MTX_DO_NOT_LOCK); 317 } 318 } else { 319 /* SMP with no shared cache... Invalidate cache */ 320 ACPI_FLUSH_CPU_CACHE(); 321 } 322 323 /* Get start time (ticks) */ 324 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 325 /* Invoke C3 */ 326 inb(cx->address); 327 /* Dummy op - must do something useless after P_LVL3 read */ 328 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 329 /* Get end time (ticks) */ 330 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 331 if (pr->flags.bm_check) { 332 /* Enable bus master arbitration */ 333 atomic_dec(&c3_cpu_count); 334 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK); 335 } 336 337 /* Re-enable interrupts */ 338 local_irq_enable(); 339 /* Compute time (ticks) that we were actually asleep */ 340 sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; 341 break; 342 343 default: 344 local_irq_enable(); 345 return; 346 } 347 348 next_state = pr->power.state; 349 350 /* 351 * Promotion? 352 * ---------- 353 * Track the number of longs (time asleep is greater than threshold) 354 * and promote when the count threshold is reached. Note that bus 355 * mastering activity may prevent promotions. 356 * Do not promote above max_cstate. 357 */ 358 if (cx->promotion.state && 359 ((cx->promotion.state - pr->power.states) <= max_cstate)) { 360 if (sleep_ticks > cx->promotion.threshold.ticks) { 361 cx->promotion.count++; 362 cx->demotion.count = 0; 363 if (cx->promotion.count >= cx->promotion.threshold.count) { 364 if (pr->flags.bm_check) { 365 if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) { 366 next_state = cx->promotion.state; 367 goto end; 368 } 369 } 370 else { 371 next_state = cx->promotion.state; 372 goto end; 373 } 374 } 375 } 376 } 377 378 /* 379 * Demotion? 380 * --------- 381 * Track the number of shorts (time asleep is less than time threshold) 382 * and demote when the usage threshold is reached. 383 */ 384 if (cx->demotion.state) { 385 if (sleep_ticks < cx->demotion.threshold.ticks) { 386 cx->demotion.count++; 387 cx->promotion.count = 0; 388 if (cx->demotion.count >= cx->demotion.threshold.count) { 389 next_state = cx->demotion.state; 390 goto end; 391 } 392 } 393 } 394 395 end: 396 /* 397 * Demote if current state exceeds max_cstate 398 */ 399 if ((pr->power.state - pr->power.states) > max_cstate) { 400 if (cx->demotion.state) 401 next_state = cx->demotion.state; 402 } 403 404 /* 405 * New Cx State? 406 * ------------- 407 * If we're going to start using a new Cx state we must clean up 408 * from the previous and prepare to use the new. 409 */ 410 if (next_state != pr->power.state) 411 acpi_processor_power_activate(pr, next_state); 412 413 return; 414 415 easy_out: 416 /* do C1 instead of busy loop */ 417 if (pm_idle_save) 418 pm_idle_save(); 419 else 420 safe_halt(); 421 return; 422 } 423 424 425 static int 426 acpi_processor_set_power_policy ( 427 struct acpi_processor *pr) 428 { 429 unsigned int i; 430 unsigned int state_is_set = 0; 431 struct acpi_processor_cx *lower = NULL; 432 struct acpi_processor_cx *higher = NULL; 433 struct acpi_processor_cx *cx; 434 435 ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy"); 436 437 if (!pr) 438 return_VALUE(-EINVAL); 439 440 /* 441 * This function sets the default Cx state policy (OS idle handler). 442 * Our scheme is to promote quickly to C2 but more conservatively 443 * to C3. We're favoring C2 for its characteristics of low latency 444 * (quick response), good power savings, and ability to allow bus 445 * mastering activity. Note that the Cx state policy is completely 446 * customizable and can be altered dynamically. 447 */ 448 449 /* startup state */ 450 for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 451 cx = &pr->power.states[i]; 452 if (!cx->valid) 453 continue; 454 455 if (!state_is_set) 456 pr->power.state = cx; 457 state_is_set++; 458 break; 459 } 460 461 if (!state_is_set) 462 return_VALUE(-ENODEV); 463 464 /* demotion */ 465 for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 466 cx = &pr->power.states[i]; 467 if (!cx->valid) 468 continue; 469 470 if (lower) { 471 cx->demotion.state = lower; 472 cx->demotion.threshold.ticks = cx->latency_ticks; 473 cx->demotion.threshold.count = 1; 474 if (cx->type == ACPI_STATE_C3) 475 cx->demotion.threshold.bm = bm_history; 476 } 477 478 lower = cx; 479 } 480 481 /* promotion */ 482 for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) { 483 cx = &pr->power.states[i]; 484 if (!cx->valid) 485 continue; 486 487 if (higher) { 488 cx->promotion.state = higher; 489 cx->promotion.threshold.ticks = cx->latency_ticks; 490 if (cx->type >= ACPI_STATE_C2) 491 cx->promotion.threshold.count = 4; 492 else 493 cx->promotion.threshold.count = 10; 494 if (higher->type == ACPI_STATE_C3) 495 cx->promotion.threshold.bm = bm_history; 496 } 497 498 higher = cx; 499 } 500 501 return_VALUE(0); 502 } 503 504 505 static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr) 506 { 507 int i; 508 509 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt"); 510 511 if (!pr) 512 return_VALUE(-EINVAL); 513 514 if (!pr->pblk) 515 return_VALUE(-ENODEV); 516 517 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 518 memset(pr->power.states, 0, sizeof(struct acpi_processor_cx)); 519 520 /* if info is obtained from pblk/fadt, type equals state */ 521 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 522 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 523 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 524 525 /* the C0 state only exists as a filler in our array, 526 * and all processors need to support C1 */ 527 pr->power.states[ACPI_STATE_C0].valid = 1; 528 pr->power.states[ACPI_STATE_C1].valid = 1; 529 530 /* determine C2 and C3 address from pblk */ 531 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 532 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 533 534 /* determine latencies from FADT */ 535 pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; 536 pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; 537 538 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 539 "lvl2[0x%08x] lvl3[0x%08x]\n", 540 pr->power.states[ACPI_STATE_C2].address, 541 pr->power.states[ACPI_STATE_C3].address)); 542 543 return_VALUE(0); 544 } 545 546 547 static int acpi_processor_get_power_info_default_c1 (struct acpi_processor *pr) 548 { 549 int i; 550 551 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); 552 553 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 554 memset(&(pr->power.states[i]), 0, 555 sizeof(struct acpi_processor_cx)); 556 557 /* if info is obtained from pblk/fadt, type equals state */ 558 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 559 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 560 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 561 562 /* the C0 state only exists as a filler in our array, 563 * and all processors need to support C1 */ 564 pr->power.states[ACPI_STATE_C0].valid = 1; 565 pr->power.states[ACPI_STATE_C1].valid = 1; 566 567 return_VALUE(0); 568 } 569 570 571 static int acpi_processor_get_power_info_cst (struct acpi_processor *pr) 572 { 573 acpi_status status = 0; 574 acpi_integer count; 575 int i; 576 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 577 union acpi_object *cst; 578 579 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst"); 580 581 if (nocst) 582 return_VALUE(-ENODEV); 583 584 pr->power.count = 0; 585 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 586 memset(&(pr->power.states[i]), 0, 587 sizeof(struct acpi_processor_cx)); 588 589 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 590 if (ACPI_FAILURE(status)) { 591 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n")); 592 return_VALUE(-ENODEV); 593 } 594 595 cst = (union acpi_object *) buffer.pointer; 596 597 /* There must be at least 2 elements */ 598 if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { 599 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n")); 600 status = -EFAULT; 601 goto end; 602 } 603 604 count = cst->package.elements[0].integer.value; 605 606 /* Validate number of power states. */ 607 if (count < 1 || count != cst->package.count - 1) { 608 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n")); 609 status = -EFAULT; 610 goto end; 611 } 612 613 /* We support up to ACPI_PROCESSOR_MAX_POWER. */ 614 if (count > ACPI_PROCESSOR_MAX_POWER) { 615 printk(KERN_WARNING "Limiting number of power states to max (%d)\n", ACPI_PROCESSOR_MAX_POWER); 616 printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 617 count = ACPI_PROCESSOR_MAX_POWER; 618 } 619 620 /* Tell driver that at least _CST is supported. */ 621 pr->flags.has_cst = 1; 622 623 for (i = 1; i <= count; i++) { 624 union acpi_object *element; 625 union acpi_object *obj; 626 struct acpi_power_register *reg; 627 struct acpi_processor_cx cx; 628 629 memset(&cx, 0, sizeof(cx)); 630 631 element = (union acpi_object *) &(cst->package.elements[i]); 632 if (element->type != ACPI_TYPE_PACKAGE) 633 continue; 634 635 if (element->package.count != 4) 636 continue; 637 638 obj = (union acpi_object *) &(element->package.elements[0]); 639 640 if (obj->type != ACPI_TYPE_BUFFER) 641 continue; 642 643 reg = (struct acpi_power_register *) obj->buffer.pointer; 644 645 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 646 (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) 647 continue; 648 649 cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ? 650 0 : reg->address; 651 652 /* There should be an easy way to extract an integer... */ 653 obj = (union acpi_object *) &(element->package.elements[1]); 654 if (obj->type != ACPI_TYPE_INTEGER) 655 continue; 656 657 cx.type = obj->integer.value; 658 659 if ((cx.type != ACPI_STATE_C1) && 660 (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) 661 continue; 662 663 if ((cx.type < ACPI_STATE_C1) || 664 (cx.type > ACPI_STATE_C3)) 665 continue; 666 667 obj = (union acpi_object *) &(element->package.elements[2]); 668 if (obj->type != ACPI_TYPE_INTEGER) 669 continue; 670 671 cx.latency = obj->integer.value; 672 673 obj = (union acpi_object *) &(element->package.elements[3]); 674 if (obj->type != ACPI_TYPE_INTEGER) 675 continue; 676 677 cx.power = obj->integer.value; 678 679 (pr->power.count)++; 680 memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx)); 681 } 682 683 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count)); 684 685 /* Validate number of power states discovered */ 686 if (pr->power.count < 2) 687 status = -ENODEV; 688 689 end: 690 acpi_os_free(buffer.pointer); 691 692 return_VALUE(status); 693 } 694 695 696 static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) 697 { 698 ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2"); 699 700 if (!cx->address) 701 return_VOID; 702 703 /* 704 * C2 latency must be less than or equal to 100 705 * microseconds. 706 */ 707 else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 708 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 709 "latency too large [%d]\n", 710 cx->latency)); 711 return_VOID; 712 } 713 714 /* 715 * Otherwise we've met all of our C2 requirements. 716 * Normalize the C2 latency to expidite policy 717 */ 718 cx->valid = 1; 719 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 720 721 return_VOID; 722 } 723 724 725 static void acpi_processor_power_verify_c3( 726 struct acpi_processor *pr, 727 struct acpi_processor_cx *cx) 728 { 729 static int bm_check_flag; 730 731 ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3"); 732 733 if (!cx->address) 734 return_VOID; 735 736 /* 737 * C3 latency must be less than or equal to 1000 738 * microseconds. 739 */ 740 else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 741 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 742 "latency too large [%d]\n", 743 cx->latency)); 744 return_VOID; 745 } 746 747 /* 748 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 749 * DMA transfers are used by any ISA device to avoid livelock. 750 * Note that we could disable Type-F DMA (as recommended by 751 * the erratum), but this is known to disrupt certain ISA 752 * devices thus we take the conservative approach. 753 */ 754 else if (errata.piix4.fdma) { 755 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 756 "C3 not supported on PIIX4 with Type-F DMA\n")); 757 return_VOID; 758 } 759 760 /* All the logic here assumes flags.bm_check is same across all CPUs */ 761 if (!bm_check_flag) { 762 /* Determine whether bm_check is needed based on CPU */ 763 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 764 bm_check_flag = pr->flags.bm_check; 765 } else { 766 pr->flags.bm_check = bm_check_flag; 767 } 768 769 if (pr->flags.bm_check) { 770 /* bus mastering control is necessary */ 771 if (!pr->flags.bm_control) { 772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 773 "C3 support requires bus mastering control\n")); 774 return_VOID; 775 } 776 } else { 777 /* 778 * WBINVD should be set in fadt, for C3 state to be 779 * supported on when bm_check is not required. 780 */ 781 if (acpi_fadt.wb_invd != 1) { 782 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 783 "Cache invalidation should work properly" 784 " for C3 to be enabled on SMP systems\n")); 785 return_VOID; 786 } 787 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 788 0, ACPI_MTX_DO_NOT_LOCK); 789 } 790 791 /* 792 * Otherwise we've met all of our C3 requirements. 793 * Normalize the C3 latency to expidite policy. Enable 794 * checking of bus mastering status (bm_check) so we can 795 * use this in our C3 policy 796 */ 797 cx->valid = 1; 798 cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); 799 800 return_VOID; 801 } 802 803 804 static int acpi_processor_power_verify(struct acpi_processor *pr) 805 { 806 unsigned int i; 807 unsigned int working = 0; 808 809 for (i=1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 810 struct acpi_processor_cx *cx = &pr->power.states[i]; 811 812 switch (cx->type) { 813 case ACPI_STATE_C1: 814 cx->valid = 1; 815 break; 816 817 case ACPI_STATE_C2: 818 acpi_processor_power_verify_c2(cx); 819 break; 820 821 case ACPI_STATE_C3: 822 acpi_processor_power_verify_c3(pr, cx); 823 break; 824 } 825 826 if (cx->valid) 827 working++; 828 } 829 830 return (working); 831 } 832 833 static int acpi_processor_get_power_info ( 834 struct acpi_processor *pr) 835 { 836 unsigned int i; 837 int result; 838 839 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info"); 840 841 /* NOTE: the idle thread may not be running while calling 842 * this function */ 843 844 result = acpi_processor_get_power_info_cst(pr); 845 if ((result) || (acpi_processor_power_verify(pr) < 2)) { 846 result = acpi_processor_get_power_info_fadt(pr); 847 if ((result) || (acpi_processor_power_verify(pr) < 2)) 848 result = acpi_processor_get_power_info_default_c1(pr); 849 } 850 851 /* 852 * Set Default Policy 853 * ------------------ 854 * Now that we know which states are supported, set the default 855 * policy. Note that this policy can be changed dynamically 856 * (e.g. encourage deeper sleeps to conserve battery life when 857 * not on AC). 858 */ 859 result = acpi_processor_set_power_policy(pr); 860 if (result) 861 return_VALUE(result); 862 863 /* 864 * if one state of type C2 or C3 is available, mark this 865 * CPU as being "idle manageable" 866 */ 867 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 868 if (pr->power.states[i].valid) { 869 pr->power.count = i; 870 pr->flags.power = 1; 871 } 872 } 873 874 return_VALUE(0); 875 } 876 877 int acpi_processor_cst_has_changed (struct acpi_processor *pr) 878 { 879 int result = 0; 880 881 ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed"); 882 883 if (!pr) 884 return_VALUE(-EINVAL); 885 886 if ( nocst) { 887 return_VALUE(-ENODEV); 888 } 889 890 if (!pr->flags.power_setup_done) 891 return_VALUE(-ENODEV); 892 893 /* Fall back to the default idle loop */ 894 pm_idle = pm_idle_save; 895 synchronize_sched(); /* Relies on interrupts forcing exit from idle. */ 896 897 pr->flags.power = 0; 898 result = acpi_processor_get_power_info(pr); 899 if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) 900 pm_idle = acpi_processor_idle; 901 902 return_VALUE(result); 903 } 904 905 /* proc interface */ 906 907 static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) 908 { 909 struct acpi_processor *pr = (struct acpi_processor *)seq->private; 910 unsigned int i; 911 912 ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show"); 913 914 if (!pr) 915 goto end; 916 917 seq_printf(seq, "active state: C%zd\n" 918 "max_cstate: C%d\n" 919 "bus master activity: %08x\n", 920 pr->power.state ? pr->power.state - pr->power.states : 0, 921 max_cstate, 922 (unsigned)pr->power.bm_activity); 923 924 seq_puts(seq, "states:\n"); 925 926 for (i = 1; i <= pr->power.count; i++) { 927 seq_printf(seq, " %cC%d: ", 928 (&pr->power.states[i] == pr->power.state?'*':' '), i); 929 930 if (!pr->power.states[i].valid) { 931 seq_puts(seq, "<not supported>\n"); 932 continue; 933 } 934 935 switch (pr->power.states[i].type) { 936 case ACPI_STATE_C1: 937 seq_printf(seq, "type[C1] "); 938 break; 939 case ACPI_STATE_C2: 940 seq_printf(seq, "type[C2] "); 941 break; 942 case ACPI_STATE_C3: 943 seq_printf(seq, "type[C3] "); 944 break; 945 default: 946 seq_printf(seq, "type[--] "); 947 break; 948 } 949 950 if (pr->power.states[i].promotion.state) 951 seq_printf(seq, "promotion[C%zd] ", 952 (pr->power.states[i].promotion.state - 953 pr->power.states)); 954 else 955 seq_puts(seq, "promotion[--] "); 956 957 if (pr->power.states[i].demotion.state) 958 seq_printf(seq, "demotion[C%zd] ", 959 (pr->power.states[i].demotion.state - 960 pr->power.states)); 961 else 962 seq_puts(seq, "demotion[--] "); 963 964 seq_printf(seq, "latency[%03d] usage[%08d]\n", 965 pr->power.states[i].latency, 966 pr->power.states[i].usage); 967 } 968 969 end: 970 return_VALUE(0); 971 } 972 973 static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) 974 { 975 return single_open(file, acpi_processor_power_seq_show, 976 PDE(inode)->data); 977 } 978 979 static struct file_operations acpi_processor_power_fops = { 980 .open = acpi_processor_power_open_fs, 981 .read = seq_read, 982 .llseek = seq_lseek, 983 .release = single_release, 984 }; 985 986 int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device) 987 { 988 acpi_status status = 0; 989 static int first_run = 0; 990 struct proc_dir_entry *entry = NULL; 991 unsigned int i; 992 993 ACPI_FUNCTION_TRACE("acpi_processor_power_init"); 994 995 if (!first_run) { 996 dmi_check_system(processor_power_dmi_table); 997 if (max_cstate < ACPI_C_STATES_MAX) 998 printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n", max_cstate); 999 first_run++; 1000 } 1001 1002 if (!pr) 1003 return_VALUE(-EINVAL); 1004 1005 if (acpi_fadt.cst_cnt && !nocst) { 1006 status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); 1007 if (ACPI_FAILURE(status)) { 1008 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1009 "Notifying BIOS of _CST ability failed\n")); 1010 } 1011 } 1012 1013 acpi_processor_power_init_pdc(&(pr->power), pr->id); 1014 acpi_processor_set_pdc(pr, pr->power.pdc); 1015 acpi_processor_get_power_info(pr); 1016 1017 /* 1018 * Install the idle handler if processor power management is supported. 1019 * Note that we use previously set idle handler will be used on 1020 * platforms that only support C1. 1021 */ 1022 if ((pr->flags.power) && (!boot_option_idle_override)) { 1023 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); 1024 for (i = 1; i <= pr->power.count; i++) 1025 if (pr->power.states[i].valid) 1026 printk(" C%d[C%d]", i, pr->power.states[i].type); 1027 printk(")\n"); 1028 1029 if (pr->id == 0) { 1030 pm_idle_save = pm_idle; 1031 pm_idle = acpi_processor_idle; 1032 } 1033 } 1034 1035 /* 'power' [R] */ 1036 entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, 1037 S_IRUGO, acpi_device_dir(device)); 1038 if (!entry) 1039 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1040 "Unable to create '%s' fs entry\n", 1041 ACPI_PROCESSOR_FILE_POWER)); 1042 else { 1043 entry->proc_fops = &acpi_processor_power_fops; 1044 entry->data = acpi_driver_data(device); 1045 entry->owner = THIS_MODULE; 1046 } 1047 1048 pr->flags.power_setup_done = 1; 1049 1050 return_VALUE(0); 1051 } 1052 1053 int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device) 1054 { 1055 ACPI_FUNCTION_TRACE("acpi_processor_power_exit"); 1056 1057 pr->flags.power_setup_done = 0; 1058 1059 if (acpi_device_dir(device)) 1060 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,acpi_device_dir(device)); 1061 1062 /* Unregister the idle handler when processor #0 is removed. */ 1063 if (pr->id == 0) { 1064 pm_idle = pm_idle_save; 1065 1066 /* 1067 * We are about to unload the current idle thread pm callback 1068 * (pm_idle), Wait for all processors to update cached/local 1069 * copies of pm_idle before proceeding. 1070 */ 1071 cpu_idle_wait(); 1072 } 1073 1074 return_VALUE(0); 1075 } 1076