1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * processor_idle - idle state submodule to the ACPI processor driver 4 * 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de> 8 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 9 * - Added processor hotplug support 10 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 11 * - Added support for C3 on SMP 12 */ 13 #define pr_fmt(fmt) "ACPI: " fmt 14 15 #include <linux/module.h> 16 #include <linux/acpi.h> 17 #include <linux/dmi.h> 18 #include <linux/sched.h> /* need_resched() */ 19 #include <linux/tick.h> 20 #include <linux/cpuidle.h> 21 #include <linux/cpu.h> 22 #include <linux/minmax.h> 23 #include <linux/perf_event.h> 24 #include <acpi/processor.h> 25 #include <linux/context_tracking.h> 26 27 /* 28 * Include the apic definitions for x86 to have the APIC timer related defines 29 * available also for UP (on SMP it gets magically included via linux/smp.h). 30 * asm/acpi.h is not an option, as it would require more include magic. Also 31 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. 32 */ 33 #ifdef CONFIG_X86 34 #include <asm/apic.h> 35 #include <asm/cpu.h> 36 #endif 37 38 #define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) 39 40 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; 41 module_param(max_cstate, uint, 0400); 42 static bool nocst __read_mostly; 43 module_param(nocst, bool, 0400); 44 static bool bm_check_disable __read_mostly; 45 module_param(bm_check_disable, bool, 0400); 46 47 static unsigned int latency_factor __read_mostly = 2; 48 module_param(latency_factor, uint, 0644); 49 50 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); 51 52 struct cpuidle_driver acpi_idle_driver = { 53 .name = "acpi_idle", 54 .owner = THIS_MODULE, 55 }; 56 57 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE 58 static 59 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); 60 61 static int disabled_by_idle_boot_param(void) 62 { 63 return boot_option_idle_override == IDLE_POLL || 64 boot_option_idle_override == IDLE_HALT; 65 } 66 67 /* 68 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3. 69 * For now disable this. Probably a bug somewhere else. 70 * 71 * To skip this limit, boot/load with a large max_cstate limit. 72 */ 73 static int set_max_cstate(const struct dmi_system_id *id) 74 { 75 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 76 return 0; 77 78 pr_notice("%s detected - limiting to C%ld max_cstate." 79 " Override with \"processor.max_cstate=%d\"\n", id->ident, 80 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1); 81 82 max_cstate = (long)id->driver_data; 83 84 return 0; 85 } 86 87 static const struct dmi_system_id processor_power_dmi_table[] = { 88 { set_max_cstate, "Clevo 5600D", { 89 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 90 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 91 (void *)2}, 92 { set_max_cstate, "Pavilion zv5000", { 93 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 94 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, 95 (void *)1}, 96 { set_max_cstate, "Asus L8400B", { 97 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 98 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, 99 (void *)1}, 100 {}, 101 }; 102 103 104 /* 105 * Callers should disable interrupts before the call and enable 106 * interrupts after return. 107 */ 108 static void __cpuidle acpi_safe_halt(void) 109 { 110 if (!tif_need_resched()) { 111 raw_safe_halt(); 112 raw_local_irq_disable(); 113 } 114 } 115 116 #ifdef ARCH_APICTIMER_STOPS_ON_C3 117 118 /* 119 * Some BIOS implementations switch to C3 in the published C2 state. 120 * This seems to be a common problem on AMD boxen, but other vendors 121 * are affected too. We pick the most conservative approach: we assume 122 * that the local APIC stops in both C2 and C3. 123 */ 124 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 125 struct acpi_processor_cx *cx) 126 { 127 struct acpi_processor_power *pwr = &pr->power; 128 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2; 129 130 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 131 return; 132 133 if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) 134 type = ACPI_STATE_C1; 135 136 /* 137 * Check, if one of the previous states already marked the lapic 138 * unstable 139 */ 140 if (pwr->timer_broadcast_on_state < state) 141 return; 142 143 if (cx->type >= type) 144 pr->power.timer_broadcast_on_state = state; 145 } 146 147 static void __lapic_timer_propagate_broadcast(void *arg) 148 { 149 struct acpi_processor *pr = arg; 150 151 if (pr->power.timer_broadcast_on_state < INT_MAX) 152 tick_broadcast_enable(); 153 else 154 tick_broadcast_disable(); 155 } 156 157 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) 158 { 159 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast, 160 (void *)pr, 1); 161 } 162 163 /* Power(C) State timer broadcast control */ 164 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, 165 struct acpi_processor_cx *cx) 166 { 167 return cx - pr->power.states >= pr->power.timer_broadcast_on_state; 168 } 169 170 #else 171 172 static void lapic_timer_check_state(int state, struct acpi_processor *pr, 173 struct acpi_processor_cx *cstate) { } 174 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } 175 176 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, 177 struct acpi_processor_cx *cx) 178 { 179 return false; 180 } 181 182 #endif 183 184 #if defined(CONFIG_X86) 185 static void tsc_check_state(int state) 186 { 187 switch (boot_cpu_data.x86_vendor) { 188 case X86_VENDOR_HYGON: 189 case X86_VENDOR_AMD: 190 case X86_VENDOR_INTEL: 191 case X86_VENDOR_CENTAUR: 192 case X86_VENDOR_ZHAOXIN: 193 /* 194 * AMD Fam10h TSC will tick in all 195 * C/P/S0/S1 states when this bit is set. 196 */ 197 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 198 return; 199 fallthrough; 200 default: 201 /* TSC could halt in idle, so notify users */ 202 if (state > ACPI_STATE_C1) 203 mark_tsc_unstable("TSC halts in idle"); 204 } 205 } 206 #else 207 static void tsc_check_state(int state) { return; } 208 #endif 209 210 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) 211 { 212 213 if (!pr->pblk) 214 return -ENODEV; 215 216 /* if info is obtained from pblk/fadt, type equals state */ 217 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2; 218 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3; 219 220 #ifndef CONFIG_HOTPLUG_CPU 221 /* 222 * Check for P_LVL2_UP flag before entering C2 and above on 223 * an SMP system. 224 */ 225 if ((num_online_cpus() > 1) && 226 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) 227 return -ENODEV; 228 #endif 229 230 /* determine C2 and C3 address from pblk */ 231 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4; 232 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 233 234 /* determine latencies from FADT */ 235 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency; 236 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency; 237 238 /* 239 * FADT specified C2 latency must be less than or equal to 240 * 100 microseconds. 241 */ 242 if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { 243 acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n", 244 acpi_gbl_FADT.c2_latency); 245 /* invalidate C2 */ 246 pr->power.states[ACPI_STATE_C2].address = 0; 247 } 248 249 /* 250 * FADT supplied C3 latency must be less than or equal to 251 * 1000 microseconds. 252 */ 253 if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) { 254 acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n", 255 acpi_gbl_FADT.c3_latency); 256 /* invalidate C3 */ 257 pr->power.states[ACPI_STATE_C3].address = 0; 258 } 259 260 acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n", 261 pr->power.states[ACPI_STATE_C2].address, 262 pr->power.states[ACPI_STATE_C3].address); 263 264 snprintf(pr->power.states[ACPI_STATE_C2].desc, 265 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x", 266 pr->power.states[ACPI_STATE_C2].address); 267 snprintf(pr->power.states[ACPI_STATE_C3].desc, 268 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", 269 pr->power.states[ACPI_STATE_C3].address); 270 271 if (!pr->power.states[ACPI_STATE_C2].address && 272 !pr->power.states[ACPI_STATE_C3].address) 273 return -ENODEV; 274 275 return 0; 276 } 277 278 static int acpi_processor_get_power_info_default(struct acpi_processor *pr) 279 { 280 if (!pr->power.states[ACPI_STATE_C1].valid) { 281 /* set the first C-State to C1 */ 282 /* all processors need to support C1 */ 283 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; 284 pr->power.states[ACPI_STATE_C1].valid = 1; 285 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; 286 287 snprintf(pr->power.states[ACPI_STATE_C1].desc, 288 ACPI_CX_DESC_LEN, "ACPI HLT"); 289 } 290 /* the C0 state only exists as a filler in our array */ 291 pr->power.states[ACPI_STATE_C0].valid = 1; 292 return 0; 293 } 294 295 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) 296 { 297 int ret; 298 299 if (nocst) 300 return -ENODEV; 301 302 ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power); 303 if (ret) 304 return ret; 305 306 if (!pr->power.count) 307 return -EFAULT; 308 309 pr->flags.has_cst = 1; 310 return 0; 311 } 312 313 static void acpi_processor_power_verify_c3(struct acpi_processor *pr, 314 struct acpi_processor_cx *cx) 315 { 316 static int bm_check_flag = -1; 317 static int bm_control_flag = -1; 318 319 320 if (!cx->address) 321 return; 322 323 /* 324 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast) 325 * DMA transfers are used by any ISA device to avoid livelock. 326 * Note that we could disable Type-F DMA (as recommended by 327 * the erratum), but this is known to disrupt certain ISA 328 * devices thus we take the conservative approach. 329 */ 330 if (errata.piix4.fdma) { 331 acpi_handle_debug(pr->handle, 332 "C3 not supported on PIIX4 with Type-F DMA\n"); 333 return; 334 } 335 336 /* All the logic here assumes flags.bm_check is same across all CPUs */ 337 if (bm_check_flag == -1) { 338 /* Determine whether bm_check is needed based on CPU */ 339 acpi_processor_power_init_bm_check(&(pr->flags), pr->id); 340 bm_check_flag = pr->flags.bm_check; 341 bm_control_flag = pr->flags.bm_control; 342 } else { 343 pr->flags.bm_check = bm_check_flag; 344 pr->flags.bm_control = bm_control_flag; 345 } 346 347 if (pr->flags.bm_check) { 348 if (!pr->flags.bm_control) { 349 if (pr->flags.has_cst != 1) { 350 /* bus mastering control is necessary */ 351 acpi_handle_debug(pr->handle, 352 "C3 support requires BM control\n"); 353 return; 354 } else { 355 /* Here we enter C3 without bus mastering */ 356 acpi_handle_debug(pr->handle, 357 "C3 support without BM control\n"); 358 } 359 } 360 } else { 361 /* 362 * WBINVD should be set in fadt, for C3 state to be 363 * supported on when bm_check is not required. 364 */ 365 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { 366 acpi_handle_debug(pr->handle, 367 "Cache invalidation should work properly" 368 " for C3 to be enabled on SMP systems\n"); 369 return; 370 } 371 } 372 373 /* 374 * Otherwise we've met all of our C3 requirements. 375 * Normalize the C3 latency to expidite policy. Enable 376 * checking of bus mastering status (bm_check) so we can 377 * use this in our C3 policy 378 */ 379 cx->valid = 1; 380 381 /* 382 * On older chipsets, BM_RLD needs to be set 383 * in order for Bus Master activity to wake the 384 * system from C3. Newer chipsets handle DMA 385 * during C3 automatically and BM_RLD is a NOP. 386 * In either case, the proper way to 387 * handle BM_RLD is to set it and leave it set. 388 */ 389 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1); 390 } 391 392 static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length) 393 { 394 int i, j, k; 395 396 for (i = 1; i < length; i++) { 397 if (!states[i].valid) 398 continue; 399 400 for (j = i - 1, k = i; j >= 0; j--) { 401 if (!states[j].valid) 402 continue; 403 404 if (states[j].latency > states[k].latency) 405 swap(states[j].latency, states[k].latency); 406 407 k = j; 408 } 409 } 410 } 411 412 static int acpi_processor_power_verify(struct acpi_processor *pr) 413 { 414 unsigned int i; 415 unsigned int working = 0; 416 unsigned int last_latency = 0; 417 unsigned int last_type = 0; 418 bool buggy_latency = false; 419 420 pr->power.timer_broadcast_on_state = INT_MAX; 421 422 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 423 struct acpi_processor_cx *cx = &pr->power.states[i]; 424 425 switch (cx->type) { 426 case ACPI_STATE_C1: 427 cx->valid = 1; 428 break; 429 430 case ACPI_STATE_C2: 431 if (!cx->address) 432 break; 433 cx->valid = 1; 434 break; 435 436 case ACPI_STATE_C3: 437 acpi_processor_power_verify_c3(pr, cx); 438 break; 439 } 440 if (!cx->valid) 441 continue; 442 if (cx->type >= last_type && cx->latency < last_latency) 443 buggy_latency = true; 444 last_latency = cx->latency; 445 last_type = cx->type; 446 447 lapic_timer_check_state(i, pr, cx); 448 tsc_check_state(cx->type); 449 working++; 450 } 451 452 if (buggy_latency) { 453 pr_notice("FW issue: working around C-state latencies out of order\n"); 454 acpi_cst_latency_sort(&pr->power.states[1], max_cstate); 455 } 456 457 lapic_timer_propagate_broadcast(pr); 458 459 return working; 460 } 461 462 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 463 { 464 unsigned int i; 465 int result; 466 467 468 /* NOTE: the idle thread may not be running while calling 469 * this function */ 470 471 /* Zero initialize all the C-states info. */ 472 memset(pr->power.states, 0, sizeof(pr->power.states)); 473 474 result = acpi_processor_get_power_info_cst(pr); 475 if (result == -ENODEV) 476 result = acpi_processor_get_power_info_fadt(pr); 477 478 if (result) 479 return result; 480 481 acpi_processor_get_power_info_default(pr); 482 483 pr->power.count = acpi_processor_power_verify(pr); 484 485 /* 486 * if one state of type C2 or C3 is available, mark this 487 * CPU as being "idle manageable" 488 */ 489 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { 490 if (pr->power.states[i].valid) { 491 pr->power.count = i; 492 pr->flags.power = 1; 493 } 494 } 495 496 return 0; 497 } 498 499 /** 500 * acpi_idle_bm_check - checks if bus master activity was detected 501 */ 502 static int acpi_idle_bm_check(void) 503 { 504 u32 bm_status = 0; 505 506 if (bm_check_disable) 507 return 0; 508 509 acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); 510 if (bm_status) 511 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); 512 /* 513 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 514 * the true state of bus mastering activity; forcing us to 515 * manually check the BMIDEA bit of each IDE channel. 516 */ 517 else if (errata.piix4.bmisx) { 518 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) 519 || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) 520 bm_status = 1; 521 } 522 return bm_status; 523 } 524 525 static __cpuidle void io_idle(unsigned long addr) 526 { 527 /* IO port based C-state */ 528 inb(addr); 529 530 #ifdef CONFIG_X86 531 /* No delay is needed if we are in guest */ 532 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 533 return; 534 /* 535 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle, 536 * not this code. Assume that any Intel systems using this 537 * are ancient and may need the dummy wait. This also assumes 538 * that the motivating chipset issue was Intel-only. 539 */ 540 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 541 return; 542 #endif 543 /* 544 * Dummy wait op - must do something useless after P_LVL2 read 545 * because chipsets cannot guarantee that STPCLK# signal gets 546 * asserted in time to freeze execution properly 547 * 548 * This workaround has been in place since the original ACPI 549 * implementation was merged, circa 2002. 550 * 551 * If a profile is pointing to this instruction, please first 552 * consider moving your system to a more modern idle 553 * mechanism. 554 */ 555 inl(acpi_gbl_FADT.xpm_timer_block.address); 556 } 557 558 /** 559 * acpi_idle_do_entry - enter idle state using the appropriate method 560 * @cx: cstate data 561 * 562 * Caller disables interrupt before call and enables interrupt after return. 563 */ 564 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) 565 { 566 perf_lopwr_cb(true); 567 568 if (cx->entry_method == ACPI_CSTATE_FFH) { 569 /* Call into architectural FFH based C-state */ 570 acpi_processor_ffh_cstate_enter(cx); 571 } else if (cx->entry_method == ACPI_CSTATE_HALT) { 572 acpi_safe_halt(); 573 } else { 574 io_idle(cx->address); 575 } 576 577 perf_lopwr_cb(false); 578 } 579 580 /** 581 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining) 582 * @dev: the target CPU 583 * @index: the index of suggested state 584 */ 585 static void acpi_idle_play_dead(struct cpuidle_device *dev, int index) 586 { 587 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 588 589 ACPI_FLUSH_CPU_CACHE(); 590 591 while (1) { 592 593 if (cx->entry_method == ACPI_CSTATE_HALT) 594 raw_safe_halt(); 595 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { 596 io_idle(cx->address); 597 } else if (cx->entry_method == ACPI_CSTATE_FFH) { 598 acpi_processor_ffh_play_dead(cx); 599 } else 600 return; 601 } 602 } 603 604 static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) 605 { 606 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && 607 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); 608 } 609 610 static int c3_cpu_count; 611 static DEFINE_RAW_SPINLOCK(c3_lock); 612 613 /** 614 * acpi_idle_enter_bm - enters C3 with proper BM handling 615 * @drv: cpuidle driver 616 * @pr: Target processor 617 * @cx: Target state context 618 * @index: index of target state 619 */ 620 static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv, 621 struct acpi_processor *pr, 622 struct acpi_processor_cx *cx, 623 int index) 624 { 625 static struct acpi_processor_cx safe_cx = { 626 .entry_method = ACPI_CSTATE_HALT, 627 }; 628 629 /* 630 * disable bus master 631 * bm_check implies we need ARB_DIS 632 * bm_control implies whether we can do ARB_DIS 633 * 634 * That leaves a case where bm_check is set and bm_control is not set. 635 * In that case we cannot do much, we enter C3 without doing anything. 636 */ 637 bool dis_bm = pr->flags.bm_control; 638 639 instrumentation_begin(); 640 641 /* If we can skip BM, demote to a safe state. */ 642 if (!cx->bm_sts_skip && acpi_idle_bm_check()) { 643 dis_bm = false; 644 index = drv->safe_state_index; 645 if (index >= 0) { 646 cx = this_cpu_read(acpi_cstate[index]); 647 } else { 648 cx = &safe_cx; 649 index = -EBUSY; 650 } 651 } 652 653 if (dis_bm) { 654 raw_spin_lock(&c3_lock); 655 c3_cpu_count++; 656 /* Disable bus master arbitration when all CPUs are in C3 */ 657 if (c3_cpu_count == num_online_cpus()) 658 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 659 raw_spin_unlock(&c3_lock); 660 } 661 662 ct_cpuidle_enter(); 663 664 acpi_idle_do_entry(cx); 665 666 ct_cpuidle_exit(); 667 668 /* Re-enable bus master arbitration */ 669 if (dis_bm) { 670 raw_spin_lock(&c3_lock); 671 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 672 c3_cpu_count--; 673 raw_spin_unlock(&c3_lock); 674 } 675 676 instrumentation_end(); 677 678 return index; 679 } 680 681 static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev, 682 struct cpuidle_driver *drv, int index) 683 { 684 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 685 struct acpi_processor *pr; 686 687 pr = __this_cpu_read(processors); 688 if (unlikely(!pr)) 689 return -EINVAL; 690 691 if (cx->type != ACPI_STATE_C1) { 692 if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) 693 return acpi_idle_enter_bm(drv, pr, cx, index); 694 695 /* C2 to C1 demotion. */ 696 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { 697 index = ACPI_IDLE_STATE_START; 698 cx = per_cpu(acpi_cstate[index], dev->cpu); 699 } 700 } 701 702 if (cx->type == ACPI_STATE_C3) 703 ACPI_FLUSH_CPU_CACHE(); 704 705 acpi_idle_do_entry(cx); 706 707 return index; 708 } 709 710 static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev, 711 struct cpuidle_driver *drv, int index) 712 { 713 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); 714 715 if (cx->type == ACPI_STATE_C3) { 716 struct acpi_processor *pr = __this_cpu_read(processors); 717 718 if (unlikely(!pr)) 719 return 0; 720 721 if (pr->flags.bm_check) { 722 u8 bm_sts_skip = cx->bm_sts_skip; 723 724 /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */ 725 cx->bm_sts_skip = 1; 726 acpi_idle_enter_bm(drv, pr, cx, index); 727 cx->bm_sts_skip = bm_sts_skip; 728 729 return 0; 730 } else { 731 ACPI_FLUSH_CPU_CACHE(); 732 } 733 } 734 acpi_idle_do_entry(cx); 735 736 return 0; 737 } 738 739 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 740 struct cpuidle_device *dev) 741 { 742 int i, count = ACPI_IDLE_STATE_START; 743 struct acpi_processor_cx *cx; 744 struct cpuidle_state *state; 745 746 if (max_cstate == 0) 747 max_cstate = 1; 748 749 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 750 state = &acpi_idle_driver.states[count]; 751 cx = &pr->power.states[i]; 752 753 if (!cx->valid) 754 continue; 755 756 per_cpu(acpi_cstate[count], dev->cpu) = cx; 757 758 if (lapic_timer_needs_broadcast(pr, cx)) 759 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 760 761 if (cx->type == ACPI_STATE_C3) { 762 state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; 763 if (pr->flags.bm_check) 764 state->flags |= CPUIDLE_FLAG_RCU_IDLE; 765 } 766 767 count++; 768 if (count == CPUIDLE_STATE_MAX) 769 break; 770 } 771 772 if (!count) 773 return -EINVAL; 774 775 return 0; 776 } 777 778 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 779 { 780 int i, count; 781 struct acpi_processor_cx *cx; 782 struct cpuidle_state *state; 783 struct cpuidle_driver *drv = &acpi_idle_driver; 784 785 if (max_cstate == 0) 786 max_cstate = 1; 787 788 if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) { 789 cpuidle_poll_state_init(drv); 790 count = 1; 791 } else { 792 count = 0; 793 } 794 795 for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { 796 cx = &pr->power.states[i]; 797 798 if (!cx->valid) 799 continue; 800 801 state = &drv->states[count]; 802 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); 803 strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 804 state->exit_latency = cx->latency; 805 state->target_residency = cx->latency * latency_factor; 806 state->enter = acpi_idle_enter; 807 808 state->flags = 0; 809 810 state->enter_dead = acpi_idle_play_dead; 811 812 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) 813 drv->safe_state_index = count; 814 815 /* 816 * Halt-induced C1 is not good for ->enter_s2idle, because it 817 * re-enables interrupts on exit. Moreover, C1 is generally not 818 * particularly interesting from the suspend-to-idle angle, so 819 * avoid C1 and the situations in which we may need to fall back 820 * to it altogether. 821 */ 822 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 823 state->enter_s2idle = acpi_idle_enter_s2idle; 824 825 count++; 826 if (count == CPUIDLE_STATE_MAX) 827 break; 828 } 829 830 drv->state_count = count; 831 832 if (!count) 833 return -EINVAL; 834 835 return 0; 836 } 837 838 static inline void acpi_processor_cstate_first_run_checks(void) 839 { 840 static int first_run; 841 842 if (first_run) 843 return; 844 dmi_check_system(processor_power_dmi_table); 845 max_cstate = acpi_processor_cstate_check(max_cstate); 846 if (max_cstate < ACPI_C_STATES_MAX) 847 pr_notice("processor limited to max C-state %d\n", max_cstate); 848 849 first_run++; 850 851 if (nocst) 852 return; 853 854 acpi_processor_claim_cst_control(); 855 } 856 #else 857 858 static inline int disabled_by_idle_boot_param(void) { return 0; } 859 static inline void acpi_processor_cstate_first_run_checks(void) { } 860 static int acpi_processor_get_cstate_info(struct acpi_processor *pr) 861 { 862 return -ENODEV; 863 } 864 865 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, 866 struct cpuidle_device *dev) 867 { 868 return -EINVAL; 869 } 870 871 static int acpi_processor_setup_cstates(struct acpi_processor *pr) 872 { 873 return -EINVAL; 874 } 875 876 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */ 877 878 struct acpi_lpi_states_array { 879 unsigned int size; 880 unsigned int composite_states_size; 881 struct acpi_lpi_state *entries; 882 struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER]; 883 }; 884 885 static int obj_get_integer(union acpi_object *obj, u32 *value) 886 { 887 if (obj->type != ACPI_TYPE_INTEGER) 888 return -EINVAL; 889 890 *value = obj->integer.value; 891 return 0; 892 } 893 894 static int acpi_processor_evaluate_lpi(acpi_handle handle, 895 struct acpi_lpi_states_array *info) 896 { 897 acpi_status status; 898 int ret = 0; 899 int pkg_count, state_idx = 1, loop; 900 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 901 union acpi_object *lpi_data; 902 struct acpi_lpi_state *lpi_state; 903 904 status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer); 905 if (ACPI_FAILURE(status)) { 906 acpi_handle_debug(handle, "No _LPI, giving up\n"); 907 return -ENODEV; 908 } 909 910 lpi_data = buffer.pointer; 911 912 /* There must be at least 4 elements = 3 elements + 1 package */ 913 if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE || 914 lpi_data->package.count < 4) { 915 pr_debug("not enough elements in _LPI\n"); 916 ret = -ENODATA; 917 goto end; 918 } 919 920 pkg_count = lpi_data->package.elements[2].integer.value; 921 922 /* Validate number of power states. */ 923 if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) { 924 pr_debug("count given by _LPI is not valid\n"); 925 ret = -ENODATA; 926 goto end; 927 } 928 929 lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL); 930 if (!lpi_state) { 931 ret = -ENOMEM; 932 goto end; 933 } 934 935 info->size = pkg_count; 936 info->entries = lpi_state; 937 938 /* LPI States start at index 3 */ 939 for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) { 940 union acpi_object *element, *pkg_elem, *obj; 941 942 element = &lpi_data->package.elements[loop]; 943 if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7) 944 continue; 945 946 pkg_elem = element->package.elements; 947 948 obj = pkg_elem + 6; 949 if (obj->type == ACPI_TYPE_BUFFER) { 950 struct acpi_power_register *reg; 951 952 reg = (struct acpi_power_register *)obj->buffer.pointer; 953 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO && 954 reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) 955 continue; 956 957 lpi_state->address = reg->address; 958 lpi_state->entry_method = 959 reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ? 960 ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO; 961 } else if (obj->type == ACPI_TYPE_INTEGER) { 962 lpi_state->entry_method = ACPI_CSTATE_INTEGER; 963 lpi_state->address = obj->integer.value; 964 } else { 965 continue; 966 } 967 968 /* elements[7,8] skipped for now i.e. Residency/Usage counter*/ 969 970 obj = pkg_elem + 9; 971 if (obj->type == ACPI_TYPE_STRING) 972 strscpy(lpi_state->desc, obj->string.pointer, 973 ACPI_CX_DESC_LEN); 974 975 lpi_state->index = state_idx; 976 if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) { 977 pr_debug("No min. residency found, assuming 10 us\n"); 978 lpi_state->min_residency = 10; 979 } 980 981 if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) { 982 pr_debug("No wakeup residency found, assuming 10 us\n"); 983 lpi_state->wake_latency = 10; 984 } 985 986 if (obj_get_integer(pkg_elem + 2, &lpi_state->flags)) 987 lpi_state->flags = 0; 988 989 if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags)) 990 lpi_state->arch_flags = 0; 991 992 if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq)) 993 lpi_state->res_cnt_freq = 1; 994 995 if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state)) 996 lpi_state->enable_parent_state = 0; 997 } 998 999 acpi_handle_debug(handle, "Found %d power states\n", state_idx); 1000 end: 1001 kfree(buffer.pointer); 1002 return ret; 1003 } 1004 1005 /* 1006 * flat_state_cnt - the number of composite LPI states after the process of flattening 1007 */ 1008 static int flat_state_cnt; 1009 1010 /** 1011 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state 1012 * 1013 * @local: local LPI state 1014 * @parent: parent LPI state 1015 * @result: composite LPI state 1016 */ 1017 static bool combine_lpi_states(struct acpi_lpi_state *local, 1018 struct acpi_lpi_state *parent, 1019 struct acpi_lpi_state *result) 1020 { 1021 if (parent->entry_method == ACPI_CSTATE_INTEGER) { 1022 if (!parent->address) /* 0 means autopromotable */ 1023 return false; 1024 result->address = local->address + parent->address; 1025 } else { 1026 result->address = parent->address; 1027 } 1028 1029 result->min_residency = max(local->min_residency, parent->min_residency); 1030 result->wake_latency = local->wake_latency + parent->wake_latency; 1031 result->enable_parent_state = parent->enable_parent_state; 1032 result->entry_method = local->entry_method; 1033 1034 result->flags = parent->flags; 1035 result->arch_flags = parent->arch_flags; 1036 result->index = parent->index; 1037 1038 strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN); 1039 strlcat(result->desc, "+", ACPI_CX_DESC_LEN); 1040 strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); 1041 return true; 1042 } 1043 1044 #define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0) 1045 1046 static void stash_composite_state(struct acpi_lpi_states_array *curr_level, 1047 struct acpi_lpi_state *t) 1048 { 1049 curr_level->composite_states[curr_level->composite_states_size++] = t; 1050 } 1051 1052 static int flatten_lpi_states(struct acpi_processor *pr, 1053 struct acpi_lpi_states_array *curr_level, 1054 struct acpi_lpi_states_array *prev_level) 1055 { 1056 int i, j, state_count = curr_level->size; 1057 struct acpi_lpi_state *p, *t = curr_level->entries; 1058 1059 curr_level->composite_states_size = 0; 1060 for (j = 0; j < state_count; j++, t++) { 1061 struct acpi_lpi_state *flpi; 1062 1063 if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED)) 1064 continue; 1065 1066 if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) { 1067 pr_warn("Limiting number of LPI states to max (%d)\n", 1068 ACPI_PROCESSOR_MAX_POWER); 1069 pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n"); 1070 break; 1071 } 1072 1073 flpi = &pr->power.lpi_states[flat_state_cnt]; 1074 1075 if (!prev_level) { /* leaf/processor node */ 1076 memcpy(flpi, t, sizeof(*t)); 1077 stash_composite_state(curr_level, flpi); 1078 flat_state_cnt++; 1079 continue; 1080 } 1081 1082 for (i = 0; i < prev_level->composite_states_size; i++) { 1083 p = prev_level->composite_states[i]; 1084 if (t->index <= p->enable_parent_state && 1085 combine_lpi_states(p, t, flpi)) { 1086 stash_composite_state(curr_level, flpi); 1087 flat_state_cnt++; 1088 flpi++; 1089 } 1090 } 1091 } 1092 1093 kfree(curr_level->entries); 1094 return 0; 1095 } 1096 1097 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1098 { 1099 return -EOPNOTSUPP; 1100 } 1101 1102 static int acpi_processor_get_lpi_info(struct acpi_processor *pr) 1103 { 1104 int ret, i; 1105 acpi_status status; 1106 acpi_handle handle = pr->handle, pr_ahandle; 1107 struct acpi_device *d = NULL; 1108 struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; 1109 1110 /* make sure our architecture has support */ 1111 ret = acpi_processor_ffh_lpi_probe(pr->id); 1112 if (ret == -EOPNOTSUPP) 1113 return ret; 1114 1115 if (!osc_pc_lpi_support_confirmed) 1116 return -EOPNOTSUPP; 1117 1118 if (!acpi_has_method(handle, "_LPI")) 1119 return -EINVAL; 1120 1121 flat_state_cnt = 0; 1122 prev = &info[0]; 1123 curr = &info[1]; 1124 handle = pr->handle; 1125 ret = acpi_processor_evaluate_lpi(handle, prev); 1126 if (ret) 1127 return ret; 1128 flatten_lpi_states(pr, prev, NULL); 1129 1130 status = acpi_get_parent(handle, &pr_ahandle); 1131 while (ACPI_SUCCESS(status)) { 1132 d = acpi_fetch_acpi_dev(pr_ahandle); 1133 if (!d) 1134 break; 1135 1136 handle = pr_ahandle; 1137 1138 if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) 1139 break; 1140 1141 /* can be optional ? */ 1142 if (!acpi_has_method(handle, "_LPI")) 1143 break; 1144 1145 ret = acpi_processor_evaluate_lpi(handle, curr); 1146 if (ret) 1147 break; 1148 1149 /* flatten all the LPI states in this level of hierarchy */ 1150 flatten_lpi_states(pr, curr, prev); 1151 1152 tmp = prev, prev = curr, curr = tmp; 1153 1154 status = acpi_get_parent(handle, &pr_ahandle); 1155 } 1156 1157 pr->power.count = flat_state_cnt; 1158 /* reset the index after flattening */ 1159 for (i = 0; i < pr->power.count; i++) 1160 pr->power.lpi_states[i].index = i; 1161 1162 /* Tell driver that _LPI is supported. */ 1163 pr->flags.has_lpi = 1; 1164 pr->flags.power = 1; 1165 1166 return 0; 1167 } 1168 1169 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) 1170 { 1171 return -ENODEV; 1172 } 1173 1174 /** 1175 * acpi_idle_lpi_enter - enters an ACPI any LPI state 1176 * @dev: the target CPU 1177 * @drv: cpuidle driver containing cpuidle state info 1178 * @index: index of target state 1179 * 1180 * Return: 0 for success or negative value for error 1181 */ 1182 static int acpi_idle_lpi_enter(struct cpuidle_device *dev, 1183 struct cpuidle_driver *drv, int index) 1184 { 1185 struct acpi_processor *pr; 1186 struct acpi_lpi_state *lpi; 1187 1188 pr = __this_cpu_read(processors); 1189 1190 if (unlikely(!pr)) 1191 return -EINVAL; 1192 1193 lpi = &pr->power.lpi_states[index]; 1194 if (lpi->entry_method == ACPI_CSTATE_FFH) 1195 return acpi_processor_ffh_lpi_enter(lpi); 1196 1197 return -EINVAL; 1198 } 1199 1200 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr) 1201 { 1202 int i; 1203 struct acpi_lpi_state *lpi; 1204 struct cpuidle_state *state; 1205 struct cpuidle_driver *drv = &acpi_idle_driver; 1206 1207 if (!pr->flags.has_lpi) 1208 return -EOPNOTSUPP; 1209 1210 for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) { 1211 lpi = &pr->power.lpi_states[i]; 1212 1213 state = &drv->states[i]; 1214 snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); 1215 strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); 1216 state->exit_latency = lpi->wake_latency; 1217 state->target_residency = lpi->min_residency; 1218 state->flags |= arch_get_idle_state_flags(lpi->arch_flags); 1219 if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH) 1220 state->flags |= CPUIDLE_FLAG_RCU_IDLE; 1221 state->enter = acpi_idle_lpi_enter; 1222 drv->safe_state_index = i; 1223 } 1224 1225 drv->state_count = i; 1226 1227 return 0; 1228 } 1229 1230 /** 1231 * acpi_processor_setup_cpuidle_states- prepares and configures cpuidle 1232 * global state data i.e. idle routines 1233 * 1234 * @pr: the ACPI processor 1235 */ 1236 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) 1237 { 1238 int i; 1239 struct cpuidle_driver *drv = &acpi_idle_driver; 1240 1241 if (!pr->flags.power_setup_done || !pr->flags.power) 1242 return -EINVAL; 1243 1244 drv->safe_state_index = -1; 1245 for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) { 1246 drv->states[i].name[0] = '\0'; 1247 drv->states[i].desc[0] = '\0'; 1248 } 1249 1250 if (pr->flags.has_lpi) 1251 return acpi_processor_setup_lpi_states(pr); 1252 1253 return acpi_processor_setup_cstates(pr); 1254 } 1255 1256 /** 1257 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE 1258 * device i.e. per-cpu data 1259 * 1260 * @pr: the ACPI processor 1261 * @dev : the cpuidle device 1262 */ 1263 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr, 1264 struct cpuidle_device *dev) 1265 { 1266 if (!pr->flags.power_setup_done || !pr->flags.power || !dev) 1267 return -EINVAL; 1268 1269 dev->cpu = pr->id; 1270 if (pr->flags.has_lpi) 1271 return acpi_processor_ffh_lpi_probe(pr->id); 1272 1273 return acpi_processor_setup_cpuidle_cx(pr, dev); 1274 } 1275 1276 static int acpi_processor_get_power_info(struct acpi_processor *pr) 1277 { 1278 int ret; 1279 1280 ret = acpi_processor_get_lpi_info(pr); 1281 if (ret) 1282 ret = acpi_processor_get_cstate_info(pr); 1283 1284 return ret; 1285 } 1286 1287 int acpi_processor_hotplug(struct acpi_processor *pr) 1288 { 1289 int ret = 0; 1290 struct cpuidle_device *dev; 1291 1292 if (disabled_by_idle_boot_param()) 1293 return 0; 1294 1295 if (!pr->flags.power_setup_done) 1296 return -ENODEV; 1297 1298 dev = per_cpu(acpi_cpuidle_device, pr->id); 1299 cpuidle_pause_and_lock(); 1300 cpuidle_disable_device(dev); 1301 ret = acpi_processor_get_power_info(pr); 1302 if (!ret && pr->flags.power) { 1303 acpi_processor_setup_cpuidle_dev(pr, dev); 1304 ret = cpuidle_enable_device(dev); 1305 } 1306 cpuidle_resume_and_unlock(); 1307 1308 return ret; 1309 } 1310 1311 int acpi_processor_power_state_has_changed(struct acpi_processor *pr) 1312 { 1313 int cpu; 1314 struct acpi_processor *_pr; 1315 struct cpuidle_device *dev; 1316 1317 if (disabled_by_idle_boot_param()) 1318 return 0; 1319 1320 if (!pr->flags.power_setup_done) 1321 return -ENODEV; 1322 1323 /* 1324 * FIXME: Design the ACPI notification to make it once per 1325 * system instead of once per-cpu. This condition is a hack 1326 * to make the code that updates C-States be called once. 1327 */ 1328 1329 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { 1330 1331 /* Protect against cpu-hotplug */ 1332 cpus_read_lock(); 1333 cpuidle_pause_and_lock(); 1334 1335 /* Disable all cpuidle devices */ 1336 for_each_online_cpu(cpu) { 1337 _pr = per_cpu(processors, cpu); 1338 if (!_pr || !_pr->flags.power_setup_done) 1339 continue; 1340 dev = per_cpu(acpi_cpuidle_device, cpu); 1341 cpuidle_disable_device(dev); 1342 } 1343 1344 /* Populate Updated C-state information */ 1345 acpi_processor_get_power_info(pr); 1346 acpi_processor_setup_cpuidle_states(pr); 1347 1348 /* Enable all cpuidle devices */ 1349 for_each_online_cpu(cpu) { 1350 _pr = per_cpu(processors, cpu); 1351 if (!_pr || !_pr->flags.power_setup_done) 1352 continue; 1353 acpi_processor_get_power_info(_pr); 1354 if (_pr->flags.power) { 1355 dev = per_cpu(acpi_cpuidle_device, cpu); 1356 acpi_processor_setup_cpuidle_dev(_pr, dev); 1357 cpuidle_enable_device(dev); 1358 } 1359 } 1360 cpuidle_resume_and_unlock(); 1361 cpus_read_unlock(); 1362 } 1363 1364 return 0; 1365 } 1366 1367 static int acpi_processor_registered; 1368 1369 int acpi_processor_power_init(struct acpi_processor *pr) 1370 { 1371 int retval; 1372 struct cpuidle_device *dev; 1373 1374 if (disabled_by_idle_boot_param()) 1375 return 0; 1376 1377 acpi_processor_cstate_first_run_checks(); 1378 1379 if (!acpi_processor_get_power_info(pr)) 1380 pr->flags.power_setup_done = 1; 1381 1382 /* 1383 * Install the idle handler if processor power management is supported. 1384 * Note that we use previously set idle handler will be used on 1385 * platforms that only support C1. 1386 */ 1387 if (pr->flags.power) { 1388 /* Register acpi_idle_driver if not already registered */ 1389 if (!acpi_processor_registered) { 1390 acpi_processor_setup_cpuidle_states(pr); 1391 retval = cpuidle_register_driver(&acpi_idle_driver); 1392 if (retval) 1393 return retval; 1394 pr_debug("%s registered with cpuidle\n", 1395 acpi_idle_driver.name); 1396 } 1397 1398 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1399 if (!dev) 1400 return -ENOMEM; 1401 per_cpu(acpi_cpuidle_device, pr->id) = dev; 1402 1403 acpi_processor_setup_cpuidle_dev(pr, dev); 1404 1405 /* Register per-cpu cpuidle_device. Cpuidle driver 1406 * must already be registered before registering device 1407 */ 1408 retval = cpuidle_register_device(dev); 1409 if (retval) { 1410 if (acpi_processor_registered == 0) 1411 cpuidle_unregister_driver(&acpi_idle_driver); 1412 return retval; 1413 } 1414 acpi_processor_registered++; 1415 } 1416 return 0; 1417 } 1418 1419 int acpi_processor_power_exit(struct acpi_processor *pr) 1420 { 1421 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); 1422 1423 if (disabled_by_idle_boot_param()) 1424 return 0; 1425 1426 if (pr->flags.power) { 1427 cpuidle_unregister_device(dev); 1428 acpi_processor_registered--; 1429 if (acpi_processor_registered == 0) 1430 cpuidle_unregister_driver(&acpi_idle_driver); 1431 1432 kfree(dev); 1433 } 1434 1435 pr->flags.power_setup_done = 0; 1436 return 0; 1437 } 1438