// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2013 - 2020, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

/*
 * intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have same idle states as boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 *
 * CPU will flush caches as needed when entering a C-state via MWAIT
 *	(in contrast to entering ACPI C3, in which case the WBINVD
 *	instruction needs to be executed to flush the caches)
 */

/*
 * Known limitations
 *
 * ACPI has a .suspend hack to turn off deep c-states during suspend
 * to avoid complications with the lapic timer workaround.
 * Have not seen issues with suspend, but may need same workaround here.
 *
 */

/* un-comment DEBUG to enable pr_debug() statements */
/* #define DEBUG */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpuid/api.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#include <asm/smp.h>

#define INTEL_IDLE_VERSION "0.5.1"

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
/* Module-parameter-style knobs; presumably bound via module_param elsewhere in the file — TODO confirm. */
static unsigned int disabled_states_mask __read_mostly;
static unsigned int preferred_states_mask __read_mostly;
static bool force_irq_on __read_mostly;
static bool ibrs_off __read_mostly;

/* Per-CPU cpuidle device instances registered by this driver. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;

static unsigned long auto_demotion_disable_flags;

/*
 * Policy for the hardware "C1E promotion" setting: preserve whatever the
 * firmware configured (the default), or explicitly enable/disable it.
 */
static enum {
	C1E_PROMOTION_PRESERVE,
	C1E_PROMOTION_ENABLE,
	C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;

/* Per-CPU-model description: idle-state table plus model-specific quirks. */
struct idle_cpu {
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	bool disable_promotion_to_c1e;
	/* NOTE(review): presumably "also consult ACPI _CST on this model" — confirm against init code. */
	bool use_acpi;
};

/* Model match result and its state table, used only during __init. */
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;

/* Snapshot of CPUID MWAIT sub-state enumeration, used only during __init. */
static unsigned int mwait_substates __initdata;

/*
 * Enable interrupts before entering the C-state.
 On some platforms and for
 * some C-states, this may measurably decrease interrupt latency.
 */
#define CPUIDLE_FLAG_IRQ_ENABLE		BIT(14)

/*
 * Enable this state by default even if the ACPI _CST does not list it.
 */
#define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)

/*
 * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
 * above.
 */
#define CPUIDLE_FLAG_IBRS		BIT(16)

/*
 * Initialize large xstate for the C6-state entrance.
 */
#define CPUIDLE_FLAG_INIT_XSTATE	BIT(17)

/*
 * Ignore the sub-state when matching mwait hints between the ACPI _CST and
 * custom tables.
 */
#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH	BIT(18)

/*
 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 * the C-state (top nibble) and sub-state (bottom nibble)
 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 *
 * We store the hint at the top of our "flags" for each state.
 */
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax) ((eax & 0xFF) << 24)

/*
 * __intel_idle - common MWAIT-based idle entry.
 *
 * Extracts the MWAIT hint from the target state's flags and executes MWAIT
 * with it.  @irqoff selects whether ECX bit 0 ("break on interrupt" even
 * with interrupts disabled) is set.  Returns the entered state index, as
 * required by the cpuidle ->enter() contract.
 */
static __always_inline int __intel_idle(struct cpuidle_device *dev,
					struct cpuidle_driver *drv,
					int index, bool irqoff)
{
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);
	unsigned long ecx = 1*irqoff; /* break on interrupt flag */

	mwait_idle_with_hints(eax, ecx);

	return index;
}

/**
 * intel_idle - Ask the processor to enter the given idle state.
 * @dev: cpuidle device of the target CPU.
 * @drv: cpuidle driver (assumed to point to intel_idle_driver).
 * @index: Target idle state index.
 *
 * Use the MWAIT instruction to notify the processor that the CPU represented by
 * @dev is idle and it can try to enter the idle state corresponding to @index.
163 * 164 * If the local APIC timer is not known to be reliable in the target idle state, 165 * enable one-shot tick broadcasting for the target CPU before executing MWAIT. 166 * 167 * Must be called under local_irq_disable(). 168 */ 169 static __cpuidle int intel_idle(struct cpuidle_device *dev, 170 struct cpuidle_driver *drv, int index) 171 { 172 return __intel_idle(dev, drv, index, true); 173 } 174 175 static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, 176 struct cpuidle_driver *drv, int index) 177 { 178 return __intel_idle(dev, drv, index, false); 179 } 180 181 static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, 182 struct cpuidle_driver *drv, int index) 183 { 184 bool smt_active = sched_smt_active(); 185 u64 spec_ctrl = spec_ctrl_current(); 186 int ret; 187 188 if (smt_active) 189 __update_spec_ctrl(0); 190 191 ret = __intel_idle(dev, drv, index, true); 192 193 if (smt_active) 194 __update_spec_ctrl(spec_ctrl); 195 196 return ret; 197 } 198 199 static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev, 200 struct cpuidle_driver *drv, int index) 201 { 202 fpu_idle_fpregs(); 203 return __intel_idle(dev, drv, index, true); 204 } 205 206 /** 207 * intel_idle_s2idle - Ask the processor to enter the given idle state. 208 * @dev: cpuidle device of the target CPU. 209 * @drv: cpuidle driver (assumed to point to intel_idle_driver). 210 * @index: Target idle state index. 211 * 212 * Use the MWAIT instruction to notify the processor that the CPU represented by 213 * @dev is idle and it can try to enter the idle state corresponding to @index. 214 * 215 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen 216 * scheduler tick and suspended scheduler clock on the target CPU. 
 */
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);

	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
		fpu_idle_fpregs();

	mwait_idle_with_hints(eax, ecx);

	/* ->enter_s2idle() returns 0 on success rather than a state index. */
	return 0;
}

/*
 * intel_idle_enter_dead - idle entry for an offlined CPU, executing MWAIT
 * with the target state's hint via mwait_play_dead().
 */
static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);

	mwait_play_dead(eax);
}

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 *
 * Note: exit_latency and target_residency values are in microseconds,
 * per the struct cpuidle_state convention.
 */
static struct cpuidle_state nehalem_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state snb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * NOTE(review): table names appear to follow Intel platform codenames
 * (byt/cht/ivb/ivt/hsw, ...) — confirm against the CPU match table later
 * in the file.
 */
static struct cpuidle_state byt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state cht_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/* Same states as ivt_cstates with longer residencies (suffix suggests 4-socket). */
static struct cpuidle_state ivt_cstates_4s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 250,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivt_cstates_8s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state hsw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state bdw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state skl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state skx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state icx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * On AlderLake C1 has to be disabled if C1E is enabled, and vice versa.
 * C1E is enabled only if "C1E promotion" bit is set in MSR_IA32_POWER_CTL.
 * But in this case there is effectively no C1, because C1 requests are
 * promoted to C1E. If the "C1E promotion" bit is cleared, then both C1
 * and C1E requests end up with C1, so there is effectively no C1E.
 *
 * By default we enable C1E and disable C1 by marking it with
 * 'CPUIDLE_FLAG_UNUSABLE'.
 */
static struct cpuidle_state adl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 220,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 280,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 680,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state adl_l_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 230,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/* Note: no separate C1 entry here; C1E is the shallowest state in this table. */
static struct cpuidle_state mtl_l_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 420,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 310,
		.target_residency = 930,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gmt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 195,
		.target_residency = 585,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 260,
		.target_residency = 1040,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 660,
		.target_residency = 1980,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state spr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE,
		.exit_latency = 290,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gnr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 170,
		.target_residency = 650,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6P",
		.desc = "MWAIT 0x21",
		.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 210,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gnrd_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 220,
		.target_residency = 650,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6P",
		.desc = "MWAIT 0x21",
		.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 240,
		.target_residency = 750,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/* Note: the shallowest state is named "C1E" but uses MWAIT hint 0x00 here. */
static struct cpuidle_state atom_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C2",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state tangier_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state avn_cstates[] __initdata = {
	{
1207 .name = "C1", 1208 .desc = "MWAIT 0x00", 1209 .flags = MWAIT2flg(0x00), 1210 .exit_latency = 2, 1211 .target_residency = 2, 1212 .enter = &intel_idle, 1213 .enter_s2idle = intel_idle_s2idle, }, 1214 { 1215 .name = "C6", 1216 .desc = "MWAIT 0x51", 1217 .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, 1218 .exit_latency = 15, 1219 .target_residency = 45, 1220 .enter = &intel_idle, 1221 .enter_s2idle = intel_idle_s2idle, }, 1222 { 1223 .enter = NULL } 1224 }; 1225 static struct cpuidle_state knl_cstates[] __initdata = { 1226 { 1227 .name = "C1", 1228 .desc = "MWAIT 0x00", 1229 .flags = MWAIT2flg(0x00), 1230 .exit_latency = 1, 1231 .target_residency = 2, 1232 .enter = &intel_idle, 1233 .enter_s2idle = intel_idle_s2idle }, 1234 { 1235 .name = "C6", 1236 .desc = "MWAIT 0x10", 1237 .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, 1238 .exit_latency = 120, 1239 .target_residency = 500, 1240 .enter = &intel_idle, 1241 .enter_s2idle = intel_idle_s2idle }, 1242 { 1243 .enter = NULL } 1244 }; 1245 1246 static struct cpuidle_state bxt_cstates[] __initdata = { 1247 { 1248 .name = "C1", 1249 .desc = "MWAIT 0x00", 1250 .flags = MWAIT2flg(0x00), 1251 .exit_latency = 2, 1252 .target_residency = 2, 1253 .enter = &intel_idle, 1254 .enter_s2idle = intel_idle_s2idle, }, 1255 { 1256 .name = "C1E", 1257 .desc = "MWAIT 0x01", 1258 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1259 .exit_latency = 10, 1260 .target_residency = 20, 1261 .enter = &intel_idle, 1262 .enter_s2idle = intel_idle_s2idle, }, 1263 { 1264 .name = "C6", 1265 .desc = "MWAIT 0x20", 1266 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 1267 .exit_latency = 133, 1268 .target_residency = 133, 1269 .enter = &intel_idle, 1270 .enter_s2idle = intel_idle_s2idle, }, 1271 { 1272 .name = "C7s", 1273 .desc = "MWAIT 0x31", 1274 .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED, 1275 .exit_latency = 155, 1276 .target_residency = 155, 1277 .enter = &intel_idle, 1278 .enter_s2idle = intel_idle_s2idle, }, 1279 
{ 1280 .name = "C8", 1281 .desc = "MWAIT 0x40", 1282 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 1283 .exit_latency = 1000, 1284 .target_residency = 1000, 1285 .enter = &intel_idle, 1286 .enter_s2idle = intel_idle_s2idle, }, 1287 { 1288 .name = "C9", 1289 .desc = "MWAIT 0x50", 1290 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, 1291 .exit_latency = 2000, 1292 .target_residency = 2000, 1293 .enter = &intel_idle, 1294 .enter_s2idle = intel_idle_s2idle, }, 1295 { 1296 .name = "C10", 1297 .desc = "MWAIT 0x60", 1298 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 1299 .exit_latency = 10000, 1300 .target_residency = 10000, 1301 .enter = &intel_idle, 1302 .enter_s2idle = intel_idle_s2idle, }, 1303 { 1304 .enter = NULL } 1305 }; 1306 1307 static struct cpuidle_state dnv_cstates[] __initdata = { 1308 { 1309 .name = "C1", 1310 .desc = "MWAIT 0x00", 1311 .flags = MWAIT2flg(0x00), 1312 .exit_latency = 2, 1313 .target_residency = 2, 1314 .enter = &intel_idle, 1315 .enter_s2idle = intel_idle_s2idle, }, 1316 { 1317 .name = "C1E", 1318 .desc = "MWAIT 0x01", 1319 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1320 .exit_latency = 10, 1321 .target_residency = 20, 1322 .enter = &intel_idle, 1323 .enter_s2idle = intel_idle_s2idle, }, 1324 { 1325 .name = "C6", 1326 .desc = "MWAIT 0x20", 1327 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 1328 .exit_latency = 50, 1329 .target_residency = 500, 1330 .enter = &intel_idle, 1331 .enter_s2idle = intel_idle_s2idle, }, 1332 { 1333 .enter = NULL } 1334 }; 1335 1336 /* 1337 * Note, depending on HW and FW revision, SnowRidge SoC may or may not support 1338 * C6, and this is indicated in the CPUID mwait leaf. 
1339 */ 1340 static struct cpuidle_state snr_cstates[] __initdata = { 1341 { 1342 .name = "C1", 1343 .desc = "MWAIT 0x00", 1344 .flags = MWAIT2flg(0x00), 1345 .exit_latency = 2, 1346 .target_residency = 2, 1347 .enter = &intel_idle, 1348 .enter_s2idle = intel_idle_s2idle, }, 1349 { 1350 .name = "C1E", 1351 .desc = "MWAIT 0x01", 1352 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1353 .exit_latency = 15, 1354 .target_residency = 25, 1355 .enter = &intel_idle, 1356 .enter_s2idle = intel_idle_s2idle, }, 1357 { 1358 .name = "C6", 1359 .desc = "MWAIT 0x20", 1360 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 1361 .exit_latency = 130, 1362 .target_residency = 500, 1363 .enter = &intel_idle, 1364 .enter_s2idle = intel_idle_s2idle, }, 1365 { 1366 .enter = NULL } 1367 }; 1368 1369 static struct cpuidle_state grr_cstates[] __initdata = { 1370 { 1371 .name = "C1", 1372 .desc = "MWAIT 0x00", 1373 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1374 .exit_latency = 1, 1375 .target_residency = 1, 1376 .enter = &intel_idle, 1377 .enter_s2idle = intel_idle_s2idle, }, 1378 { 1379 .name = "C1E", 1380 .desc = "MWAIT 0x01", 1381 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1382 .exit_latency = 2, 1383 .target_residency = 10, 1384 .enter = &intel_idle, 1385 .enter_s2idle = intel_idle_s2idle, }, 1386 { 1387 .name = "C6S", 1388 .desc = "MWAIT 0x22", 1389 .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, 1390 .exit_latency = 140, 1391 .target_residency = 500, 1392 .enter = &intel_idle, 1393 .enter_s2idle = intel_idle_s2idle, }, 1394 { 1395 .enter = NULL } 1396 }; 1397 1398 static struct cpuidle_state srf_cstates[] __initdata = { 1399 { 1400 .name = "C1", 1401 .desc = "MWAIT 0x00", 1402 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1403 .exit_latency = 1, 1404 .target_residency = 1, 1405 .enter = &intel_idle, 1406 .enter_s2idle = intel_idle_s2idle, }, 1407 { 1408 .name = "C1E", 1409 .desc = "MWAIT 0x01", 1410 .flags = MWAIT2flg(0x01) | 
CPUIDLE_FLAG_ALWAYS_ENABLE, 1411 .exit_latency = 2, 1412 .target_residency = 10, 1413 .enter = &intel_idle, 1414 .enter_s2idle = intel_idle_s2idle, }, 1415 { 1416 .name = "C6S", 1417 .desc = "MWAIT 0x22", 1418 .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | 1419 CPUIDLE_FLAG_PARTIAL_HINT_MATCH, 1420 .exit_latency = 270, 1421 .target_residency = 700, 1422 .enter = &intel_idle, 1423 .enter_s2idle = intel_idle_s2idle, }, 1424 { 1425 .name = "C6SP", 1426 .desc = "MWAIT 0x23", 1427 .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | 1428 CPUIDLE_FLAG_PARTIAL_HINT_MATCH, 1429 .exit_latency = 310, 1430 .target_residency = 900, 1431 .enter = &intel_idle, 1432 .enter_s2idle = intel_idle_s2idle, }, 1433 { 1434 .enter = NULL } 1435 }; 1436 1437 static const struct idle_cpu idle_cpu_nehalem __initconst = { 1438 .state_table = nehalem_cstates, 1439 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, 1440 .disable_promotion_to_c1e = true, 1441 }; 1442 1443 static const struct idle_cpu idle_cpu_nhx __initconst = { 1444 .state_table = nehalem_cstates, 1445 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, 1446 .disable_promotion_to_c1e = true, 1447 .use_acpi = true, 1448 }; 1449 1450 static const struct idle_cpu idle_cpu_atom __initconst = { 1451 .state_table = atom_cstates, 1452 }; 1453 1454 static const struct idle_cpu idle_cpu_tangier __initconst = { 1455 .state_table = tangier_cstates, 1456 }; 1457 1458 static const struct idle_cpu idle_cpu_lincroft __initconst = { 1459 .state_table = atom_cstates, 1460 .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, 1461 }; 1462 1463 static const struct idle_cpu idle_cpu_snb __initconst = { 1464 .state_table = snb_cstates, 1465 .disable_promotion_to_c1e = true, 1466 }; 1467 1468 static const struct idle_cpu idle_cpu_snx __initconst = { 1469 .state_table = snb_cstates, 1470 .disable_promotion_to_c1e = true, 1471 .use_acpi = true, 1472 }; 1473 1474 static const struct idle_cpu 
idle_cpu_byt __initconst = { 1475 .state_table = byt_cstates, 1476 .disable_promotion_to_c1e = true, 1477 }; 1478 1479 static const struct idle_cpu idle_cpu_cht __initconst = { 1480 .state_table = cht_cstates, 1481 .disable_promotion_to_c1e = true, 1482 }; 1483 1484 static const struct idle_cpu idle_cpu_ivb __initconst = { 1485 .state_table = ivb_cstates, 1486 .disable_promotion_to_c1e = true, 1487 }; 1488 1489 static const struct idle_cpu idle_cpu_ivt __initconst = { 1490 .state_table = ivt_cstates, 1491 .disable_promotion_to_c1e = true, 1492 .use_acpi = true, 1493 }; 1494 1495 static const struct idle_cpu idle_cpu_hsw __initconst = { 1496 .state_table = hsw_cstates, 1497 .disable_promotion_to_c1e = true, 1498 }; 1499 1500 static const struct idle_cpu idle_cpu_hsx __initconst = { 1501 .state_table = hsw_cstates, 1502 .disable_promotion_to_c1e = true, 1503 .use_acpi = true, 1504 }; 1505 1506 static const struct idle_cpu idle_cpu_bdw __initconst = { 1507 .state_table = bdw_cstates, 1508 .disable_promotion_to_c1e = true, 1509 }; 1510 1511 static const struct idle_cpu idle_cpu_bdx __initconst = { 1512 .state_table = bdw_cstates, 1513 .disable_promotion_to_c1e = true, 1514 .use_acpi = true, 1515 }; 1516 1517 static const struct idle_cpu idle_cpu_skl __initconst = { 1518 .state_table = skl_cstates, 1519 .disable_promotion_to_c1e = true, 1520 }; 1521 1522 static const struct idle_cpu idle_cpu_skx __initconst = { 1523 .state_table = skx_cstates, 1524 .disable_promotion_to_c1e = true, 1525 .use_acpi = true, 1526 }; 1527 1528 static const struct idle_cpu idle_cpu_icx __initconst = { 1529 .state_table = icx_cstates, 1530 .disable_promotion_to_c1e = true, 1531 .use_acpi = true, 1532 }; 1533 1534 static const struct idle_cpu idle_cpu_adl __initconst = { 1535 .state_table = adl_cstates, 1536 }; 1537 1538 static const struct idle_cpu idle_cpu_adl_l __initconst = { 1539 .state_table = adl_l_cstates, 1540 }; 1541 1542 static const struct idle_cpu idle_cpu_mtl_l __initconst = { 
	.state_table = mtl_l_cstates,
};

static const struct idle_cpu idle_cpu_gmt __initconst = {
	.state_table = gmt_cstates,
};

static const struct idle_cpu idle_cpu_spr __initconst = {
	.state_table = spr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_gnr __initconst = {
	.state_table = gnr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_gnrd __initconst = {
	.state_table = gnrd_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_avn __initconst = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_knl __initconst = {
	.state_table = knl_cstates,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bxt __initconst = {
	.state_table = bxt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_dnv __initconst = {
	.state_table = dnv_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/* No native state table: Tremont falls back to the ACPI _CST states. */
static const struct idle_cpu idle_cpu_tmt __initconst = {
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_snr __initconst = {
	.state_table = snr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_grr __initconst = {
	.state_table = grr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_srf __initconst = {
	.state_table = srf_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/* Map each supported CPU model (vendor/family/model) to its idle_cpu data. */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	X86_MATCH_VFM(INTEL_NEHALEM_EP,		&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_NEHALEM_G,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE_EP,	&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM_EX,		&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL,	&idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID,	&idle_cpu_lincroft),
	X86_MATCH_VFM(INTEL_WESTMERE_EX,	&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&idle_cpu_snb),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&idle_cpu_snx),
	X86_MATCH_VFM(INTEL_ATOM_SALTWELL,	&idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT,	&idle_cpu_byt),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT,	&idle_cpu_cht),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&idle_cpu_ivb),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&idle_cpu_ivt),
	X86_MATCH_VFM(INTEL_HASWELL,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&idle_cpu_hsx),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D,	&idle_cpu_avn),
	X86_MATCH_VFM(INTEL_BROADWELL,		&idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&idle_cpu_skx),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&idle_cpu_adl),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&idle_cpu_adl_l),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&idle_cpu_mtl_l),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,
&idle_cpu_gmt),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&idle_cpu_gnr),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	&idle_cpu_gnrd),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&idle_cpu_knl),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&idle_cpu_knl),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT,	&idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS,	&idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D,	&idle_cpu_dnv),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	&idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	&idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D,	&idle_cpu_snr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&idle_cpu_grr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&idle_cpu_srf),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&idle_cpu_srf),
	{}
};

/* Fallback match: any family 6 Intel CPU that advertises MWAIT. */
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
	{}
};

/*
 * Return true if @cstate exceeds the intel_idle.max_cstate module-parameter
 * limit, in which case state enumeration should stop.
 */
static bool __init intel_idle_max_cstate_reached(int cstate)
{
	if (cstate + 1 > max_cstate) {
		pr_info("max_cstate %d reached\n", max_cstate);
		return true;
	}
	return false;
}

/*
 * Return true if entering @state requires switching to the one-shot tick
 * broadcast mechanism (no ARAT and the state is deeper than C1, i.e. the
 * local APIC timer may stop in it).
 */
static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
{
	unsigned long eax = flg2MWAIT(state->flags);

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return false;

	/*
	 * Switch over to one-shot tick broadcast if the target C-state
	 * is deeper than C1.
	 */
	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>

static bool no_acpi __read_mostly;
module_param(no_acpi, bool, 0444);
MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");

static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

static bool no_native __read_mostly; /* No effect if no_acpi is set. */
module_param_named(no_native, no_native, bool, 0444);
MODULE_PARM_DESC(no_native, "Ignore cpu specific (native) idle states in lieu of ACPI idle states");

/* _CST data extracted at init time by intel_idle_acpi_cst_extract(). */
static struct acpi_processor_power acpi_state_table __initdata;

/**
 * intel_idle_cst_usable - Check if the _CST information can be used.
 *
 * Check if all of the C-states listed by _CST in the max_cstate range are
 * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
1718 */ 1719 static bool __init intel_idle_cst_usable(void) 1720 { 1721 int cstate, limit; 1722 1723 limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1), 1724 acpi_state_table.count); 1725 1726 for (cstate = 1; cstate < limit; cstate++) { 1727 struct acpi_processor_cx *cx = &acpi_state_table.states[cstate]; 1728 1729 if (cx->entry_method != ACPI_CSTATE_FFH) 1730 return false; 1731 } 1732 1733 return true; 1734 } 1735 1736 static bool __init intel_idle_acpi_cst_extract(void) 1737 { 1738 unsigned int cpu; 1739 1740 if (no_acpi) { 1741 pr_debug("Not allowed to use ACPI _CST\n"); 1742 return false; 1743 } 1744 1745 for_each_possible_cpu(cpu) { 1746 struct acpi_processor *pr = per_cpu(processors, cpu); 1747 1748 if (!pr) 1749 continue; 1750 1751 if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) 1752 continue; 1753 1754 acpi_state_table.count++; 1755 1756 if (!intel_idle_cst_usable()) 1757 continue; 1758 1759 if (!acpi_processor_claim_cst_control()) 1760 break; 1761 1762 return true; 1763 } 1764 1765 acpi_state_table.count = 0; 1766 pr_debug("ACPI _CST not found or not usable\n"); 1767 return false; 1768 } 1769 1770 static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) 1771 { 1772 int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); 1773 1774 /* 1775 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of 1776 * the interesting states are ACPI_CSTATE_FFH. 
1777 */ 1778 for (cstate = 1; cstate < limit; cstate++) { 1779 struct acpi_processor_cx *cx; 1780 struct cpuidle_state *state; 1781 1782 if (intel_idle_max_cstate_reached(cstate - 1)) 1783 break; 1784 1785 cx = &acpi_state_table.states[cstate]; 1786 1787 state = &drv->states[drv->state_count++]; 1788 1789 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); 1790 strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); 1791 state->exit_latency = cx->latency; 1792 /* 1793 * For C1-type C-states use the same number for both the exit 1794 * latency and target residency, because that is the case for 1795 * C1 in the majority of the static C-states tables above. 1796 * For the other types of C-states, however, set the target 1797 * residency to 3 times the exit latency which should lead to 1798 * a reasonable balance between energy-efficiency and 1799 * performance in the majority of interesting cases. 1800 */ 1801 state->target_residency = cx->latency; 1802 if (cx->type > ACPI_STATE_C1) 1803 state->target_residency *= 3; 1804 1805 state->flags = MWAIT2flg(cx->address); 1806 if (cx->type > ACPI_STATE_C2) 1807 state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; 1808 1809 if (disabled_states_mask & BIT(cstate)) 1810 state->flags |= CPUIDLE_FLAG_OFF; 1811 1812 if (intel_idle_state_needs_timer_stop(state)) 1813 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 1814 1815 if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 1816 mark_tsc_unstable("TSC halts in idle"); 1817 1818 state->enter = intel_idle; 1819 state->enter_dead = intel_idle_enter_dead; 1820 state->enter_s2idle = intel_idle_s2idle; 1821 } 1822 } 1823 1824 static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) 1825 { 1826 int cstate, limit; 1827 1828 /* 1829 * If there are no _CST C-states, do not disable any C-states by 1830 * default. 
1831 */ 1832 if (!acpi_state_table.count) 1833 return false; 1834 1835 limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); 1836 /* 1837 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of 1838 * the interesting states are ACPI_CSTATE_FFH. 1839 */ 1840 for (cstate = 1; cstate < limit; cstate++) { 1841 u32 acpi_hint = acpi_state_table.states[cstate].address; 1842 u32 table_hint = mwait_hint; 1843 1844 if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) { 1845 acpi_hint &= ~MWAIT_SUBSTATE_MASK; 1846 table_hint &= ~MWAIT_SUBSTATE_MASK; 1847 } 1848 1849 if (acpi_hint == table_hint) 1850 return false; 1851 } 1852 return true; 1853 } 1854 1855 static inline bool ignore_native(void) 1856 { 1857 return no_native && !no_acpi; 1858 } 1859 #else /* !CONFIG_ACPI_PROCESSOR_CSTATE */ 1860 #define force_use_acpi (false) 1861 1862 static inline bool intel_idle_acpi_cst_extract(void) { return false; } 1863 static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } 1864 static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) 1865 { 1866 return false; 1867 } 1868 static inline bool ignore_native(void) { return false; } 1869 #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ 1870 1871 /** 1872 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town. 1873 * 1874 * Tune IVT multi-socket targets. 1875 * Assumption: num_sockets == (max_package_num + 1). 
1876 */ 1877 static void __init ivt_idle_state_table_update(void) 1878 { 1879 /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ 1880 int cpu, package_num, num_sockets = 1; 1881 1882 for_each_online_cpu(cpu) { 1883 package_num = topology_physical_package_id(cpu); 1884 if (package_num + 1 > num_sockets) { 1885 num_sockets = package_num + 1; 1886 1887 if (num_sockets > 4) { 1888 cpuidle_state_table = ivt_cstates_8s; 1889 return; 1890 } 1891 } 1892 } 1893 1894 if (num_sockets > 2) 1895 cpuidle_state_table = ivt_cstates_4s; 1896 1897 /* else, 1 and 2 socket systems use default ivt_cstates */ 1898 } 1899 1900 /** 1901 * irtl_2_usec - IRTL to microseconds conversion. 1902 * @irtl: IRTL MSR value. 1903 * 1904 * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds. 1905 */ 1906 static unsigned long long __init irtl_2_usec(unsigned long long irtl) 1907 { 1908 static const unsigned int irtl_ns_units[] __initconst = { 1909 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 1910 }; 1911 unsigned long long ns; 1912 1913 if (!irtl) 1914 return 0; 1915 1916 ns = irtl_ns_units[(irtl >> 10) & 0x7]; 1917 1918 return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC); 1919 } 1920 1921 /** 1922 * bxt_idle_state_table_update - Fix up the Broxton idle states table. 1923 * 1924 * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the 1925 * definitive maximum latency and use the same value for target_residency. 
1926 */ 1927 static void __init bxt_idle_state_table_update(void) 1928 { 1929 unsigned long long msr; 1930 unsigned int usec; 1931 1932 rdmsrq(MSR_PKGC6_IRTL, msr); 1933 usec = irtl_2_usec(msr); 1934 if (usec) { 1935 bxt_cstates[2].exit_latency = usec; 1936 bxt_cstates[2].target_residency = usec; 1937 } 1938 1939 rdmsrq(MSR_PKGC7_IRTL, msr); 1940 usec = irtl_2_usec(msr); 1941 if (usec) { 1942 bxt_cstates[3].exit_latency = usec; 1943 bxt_cstates[3].target_residency = usec; 1944 } 1945 1946 rdmsrq(MSR_PKGC8_IRTL, msr); 1947 usec = irtl_2_usec(msr); 1948 if (usec) { 1949 bxt_cstates[4].exit_latency = usec; 1950 bxt_cstates[4].target_residency = usec; 1951 } 1952 1953 rdmsrq(MSR_PKGC9_IRTL, msr); 1954 usec = irtl_2_usec(msr); 1955 if (usec) { 1956 bxt_cstates[5].exit_latency = usec; 1957 bxt_cstates[5].target_residency = usec; 1958 } 1959 1960 rdmsrq(MSR_PKGC10_IRTL, msr); 1961 usec = irtl_2_usec(msr); 1962 if (usec) { 1963 bxt_cstates[6].exit_latency = usec; 1964 bxt_cstates[6].target_residency = usec; 1965 } 1966 1967 } 1968 1969 /** 1970 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table. 1971 * 1972 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled. 
1973 */ 1974 static void __init sklh_idle_state_table_update(void) 1975 { 1976 unsigned long long msr; 1977 unsigned int eax, ebx, ecx, edx; 1978 1979 1980 /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */ 1981 if (max_cstate <= 7) 1982 return; 1983 1984 /* if PC10 not present in CPUID.MWAIT.EDX */ 1985 if ((mwait_substates & (0xF << 28)) == 0) 1986 return; 1987 1988 rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 1989 1990 /* PC10 is not enabled in PKG C-state limit */ 1991 if ((msr & 0xF) != 8) 1992 return; 1993 1994 ecx = 0; 1995 cpuid(7, &eax, &ebx, &ecx, &edx); 1996 1997 /* if SGX is present */ 1998 if (ebx & (1 << 2)) { 1999 2000 rdmsrq(MSR_IA32_FEAT_CTL, msr); 2001 2002 /* if SGX is enabled */ 2003 if (msr & (1 << 18)) 2004 return; 2005 } 2006 2007 skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ 2008 skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ 2009 } 2010 2011 /** 2012 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake 2013 * idle states table. 2014 */ 2015 static void __init skx_idle_state_table_update(void) 2016 { 2017 unsigned long long msr; 2018 2019 rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 2020 2021 /* 2022 * 000b: C0/C1 (no package C-state support) 2023 * 001b: C2 2024 * 010b: C6 (non-retention) 2025 * 011b: C6 (retention) 2026 * 111b: No Package C state limits. 2027 */ 2028 if ((msr & 0x7) < 2) { 2029 /* 2030 * Uses the CC6 + PC0 latency and 3 times of 2031 * latency for target_residency if the PC6 2032 * is disabled in BIOS. This is consistent 2033 * with how intel_idle driver uses _CST 2034 * to set the target_residency. 2035 */ 2036 skx_cstates[2].exit_latency = 92; 2037 skx_cstates[2].target_residency = 276; 2038 } 2039 } 2040 2041 /** 2042 * adl_idle_state_table_update - Adjust AlderLake idle states table. 2043 */ 2044 static void __init adl_idle_state_table_update(void) 2045 { 2046 /* Check if user prefers C1 over C1E. 
*/ 2047 if (preferred_states_mask & BIT(1) && !(preferred_states_mask & BIT(2))) { 2048 cpuidle_state_table[0].flags &= ~CPUIDLE_FLAG_UNUSABLE; 2049 cpuidle_state_table[1].flags |= CPUIDLE_FLAG_UNUSABLE; 2050 2051 /* Disable C1E by clearing the "C1E promotion" bit. */ 2052 c1e_promotion = C1E_PROMOTION_DISABLE; 2053 return; 2054 } 2055 2056 /* Make sure C1E is enabled by default */ 2057 c1e_promotion = C1E_PROMOTION_ENABLE; 2058 } 2059 2060 /** 2061 * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table. 2062 */ 2063 static void __init spr_idle_state_table_update(void) 2064 { 2065 unsigned long long msr; 2066 2067 /* 2068 * By default, the C6 state assumes the worst-case scenario of package 2069 * C6. However, if PC6 is disabled, we update the numbers to match 2070 * core C6. 2071 */ 2072 rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr); 2073 2074 /* Limit value 2 and above allow for PC6. */ 2075 if ((msr & 0x7) < 2) { 2076 spr_cstates[2].exit_latency = 190; 2077 spr_cstates[2].target_residency = 600; 2078 } 2079 } 2080 2081 /** 2082 * byt_cht_auto_demotion_disable - Disable Bay/Cherry Trail auto-demotion. 2083 */ 2084 static void __init byt_cht_auto_demotion_disable(void) 2085 { 2086 wrmsrq(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); 2087 wrmsrq(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); 2088 } 2089 2090 static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) 2091 { 2092 unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) & 2093 MWAIT_CSTATE_MASK; 2094 unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) & 2095 MWAIT_SUBSTATE_MASK; 2096 2097 /* Ignore the C-state if there are NO sub-states in CPUID for it. 
*/ 2098 if (num_substates == 0) 2099 return false; 2100 2101 if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 2102 mark_tsc_unstable("TSC halts in idle states deeper than C2"); 2103 2104 return true; 2105 } 2106 2107 static void state_update_enter_method(struct cpuidle_state *state, int cstate) 2108 { 2109 if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) { 2110 /* 2111 * Combining with XSTATE with IBRS or IRQ_ENABLE flags 2112 * is not currently supported but this driver. 2113 */ 2114 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS); 2115 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); 2116 state->enter = intel_idle_xstate; 2117 return; 2118 } 2119 2120 if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) && 2121 ((state->flags & CPUIDLE_FLAG_IBRS) || ibrs_off)) { 2122 /* 2123 * IBRS mitigation requires that C-states are entered 2124 * with interrupts disabled. 2125 */ 2126 if (ibrs_off && (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)) 2127 state->flags &= ~CPUIDLE_FLAG_IRQ_ENABLE; 2128 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); 2129 state->enter = intel_idle_ibrs; 2130 return; 2131 } 2132 2133 if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) { 2134 state->enter = intel_idle_irq; 2135 return; 2136 } 2137 2138 if (force_irq_on) { 2139 pr_info("forced intel_idle_irq for state %d\n", cstate); 2140 state->enter = intel_idle_irq; 2141 } 2142 } 2143 2144 static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) 2145 { 2146 int cstate; 2147 2148 switch (boot_cpu_data.x86_vfm) { 2149 case INTEL_IVYBRIDGE_X: 2150 ivt_idle_state_table_update(); 2151 break; 2152 case INTEL_ATOM_GOLDMONT: 2153 case INTEL_ATOM_GOLDMONT_PLUS: 2154 bxt_idle_state_table_update(); 2155 break; 2156 case INTEL_SKYLAKE: 2157 sklh_idle_state_table_update(); 2158 break; 2159 case INTEL_SKYLAKE_X: 2160 skx_idle_state_table_update(); 2161 break; 2162 case INTEL_SAPPHIRERAPIDS_X: 2163 case INTEL_EMERALDRAPIDS_X: 2164 spr_idle_state_table_update(); 2165 break; 2166 case 
INTEL_ALDERLAKE: 2167 case INTEL_ALDERLAKE_L: 2168 case INTEL_ATOM_GRACEMONT: 2169 adl_idle_state_table_update(); 2170 break; 2171 case INTEL_ATOM_SILVERMONT: 2172 case INTEL_ATOM_AIRMONT: 2173 byt_cht_auto_demotion_disable(); 2174 break; 2175 } 2176 2177 for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { 2178 struct cpuidle_state *state; 2179 unsigned int mwait_hint; 2180 2181 if (intel_idle_max_cstate_reached(cstate)) 2182 break; 2183 2184 if (!cpuidle_state_table[cstate].enter && 2185 !cpuidle_state_table[cstate].enter_s2idle) 2186 break; 2187 2188 if (!cpuidle_state_table[cstate].enter_dead) 2189 cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead; 2190 2191 /* If marked as unusable, skip this state. */ 2192 if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) { 2193 pr_debug("state %s is disabled\n", 2194 cpuidle_state_table[cstate].name); 2195 continue; 2196 } 2197 2198 mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags); 2199 if (!intel_idle_verify_cstate(mwait_hint)) 2200 continue; 2201 2202 /* Structure copy. */ 2203 drv->states[drv->state_count] = cpuidle_state_table[cstate]; 2204 state = &drv->states[drv->state_count]; 2205 2206 state_update_enter_method(state, cstate); 2207 2208 2209 if ((disabled_states_mask & BIT(drv->state_count)) || 2210 ((icpu->use_acpi || force_use_acpi) && 2211 intel_idle_off_by_default(state->flags, mwait_hint) && 2212 !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) 2213 state->flags |= CPUIDLE_FLAG_OFF; 2214 2215 if (intel_idle_state_needs_timer_stop(state)) 2216 state->flags |= CPUIDLE_FLAG_TIMER_STOP; 2217 2218 drv->state_count++; 2219 } 2220 } 2221 2222 /** 2223 * intel_idle_cpuidle_driver_init - Create the list of available idle states. 2224 * @drv: cpuidle driver structure to initialize. 
 */
static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
{
	cpuidle_poll_state_init(drv);

	/* Bit 0 of the mask disables the polling state itself. */
	if (disabled_states_mask & BIT(0))
		drv->states[0].flags |= CPUIDLE_FLAG_OFF;

	drv->state_count = 1;

	/* Prefer the native MWAIT tables; fall back to ACPI _CST data. */
	if (icpu && icpu->state_table)
		intel_idle_init_cstates_icpu(drv);
	else
		intel_idle_init_cstates_acpi(drv);
}

/*
 * Clear the auto-demotion enable bits selected by
 * auto_demotion_disable_flags in MSR_PKG_CST_CONFIG_CONTROL.
 */
static void auto_demotion_disable(void)
{
	unsigned long long msr_bits;

	rdmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
	msr_bits &= ~auto_demotion_disable_flags;
	wrmsrq(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}

/* Set the C1E promotion bit (bit 1) in MSR_IA32_POWER_CTL. */
static void c1e_promotion_enable(void)
{
	unsigned long long msr_bits;

	rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
	msr_bits |= 0x2;
	wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}

/* Clear the C1E promotion bit (bit 1) in MSR_IA32_POWER_CTL. */
static void c1e_promotion_disable(void)
{
	unsigned long long msr_bits;

	rdmsrq(MSR_IA32_POWER_CTL, msr_bits);
	msr_bits &= ~0x2;
	wrmsrq(MSR_IA32_POWER_CTL, msr_bits);
}

/**
 * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
 * @cpu: CPU to initialize.
 *
 * Register a cpuidle device object for @cpu and update its MSRs in accordance
 * with the processor model flags.
 */
static int intel_idle_cpu_init(unsigned int cpu)
{
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug("cpuidle_register_device %d failed!\n", cpu);
		return -EIO;
	}

	/* Apply the model-specific MSR adjustments on this CPU. */
	if (auto_demotion_disable_flags)
		auto_demotion_disable();

	if (c1e_promotion == C1E_PROMOTION_ENABLE)
		c1e_promotion_enable();
	else if (c1e_promotion == C1E_PROMOTION_DISABLE)
		c1e_promotion_disable();

	return 0;
}

/* CPU hotplug "online" callback (runs for each CPU at boot as well). */
static int intel_idle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev;

	/*
	 * Without ARAT the local APIC timer is not reliable in deep
	 * C-states (see the pr_debug() in intel_idle_init()), so enable
	 * the broadcast timer on this CPU instead.
	 */
	if (!boot_cpu_has(X86_FEATURE_ARAT))
		tick_broadcast_enable();

	/*
	 * Some systems can hotplug a cpu at runtime after
	 * the kernel has booted, we have to initialize the
	 * driver in this case
	 */
	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
	if (!dev->registered)
		return intel_idle_cpu_init(cpu);

	return 0;
}

/**
 * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
 */
static void __init intel_idle_cpuidle_devices_uninit(void)
{
	int i;

	for_each_online_cpu(i)
		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}

/*
 * Driver entry point: match the CPU, read the MWAIT CPUID leaf, pick the
 * native or ACPI state source, and register the driver plus per-CPU
 * devices via a CPU hotplug "online" callback.
 */
static int __init intel_idle_init(void)
{
	const struct x86_cpu_id *id;
	unsigned int eax, ebx, ecx;
	int retval;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	/* intel_idle.max_cstate=0 disables the driver entirely. */
	if (max_cstate == 0) {
		pr_debug("disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (id) {
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
	} else {
		id = x86_match_cpu(intel_mwait_ids);
		if (!id)
			return -ENODEV;
	}

	/* CPUID MWAIT leaf: extensions in ECX, sub-state counts in EDX. */
	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu && ignore_native()) {
		pr_debug("ignoring native CPU idle states\n");
		icpu = NULL;
	}
	if (icpu) {
		if (icpu->state_table)
			cpuidle_state_table = icpu->state_table;
		else if (!intel_idle_acpi_cst_extract())
			return -ENODEV;

		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
		if (icpu->disable_promotion_to_c1e)
			c1e_promotion = C1E_PROMOTION_DISABLE;
		if (icpu->use_acpi || force_use_acpi)
			intel_idle_acpi_cst_extract();
	} else if (!intel_idle_acpi_cst_extract()) {
		return -ENODEV;
	}

	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
		 boot_cpu_data.x86_model);

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (!intel_idle_cpuidle_devices)
		return -ENOMEM;

	intel_idle_cpuidle_driver_init(&intel_idle_driver);

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
		       drv ? drv->name : "none");
		goto init_driver_fail;
	}

	/* The online callback also registers the per-CPU devices. */
	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
				   intel_idle_cpu_online, NULL);
	if (retval < 0)
		goto hp_setup_fail;

	pr_debug("Local APIC timer is reliable in %s\n",
		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

	return 0;

hp_setup_fail:
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
	free_percpu(intel_idle_cpuidle_devices);
	return retval;
}
device_initcall(intel_idle_init);

/*
 * We are not really modular, but we used to support that.  Meaning we also
 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
 * is the easiest way (currently) to continue doing that.
 */
module_param(max_cstate, int, 0444);
/*
 * The positions of the bits that are set in this number are the indices of the
 * idle states to be disabled by default (as reflected by the names of the
 * corresponding idle state directories in sysfs, "state0", "state1" ...
 * "state<i>" ..., where <i> is the index of the given state).
 */
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
/*
 * Some platforms come with mutually exclusive C-states, so that if one is
 * enabled, the other C-states must not be used.  Example: C1 and C1E on
 * Sapphire Rapids platform.  This parameter allows for selecting the
 * preferred C-states among the groups of mutually exclusive C-states - the
 * selected C-states will be registered, the other C-states from the mutually
 * exclusive group won't be registered.  If the platform has no mutually
 * exclusive C-states, this parameter has no effect.
 */
module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
/*
 * Debugging option that forces the driver to enter all C-states with
 * interrupts enabled.  Does not apply to C-states with
 * 'CPUIDLE_FLAG_INIT_XSTATE' and 'CPUIDLE_FLAG_IBRS' flags.
 */
module_param(force_irq_on, bool, 0444);
/*
 * Force the disabling of IBRS when X86_FEATURE_KERNEL_IBRS is on and
 * CPUIDLE_FLAG_IRQ_ENABLE isn't set.
 */
module_param(ibrs_off, bool, 0444);
MODULE_PARM_DESC(ibrs_off, "Disable IBRS when idle");