// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2013 - 2020, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

/*
 * intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have same idle states as boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 *
 * CPU will flush caches as needed when entering a C-state via MWAIT
 *	(in contrast to entering ACPI C3, in which case the WBINVD
 *	instruction needs to be executed to flush the caches)
 */

/*
 * Known limitations
 *
 * ACPI has a .suspend hack to turn off deep C-states during suspend
 * to avoid complications with the lapic timer workaround.
 * Have not seen issues with suspend, but may need same workaround here.
 *
 */

/* un-comment DEBUG to enable pr_debug() statements */
/* #define DEBUG */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpuid.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
#include <asm/smp.h>

#define INTEL_IDLE_VERSION "0.5.1"

/* The cpuidle driver object registered with the cpuidle core. */
static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1;
/*
 * Driver-wide tunables.  The names suggest these are exposed as module
 * parameters; the module_param() registrations are not visible in this
 * chunk — TODO confirm against the rest of the file.
 */
static unsigned int disabled_states_mask __read_mostly;
static unsigned int preferred_states_mask __read_mostly;
static bool force_irq_on __read_mostly;
static bool ibrs_off __read_mostly;

/* Per-CPU cpuidle device objects, allocated during driver init. */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;

/* Auto-demotion enable bits to clear, copied from the matched idle_cpu. */
static unsigned long auto_demotion_disable_flags;

/*
 * What to do about the "C1E promotion" hardware behavior on this platform
 * (see the AlderLake comment further down: the promotion bit lives in
 * MSR_IA32_POWER_CTL): leave the firmware setting alone, force it on, or
 * force it off.
 */
static enum {
	C1E_PROMOTION_PRESERVE,
	C1E_PROMOTION_ENABLE,
	C1E_PROMOTION_DISABLE
} c1e_promotion = C1E_PROMOTION_PRESERVE;

/*
 * Per-CPU-model description: which C-state table to use and which
 * platform quirks to apply.
 */
struct idle_cpu {
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
	bool disable_promotion_to_c1e;
	bool use_acpi;
};

/* The idle_cpu entry matched for the boot CPU; only needed during init. */
static const struct idle_cpu *icpu __initdata;
static struct cpuidle_state *cpuidle_state_table __initdata;

/* MWAIT sub-state support mask reported by CPUID, cached during init. */
static unsigned int mwait_substates __initdata;

/*
 * Enable interrupts before entering the C-state.
 * On some platforms and for
 * some C-states, this may measurably decrease interrupt latency.
 */
#define CPUIDLE_FLAG_IRQ_ENABLE		BIT(14)

/*
 * Enable this state by default even if the ACPI _CST does not list it.
 */
#define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)

/*
 * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
 * above.
 */
#define CPUIDLE_FLAG_IBRS		BIT(16)

/*
 * Initialize large xstate for the C6-state entrance.
 */
#define CPUIDLE_FLAG_INIT_XSTATE	BIT(17)

/*
 * Ignore the sub-state when matching mwait hints between the ACPI _CST and
 * custom tables.
 */
#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH	BIT(18)

/*
 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
 * the C-state (top nibble) and sub-state (bottom nibble)
 * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
 *
 * We store the hint at the top of our "flags" for each state.
 */
#define flg2MWAIT(flags)	(((flags) >> 24) & 0xFF)
#define MWAIT2flg(eax)		((eax & 0xFF) << 24)

/*
 * Common idle-entry helper: issue MWAIT with the target state's hint in
 * EAX.  When @irqoff, ECX bit 0 ("break on interrupt") is set so that a
 * pending interrupt can still wake the CPU with interrupts disabled.
 * Returns @index, as required of a cpuidle ->enter() callback.
 */
static __always_inline int __intel_idle(struct cpuidle_device *dev,
					struct cpuidle_driver *drv,
					int index, bool irqoff)
{
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);
	unsigned long ecx = 1*irqoff; /* break on interrupt flag */

	mwait_idle_with_hints(eax, ecx);

	return index;
}

/**
 * intel_idle - Ask the processor to enter the given idle state.
 * @dev: cpuidle device of the target CPU.
 * @drv: cpuidle driver (assumed to point to intel_idle_driver).
 * @index: Target idle state index.
 *
 * Use the MWAIT instruction to notify the processor that the CPU represented by
 * @dev is idle and it can try to enter the idle state corresponding to @index.
 *
 * If the local APIC timer is not known to be reliable in the target idle state,
 * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
 *
 * Must be called under local_irq_disable().
 */
static __cpuidle int intel_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	return __intel_idle(dev, drv, index, true);
}

/*
 * Variant that does not set the "break on interrupt" ECX bit; presumably
 * paired with states carrying CPUIDLE_FLAG_IRQ_ENABLE (the wiring is not
 * visible in this chunk — confirm against the driver's state setup).
 */
static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv, int index)
{
	return __intel_idle(dev, drv, index, false);
}

/*
 * Variant for states flagged CPUIDLE_FLAG_IBRS: when SMT is active, write
 * SPEC_CTRL to 0 before idling and restore the saved value afterwards, so
 * the sibling thread is not slowed down by this CPU's IBRS setting.
 */
static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	bool smt_active = sched_smt_active();
	u64 spec_ctrl = spec_ctrl_current();
	int ret;

	if (smt_active)
		__update_spec_ctrl(0);

	ret = __intel_idle(dev, drv, index, true);

	if (smt_active)
		__update_spec_ctrl(spec_ctrl);

	return ret;
}

/*
 * Variant for states flagged CPUIDLE_FLAG_INIT_XSTATE: put the FPU
 * registers into an idle-friendly state before entering.
 */
static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	fpu_idle_fpregs();
	return __intel_idle(dev, drv, index, true);
}

/**
 * intel_idle_s2idle - Ask the processor to enter the given idle state.
 * @dev: cpuidle device of the target CPU.
 * @drv: cpuidle driver (assumed to point to intel_idle_driver).
 * @index: Target idle state index.
 *
 * Use the MWAIT instruction to notify the processor that the CPU represented by
 * @dev is idle and it can try to enter the idle state corresponding to @index.
 *
 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
 * scheduler tick and suspended scheduler clock on the target CPU.
 */
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);

	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
		fpu_idle_fpregs();

	mwait_idle_with_hints(eax, ecx);

	return 0;
}

/*
 * Final idle entry for a CPU that is going offline: hand the state's
 * MWAIT hint to mwait_play_dead(), which does not return normally.
 */
static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	struct cpuidle_state *state = &drv->states[index];
	unsigned long eax = flg2MWAIT(state->flags);

	mwait_play_dead(eax);
}

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 *
 * In all of the tables below, exit_latency and target_residency are in
 * microseconds, per struct cpuidle_state.  The table names are the usual
 * kernel microarchitecture codenames (nehalem, snb = Sandy Bridge, etc.).
 */
static struct cpuidle_state nehalem_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state snb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/* byt = Bay Trail; cht below = Cherry Trail (inferred from the names). */
static struct cpuidle_state byt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 500,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state cht_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6N",
		.desc = "MWAIT 0x58",
		.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 275,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6S",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7S",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivb_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle,
	},
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * ivt = Ivy Bridge server (Ivy Town); the _4s and _8s variants below carry
 * larger target residencies for 4- and 8-socket systems.
 */
static struct cpuidle_state ivt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 82,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivt_cstates_4s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 250,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 300,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 84,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state ivt_cstates_8s[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 88,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state hsw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 33,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
static struct cpuidle_state bdw_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 40,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x32",
		.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 166,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 300,
		.target_residency = 900,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 600,
		.target_residency = 1800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 2600,
		.target_residency = 7700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * Skylake client table.  Note the deeper states carry CPUIDLE_FLAG_IBRS,
 * i.e. they are entered via the SPEC_CTRL-toggling path.
 */
static struct cpuidle_state skl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C3",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 70,
		.target_residency = 100,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 85,
		.target_residency = 200,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x33",
		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 124,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x50",
		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 480,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 890,
		.target_residency = 5000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state skx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
		.exit_latency = 133,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state icx_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * On AlderLake C1 has to be disabled if C1E is enabled, and vice versa.
 * C1E is enabled only if "C1E promotion" bit is set in MSR_IA32_POWER_CTL.
 * But in this case there is effectively no C1, because C1 requests are
 * promoted to C1E. If the "C1E promotion" bit is cleared, then both C1
 * and C1E requests end up with C1, so there is effectively no C1E.
 *
 * By default we enable C1E and disable C1 by marking it with
 * 'CPUIDLE_FLAG_UNUSABLE'.
 */
static struct cpuidle_state adl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 220,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 280,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 680,
		.target_residency = 2000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state adl_l_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 170,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 600,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 230,
		.target_residency = 700,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/* Note: no C1 entry here — C1E is the shallowest state in this table. */
static struct cpuidle_state mtl_l_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 420,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 310,
		.target_residency = 930,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gmt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 195,
		.target_residency = 585,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C8",
		.desc = "MWAIT 0x40",
		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 260,
		.target_residency = 1040,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C10",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 660,
		.target_residency = 1980,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state spr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 2,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE,
		.exit_latency = 290,
		.target_residency = 800,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gnr_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 170,
		.target_residency = 650,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6P",
		.desc = "MWAIT 0x21",
		.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 210,
		.target_residency = 1000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

static struct cpuidle_state gnrd_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 4,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 220,
		.target_residency = 650,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6P",
		.desc = "MWAIT 0x21",
		.flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED |
					   CPUIDLE_FLAG_INIT_XSTATE |
					   CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
		.exit_latency = 240,
		.target_residency = 750,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};

/*
 * Note: on this Atom table hint 0x00 is labelled "C1E" — hardware enters
 * C1E for the plain C1 hint here.
 */
static struct cpuidle_state atom_cstates[] __initdata = {
	{
		.name = "C1E",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C2",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10),
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
static struct cpuidle_state tangier_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C4",
		.desc = "MWAIT 0x30",
		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x52",
		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7",
		.desc = "MWAIT 0x60",
		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 1200,
		.target_residency = 4000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C9",
		.desc = "MWAIT 0x64",
		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 10000,
		.target_residency = 20000,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
static struct cpuidle_state avn_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x51",
		.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 15,
		.target_residency = 45,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};
/*
 * NOTE(review): the entries in this table omit the trailing comma after
 * .enter_s2idle that every other table has — harmless, but inconsistent.
 */
static struct cpuidle_state knl_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 1,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.name = "C6",
		.desc = "MWAIT 0x10",
		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 120,
		.target_residency = 500,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle },
	{
		.enter = NULL }
};

static struct cpuidle_state bxt_cstates[] __initdata = {
	{
		.name = "C1",
		.desc = "MWAIT 0x00",
		.flags = MWAIT2flg(0x00),
		.exit_latency = 2,
		.target_residency = 2,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C1E",
		.desc = "MWAIT 0x01",
		.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
		.exit_latency = 10,
		.target_residency = 20,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,
		.target_residency = 133,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.name = "C7s",
		.desc = "MWAIT 0x31",
		.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 155,
		.target_residency = 155,
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
{ 1279 .name = "C8", 1280 .desc = "MWAIT 0x40", 1281 .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, 1282 .exit_latency = 1000, 1283 .target_residency = 1000, 1284 .enter = &intel_idle, 1285 .enter_s2idle = intel_idle_s2idle, }, 1286 { 1287 .name = "C9", 1288 .desc = "MWAIT 0x50", 1289 .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, 1290 .exit_latency = 2000, 1291 .target_residency = 2000, 1292 .enter = &intel_idle, 1293 .enter_s2idle = intel_idle_s2idle, }, 1294 { 1295 .name = "C10", 1296 .desc = "MWAIT 0x60", 1297 .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, 1298 .exit_latency = 10000, 1299 .target_residency = 10000, 1300 .enter = &intel_idle, 1301 .enter_s2idle = intel_idle_s2idle, }, 1302 { 1303 .enter = NULL } 1304 }; 1305 1306 static struct cpuidle_state dnv_cstates[] __initdata = { 1307 { 1308 .name = "C1", 1309 .desc = "MWAIT 0x00", 1310 .flags = MWAIT2flg(0x00), 1311 .exit_latency = 2, 1312 .target_residency = 2, 1313 .enter = &intel_idle, 1314 .enter_s2idle = intel_idle_s2idle, }, 1315 { 1316 .name = "C1E", 1317 .desc = "MWAIT 0x01", 1318 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1319 .exit_latency = 10, 1320 .target_residency = 20, 1321 .enter = &intel_idle, 1322 .enter_s2idle = intel_idle_s2idle, }, 1323 { 1324 .name = "C6", 1325 .desc = "MWAIT 0x20", 1326 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 1327 .exit_latency = 50, 1328 .target_residency = 500, 1329 .enter = &intel_idle, 1330 .enter_s2idle = intel_idle_s2idle, }, 1331 { 1332 .enter = NULL } 1333 }; 1334 1335 /* 1336 * Note, depending on HW and FW revision, SnowRidge SoC may or may not support 1337 * C6, and this is indicated in the CPUID mwait leaf. 
1338 */ 1339 static struct cpuidle_state snr_cstates[] __initdata = { 1340 { 1341 .name = "C1", 1342 .desc = "MWAIT 0x00", 1343 .flags = MWAIT2flg(0x00), 1344 .exit_latency = 2, 1345 .target_residency = 2, 1346 .enter = &intel_idle, 1347 .enter_s2idle = intel_idle_s2idle, }, 1348 { 1349 .name = "C1E", 1350 .desc = "MWAIT 0x01", 1351 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1352 .exit_latency = 15, 1353 .target_residency = 25, 1354 .enter = &intel_idle, 1355 .enter_s2idle = intel_idle_s2idle, }, 1356 { 1357 .name = "C6", 1358 .desc = "MWAIT 0x20", 1359 .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 1360 .exit_latency = 130, 1361 .target_residency = 500, 1362 .enter = &intel_idle, 1363 .enter_s2idle = intel_idle_s2idle, }, 1364 { 1365 .enter = NULL } 1366 }; 1367 1368 static struct cpuidle_state grr_cstates[] __initdata = { 1369 { 1370 .name = "C1", 1371 .desc = "MWAIT 0x00", 1372 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1373 .exit_latency = 1, 1374 .target_residency = 1, 1375 .enter = &intel_idle, 1376 .enter_s2idle = intel_idle_s2idle, }, 1377 { 1378 .name = "C1E", 1379 .desc = "MWAIT 0x01", 1380 .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1381 .exit_latency = 2, 1382 .target_residency = 10, 1383 .enter = &intel_idle, 1384 .enter_s2idle = intel_idle_s2idle, }, 1385 { 1386 .name = "C6S", 1387 .desc = "MWAIT 0x22", 1388 .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, 1389 .exit_latency = 140, 1390 .target_residency = 500, 1391 .enter = &intel_idle, 1392 .enter_s2idle = intel_idle_s2idle, }, 1393 { 1394 .enter = NULL } 1395 }; 1396 1397 static struct cpuidle_state srf_cstates[] __initdata = { 1398 { 1399 .name = "C1", 1400 .desc = "MWAIT 0x00", 1401 .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, 1402 .exit_latency = 1, 1403 .target_residency = 1, 1404 .enter = &intel_idle, 1405 .enter_s2idle = intel_idle_s2idle, }, 1406 { 1407 .name = "C1E", 1408 .desc = "MWAIT 0x01", 1409 .flags = MWAIT2flg(0x01) | 
CPUIDLE_FLAG_ALWAYS_ENABLE, 1410 .exit_latency = 2, 1411 .target_residency = 10, 1412 .enter = &intel_idle, 1413 .enter_s2idle = intel_idle_s2idle, }, 1414 { 1415 .name = "C6S", 1416 .desc = "MWAIT 0x22", 1417 .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | 1418 CPUIDLE_FLAG_PARTIAL_HINT_MATCH, 1419 .exit_latency = 270, 1420 .target_residency = 700, 1421 .enter = &intel_idle, 1422 .enter_s2idle = intel_idle_s2idle, }, 1423 { 1424 .name = "C6SP", 1425 .desc = "MWAIT 0x23", 1426 .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | 1427 CPUIDLE_FLAG_PARTIAL_HINT_MATCH, 1428 .exit_latency = 310, 1429 .target_residency = 900, 1430 .enter = &intel_idle, 1431 .enter_s2idle = intel_idle_s2idle, }, 1432 { 1433 .enter = NULL } 1434 }; 1435 1436 static const struct idle_cpu idle_cpu_nehalem __initconst = { 1437 .state_table = nehalem_cstates, 1438 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, 1439 .disable_promotion_to_c1e = true, 1440 }; 1441 1442 static const struct idle_cpu idle_cpu_nhx __initconst = { 1443 .state_table = nehalem_cstates, 1444 .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, 1445 .disable_promotion_to_c1e = true, 1446 .use_acpi = true, 1447 }; 1448 1449 static const struct idle_cpu idle_cpu_atom __initconst = { 1450 .state_table = atom_cstates, 1451 }; 1452 1453 static const struct idle_cpu idle_cpu_tangier __initconst = { 1454 .state_table = tangier_cstates, 1455 }; 1456 1457 static const struct idle_cpu idle_cpu_lincroft __initconst = { 1458 .state_table = atom_cstates, 1459 .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, 1460 }; 1461 1462 static const struct idle_cpu idle_cpu_snb __initconst = { 1463 .state_table = snb_cstates, 1464 .disable_promotion_to_c1e = true, 1465 }; 1466 1467 static const struct idle_cpu idle_cpu_snx __initconst = { 1468 .state_table = snb_cstates, 1469 .disable_promotion_to_c1e = true, 1470 .use_acpi = true, 1471 }; 1472 1473 static const struct idle_cpu 
idle_cpu_byt __initconst = {
	.state_table = byt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_cht __initconst = {
	.state_table = cht_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivb __initconst = {
	.state_table = ivb_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_ivt __initconst = {
	.state_table = ivt_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_hsw __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_hsx __initconst = {
	.state_table = hsw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bdw __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_bdx __initconst = {
	.state_table = bdw_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_skl __initconst = {
	.state_table = skl_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_skx __initconst = {
	.state_table = skx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_icx __initconst = {
	.state_table = icx_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_adl __initconst = {
	.state_table = adl_cstates,
};

static const struct idle_cpu idle_cpu_adl_l __initconst = {
	.state_table = adl_l_cstates,
};

static const struct idle_cpu idle_cpu_mtl_l __initconst = {
	.state_table = mtl_l_cstates,
};

static const struct idle_cpu idle_cpu_gmt __initconst = {
	.state_table = gmt_cstates,
};

static const struct idle_cpu idle_cpu_spr __initconst = {
	.state_table = spr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_gnr __initconst = {
	.state_table = gnr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_gnrd __initconst = {
	.state_table = gnrd_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_avn __initconst = {
	.state_table = avn_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_knl __initconst = {
	.state_table = knl_cstates,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_bxt __initconst = {
	.state_table = bxt_cstates,
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_dnv __initconst = {
	.state_table = dnv_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/*
 * No native state table - the idle states for Tremont come from ACPI _CST
 * (see intel_idle_cpuidle_driver_init(), which falls back to the ACPI path
 * when ->state_table is NULL).
 */
static const struct idle_cpu idle_cpu_tmt __initconst = {
	.disable_promotion_to_c1e = true,
};

static const struct idle_cpu idle_cpu_snr __initconst = {
	.state_table = snr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_grr __initconst = {
	.state_table = grr_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

static const struct idle_cpu idle_cpu_srf __initconst = {
	.state_table = srf_cstates,
	.disable_promotion_to_c1e = true,
	.use_acpi = true,
};

/* Map CPU models to their idle_cpu configuration. */
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
	X86_MATCH_VFM(INTEL_NEHALEM_EP,		&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_NEHALEM_G,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE,		&idle_cpu_nehalem),
	X86_MATCH_VFM(INTEL_WESTMERE_EP,	&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_NEHALEM_EX,		&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL,	&idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID,	&idle_cpu_lincroft),
	X86_MATCH_VFM(INTEL_WESTMERE_EX,	&idle_cpu_nhx),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&idle_cpu_snb),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&idle_cpu_snx),
	X86_MATCH_VFM(INTEL_ATOM_SALTWELL,	&idle_cpu_atom),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT,	&idle_cpu_byt),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &idle_cpu_tangier),
	X86_MATCH_VFM(INTEL_ATOM_AIRMONT,	&idle_cpu_cht),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&idle_cpu_ivb),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&idle_cpu_ivt),
	X86_MATCH_VFM(INTEL_HASWELL,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&idle_cpu_hsx),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&idle_cpu_hsw),
	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D,	&idle_cpu_avn),
	X86_MATCH_VFM(INTEL_BROADWELL,		&idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&idle_cpu_bdw),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&idle_cpu_bdx),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&idle_cpu_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&idle_cpu_skx),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&idle_cpu_icx),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&idle_cpu_adl),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&idle_cpu_adl_l),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&idle_cpu_mtl_l),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&idle_cpu_gmt),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&idle_cpu_spr),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&idle_cpu_gnr),
	X86_MATCH_VFM(INTEL_GRANITERAPIDS_D,	&idle_cpu_gnrd),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&idle_cpu_knl),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&idle_cpu_knl),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT,	&idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS,	&idle_cpu_bxt),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D,	&idle_cpu_dnv),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	&idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	&idle_cpu_tmt),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D,	&idle_cpu_snr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	&idle_cpu_grr),
	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	&idle_cpu_srf),
	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X,	&idle_cpu_srf),
	{}
};

/* Fallback match: any family 6 Intel CPU with MWAIT, no model-specific data. */
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
	{}
};

/*
 * Return true (and log once per call) if adding one more state past @cstate
 * would exceed the intel_idle.max_cstate module-parameter limit.
 */
static bool __init intel_idle_max_cstate_reached(int cstate)
{
	if (cstate + 1 > max_cstate) {
		pr_info("max_cstate %d reached\n", max_cstate);
		return true;
	}
	return false;
}

/*
 * Return true if entering @state stops the local APIC timer, in which case
 * the caller must set CPUIDLE_FLAG_TIMER_STOP.  With ARAT (Always Running
 * APIC Timer) the timer never stops, so no broadcast is needed.
 */
static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
{
	unsigned long eax = flg2MWAIT(state->flags);

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return false;

	/*
	 * Switch over to one-shot tick broadcast if the target C-state
	 * is deeper than C1.
	 */
	return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
#include <acpi/processor.h>

static bool no_acpi __read_mostly;
module_param(no_acpi, bool, 0444);
MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");

static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");

static bool no_native __read_mostly; /* No effect if no_acpi is set. */
module_param_named(no_native, no_native, bool, 0444);
MODULE_PARM_DESC(no_native, "Ignore cpu specific (native) idle states in lieu of ACPI idle states");

/* _CST data extracted from the first CPU that provides a usable table. */
static struct acpi_processor_power acpi_state_table __initdata;

/**
 * intel_idle_cst_usable - Check if the _CST information can be used.
 *
 * Check if all of the C-states listed by _CST in the max_cstate range are
 * ACPI_CSTATE_FFH, which means that they should be entered via MWAIT.
 */
static bool __init intel_idle_cst_usable(void)
{
	int cstate, limit;

	limit = min_t(int, min_t(int, CPUIDLE_STATE_MAX, max_cstate + 1),
		      acpi_state_table.count);

	/* Index 0 is skipped: the states[] array is used starting at 1. */
	for (cstate = 1; cstate < limit; cstate++) {
		struct acpi_processor_cx *cx = &acpi_state_table.states[cstate];

		if (cx->entry_method != ACPI_CSTATE_FFH)
			return false;
	}

	return true;
}

/*
 * Evaluate _CST for the first CPU that provides it, verify the result is
 * usable (all-FFH) and claim _CST control from the platform firmware.
 * Returns true on success, false if ACPI C-states cannot be used.
 */
static bool __init intel_idle_acpi_cst_extract(void)
{
	unsigned int cpu;

	if (no_acpi) {
		pr_debug("Not allowed to use ACPI _CST\n");
		return false;
	}

	for_each_possible_cpu(cpu) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (!pr)
			continue;

		if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table))
			continue;

		/*
		 * states[] is consumed from index 1 (see the loops over it),
		 * so bump the count to cover that 1-based range.
		 */
		acpi_state_table.count++;

		if (!intel_idle_cst_usable())
			continue;

		if (!acpi_processor_claim_cst_control())
			break;

		return true;
	}

	acpi_state_table.count = 0;
	pr_debug("ACPI _CST not found or not usable\n");
	return false;
}

/*
 * Populate @drv->states[] from the previously extracted ACPI _CST data,
 * honoring max_cstate and the disabled-states mask.
 */
static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
{
	int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);

	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH.
	 */
	for (cstate = 1; cstate < limit; cstate++) {
		struct acpi_processor_cx *cx;
		struct cpuidle_state *state;

		if (intel_idle_max_cstate_reached(cstate - 1))
			break;

		cx = &acpi_state_table.states[cstate];

		state = &drv->states[drv->state_count++];

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		/*
		 * For C1-type C-states use the same number for both the exit
		 * latency and target residency, because that is the case for
		 * C1 in the majority of the static C-states tables above.
		 * For the other types of C-states, however, set the target
		 * residency to 3 times the exit latency which should lead to
		 * a reasonable balance between energy-efficiency and
		 * performance in the majority of interesting cases.
		 */
		state->target_residency = cx->latency;
		if (cx->type > ACPI_STATE_C1)
			state->target_residency *= 3;

		state->flags = MWAIT2flg(cx->address);
		if (cx->type > ACPI_STATE_C2)
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;

		if (disabled_states_mask & BIT(cstate))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			mark_tsc_unstable("TSC halts in idle");

		state->enter = intel_idle;
		state->enter_dead = intel_idle_enter_dead;
		state->enter_s2idle = intel_idle_s2idle;
	}
}

/*
 * Return true if a native state with MWAIT hint @mwait_hint should be
 * disabled by default because _CST does not list a matching state.
 * With CPUIDLE_FLAG_PARTIAL_HINT_MATCH, sub-state bits are ignored when
 * comparing hints.
 */
static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
	int cstate, limit;

	/*
	 * If there are no _CST C-states, do not disable any C-states by
	 * default.
	 */
	if (!acpi_state_table.count)
		return false;

	limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
	/*
	 * If limit > 0, intel_idle_cst_usable() has returned 'true', so all of
	 * the interesting states are ACPI_CSTATE_FFH.
	 */
	for (cstate = 1; cstate < limit; cstate++) {
		u32 acpi_hint = acpi_state_table.states[cstate].address;
		u32 table_hint = mwait_hint;

		if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) {
			acpi_hint &= ~MWAIT_SUBSTATE_MASK;
			table_hint &= ~MWAIT_SUBSTATE_MASK;
		}

		if (acpi_hint == table_hint)
			return false;
	}
	return true;
}

/* True when the user asked to skip native tables (but ACPI is still allowed). */
static inline bool ignore_native(void)
{
	return no_native && !no_acpi;
}
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
#define force_use_acpi	(false)

/* Stubs: without ACPI processor C-state support, ACPI _CST is never used. */
static inline bool intel_idle_acpi_cst_extract(void) { return false; }
static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
	return false;
}
static inline bool ignore_native(void) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */

/**
 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
 *
 * Tune IVT multi-socket targets.
 * Assumption: num_sockets == (max_package_num + 1).
 */
static void __init ivt_idle_state_table_update(void)
{
	/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
	int cpu, package_num, num_sockets = 1;

	for_each_online_cpu(cpu) {
		package_num = topology_physical_package_id(cpu);
		if (package_num + 1 > num_sockets) {
			num_sockets = package_num + 1;

			if (num_sockets > 4) {
				cpuidle_state_table = ivt_cstates_8s;
				return;
			}
		}
	}

	if (num_sockets > 2)
		cpuidle_state_table = ivt_cstates_4s;

	/* else, 1 and 2 socket systems use default ivt_cstates */
}

/**
 * irtl_2_usec - IRTL to microseconds conversion.
 * @irtl: IRTL MSR value.
 *
 * Translate the IRTL (Interrupt Response Time Limit) MSR value to microseconds.
 */
static unsigned long long __init irtl_2_usec(unsigned long long irtl)
{
	/* Nanoseconds per unit, indexed by the 3-bit time-unit field below. */
	static const unsigned int irtl_ns_units[] __initconst = {
		1, 32, 1024, 32768, 1048576, 33554432, 0, 0
	};
	unsigned long long ns;

	if (!irtl)
		return 0;

	/* Bits 12:10 select the time unit, bits 9:0 are the interval count. */
	ns = irtl_ns_units[(irtl >> 10) & 0x7];

	return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
}

/**
 * bxt_idle_state_table_update - Fix up the Broxton idle states table.
 *
 * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
 * definitive maximum latency and use the same value for target_residency.
 */
static void __init bxt_idle_state_table_update(void)
{
	unsigned long long msr;
	unsigned int usec;

	/* bxt_cstates[2..6] are C6, C7s, C8, C9 and C10 respectively. */
	rdmsrl(MSR_PKGC6_IRTL, msr);
	usec = irtl_2_usec(msr);
	if (usec) {
		bxt_cstates[2].exit_latency = usec;
		bxt_cstates[2].target_residency = usec;
	}

	rdmsrl(MSR_PKGC7_IRTL, msr);
	usec = irtl_2_usec(msr);
	if (usec) {
		bxt_cstates[3].exit_latency = usec;
		bxt_cstates[3].target_residency = usec;
	}

	rdmsrl(MSR_PKGC8_IRTL, msr);
	usec = irtl_2_usec(msr);
	if (usec) {
		bxt_cstates[4].exit_latency = usec;
		bxt_cstates[4].target_residency = usec;
	}

	rdmsrl(MSR_PKGC9_IRTL, msr);
	usec = irtl_2_usec(msr);
	if (usec) {
		bxt_cstates[5].exit_latency = usec;
		bxt_cstates[5].target_residency = usec;
	}

	rdmsrl(MSR_PKGC10_IRTL, msr);
	usec = irtl_2_usec(msr);
	if (usec) {
		bxt_cstates[6].exit_latency = usec;
		bxt_cstates[6].target_residency = usec;
	}

}

/**
 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
 *
 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
 */
static void __init sklh_idle_state_table_update(void)
{
	unsigned long long msr;
	unsigned int eax, ebx, ecx, edx;


	/* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
	if (max_cstate <= 7)
		return;

	/* if PC10 not present in CPUID.MWAIT.EDX */
	if ((mwait_substates & (0xF << 28)) == 0)
		return;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* PC10 is not enabled in PKG C-state limit */
	if ((msr & 0xF) != 8)
		return;

	ecx = 0;
	cpuid(7, &eax, &ebx, &ecx, &edx);

	/* if SGX is present */
	if (ebx & (1 << 2)) {

		rdmsrl(MSR_IA32_FEAT_CTL, msr);

		/* if SGX is enabled */
		if (msr & (1 << 18))
			return;
	}

	skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C8-SKL */
	skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C9-SKL */
}

/**
 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
 * idle states table.
 */
static void __init skx_idle_state_table_update(void)
{
	unsigned long long msr;

	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/*
	 * 000b: C0/C1 (no package C-state support)
	 * 001b: C2
	 * 010b: C6 (non-retention)
	 * 011b: C6 (retention)
	 * 111b: No Package C state limits.
	 */
	if ((msr & 0x7) < 2) {
		/*
		 * Uses the CC6 + PC0 latency and 3 times of
		 * latency for target_residency if the PC6
		 * is disabled in BIOS. This is consistent
		 * with how intel_idle driver uses _CST
		 * to set the target_residency.
		 */
		skx_cstates[2].exit_latency = 92;
		skx_cstates[2].target_residency = 276;
	}
}

/**
 * adl_idle_state_table_update - Adjust AlderLake idle states table.
 */
static void __init adl_idle_state_table_update(void)
{
	/* Check if user prefers C1 over C1E.
	 */
	if (preferred_states_mask & BIT(1) && !(preferred_states_mask & BIT(2))) {
		cpuidle_state_table[0].flags &= ~CPUIDLE_FLAG_UNUSABLE;
		cpuidle_state_table[1].flags |= CPUIDLE_FLAG_UNUSABLE;

		/* Disable C1E by clearing the "C1E promotion" bit. */
		c1e_promotion = C1E_PROMOTION_DISABLE;
		return;
	}

	/* Make sure C1E is enabled by default */
	c1e_promotion = C1E_PROMOTION_ENABLE;
}

/**
 * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
 */
static void __init spr_idle_state_table_update(void)
{
	unsigned long long msr;

	/*
	 * By default, the C6 state assumes the worst-case scenario of package
	 * C6. However, if PC6 is disabled, we update the numbers to match
	 * core C6.
	 */
	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr);

	/* Limit value 2 and above allow for PC6. */
	if ((msr & 0x7) < 2) {
		spr_cstates[2].exit_latency = 190;
		spr_cstates[2].target_residency = 600;
	}
}

/**
 * byt_cht_auto_demotion_disable - Disable Bay/Cherry Trail auto-demotion.
 */
static void __init byt_cht_auto_demotion_disable(void)
{
	wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
	wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}

/*
 * Return true if the C-state identified by @mwait_hint is actually supported
 * by this CPU, i.e. CPUID.MWAIT reports at least one sub-state for it.
 * Also marks the TSC unstable when a supported state deeper than C2 would
 * halt it (no NONSTOP_TSC).
 */
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
	unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) &
					MWAIT_CSTATE_MASK;
	unsigned int num_substates = (mwait_substates >> mwait_cstate * 4) &
					MWAIT_SUBSTATE_MASK;

	/* Ignore the C-state if there are NO sub-states in CPUID for it. */
	if (num_substates == 0)
		return false;

	if (mwait_cstate > 2 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halts in idle states deeper than C2");

	return true;
}

/*
 * Select the ->enter callback for @state based on its flags and the
 * system configuration (XSTATE init, IBRS mitigation, IRQ-on entry).
 */
static void state_update_enter_method(struct cpuidle_state *state, int cstate)
{
	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) {
		/*
		 * Combining CPUIDLE_FLAG_INIT_XSTATE with the IBRS or
		 * IRQ_ENABLE flags is not currently supported by this driver.
		 */
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS);
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
		state->enter = intel_idle_xstate;
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
	    ((state->flags & CPUIDLE_FLAG_IBRS) || ibrs_off)) {
		/*
		 * IBRS mitigation requires that C-states are entered
		 * with interrupts disabled.
		 */
		if (ibrs_off && (state->flags & CPUIDLE_FLAG_IRQ_ENABLE))
			state->flags &= ~CPUIDLE_FLAG_IRQ_ENABLE;
		WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE);
		state->enter = intel_idle_ibrs;
		return;
	}

	if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) {
		state->enter = intel_idle_irq;
		return;
	}

	if (force_irq_on) {
		pr_info("forced intel_idle_irq for state %d\n", cstate);
		state->enter = intel_idle_irq;
	}
}

/*
 * Populate @drv->states[] from the native (per-model) C-state table:
 * apply model-specific table fixups first, then copy each usable state,
 * select its enter method and apply the disable/timer-stop flags.
 */
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
	int cstate;

	/* Model-specific table adjustments before the states are copied. */
	switch (boot_cpu_data.x86_vfm) {
	case INTEL_IVYBRIDGE_X:
		ivt_idle_state_table_update();
		break;
	case INTEL_ATOM_GOLDMONT:
	case INTEL_ATOM_GOLDMONT_PLUS:
		bxt_idle_state_table_update();
		break;
	case INTEL_SKYLAKE:
		sklh_idle_state_table_update();
		break;
	case INTEL_SKYLAKE_X:
		skx_idle_state_table_update();
		break;
	case INTEL_SAPPHIRERAPIDS_X:
	case INTEL_EMERALDRAPIDS_X:
		spr_idle_state_table_update();
		break;
	case INTEL_ALDERLAKE:
	case INTEL_ALDERLAKE_L:
	case INTEL_ATOM_GRACEMONT:
		adl_idle_state_table_update();
		break;
	case INTEL_ATOM_SILVERMONT:
	case INTEL_ATOM_AIRMONT:
		byt_cht_auto_demotion_disable();
		break;
	}

	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
		struct cpuidle_state *state;
		unsigned int mwait_hint;

		if (intel_idle_max_cstate_reached(cstate))
			break;

		/* A table is terminated by an entry with both enters NULL. */
		if (!cpuidle_state_table[cstate].enter &&
		    !cpuidle_state_table[cstate].enter_s2idle)
			break;

		if (!cpuidle_state_table[cstate].enter_dead)
			cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead;

		/* If marked as unusable, skip this state. */
		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
			pr_debug("state %s is disabled\n",
				 cpuidle_state_table[cstate].name);
			continue;
		}

		mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
		if (!intel_idle_verify_cstate(mwait_hint))
			continue;

		/* Structure copy. */
		drv->states[drv->state_count] = cpuidle_state_table[cstate];
		state = &drv->states[drv->state_count];

		state_update_enter_method(state, cstate);


		if ((disabled_states_mask & BIT(drv->state_count)) ||
		    ((icpu->use_acpi || force_use_acpi) &&
		     intel_idle_off_by_default(state->flags, mwait_hint) &&
		     !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
			state->flags |= CPUIDLE_FLAG_OFF;

		if (intel_idle_state_needs_timer_stop(state))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		drv->state_count++;
	}
}

/**
 * intel_idle_cpuidle_driver_init - Create the list of available idle states.
 * @drv: cpuidle driver structure to initialize.
2224 */ 2225 static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv) 2226 { 2227 cpuidle_poll_state_init(drv); 2228 2229 if (disabled_states_mask & BIT(0)) 2230 drv->states[0].flags |= CPUIDLE_FLAG_OFF; 2231 2232 drv->state_count = 1; 2233 2234 if (icpu && icpu->state_table) 2235 intel_idle_init_cstates_icpu(drv); 2236 else 2237 intel_idle_init_cstates_acpi(drv); 2238 } 2239 2240 static void auto_demotion_disable(void) 2241 { 2242 unsigned long long msr_bits; 2243 2244 rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2245 msr_bits &= ~auto_demotion_disable_flags; 2246 wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 2247 } 2248 2249 static void c1e_promotion_enable(void) 2250 { 2251 unsigned long long msr_bits; 2252 2253 rdmsrl(MSR_IA32_POWER_CTL, msr_bits); 2254 msr_bits |= 0x2; 2255 wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2256 } 2257 2258 static void c1e_promotion_disable(void) 2259 { 2260 unsigned long long msr_bits; 2261 2262 rdmsrl(MSR_IA32_POWER_CTL, msr_bits); 2263 msr_bits &= ~0x2; 2264 wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 2265 } 2266 2267 /** 2268 * intel_idle_cpu_init - Register the target CPU with the cpuidle core. 2269 * @cpu: CPU to initialize. 2270 * 2271 * Register a cpuidle device object for @cpu and update its MSRs in accordance 2272 * with the processor model flags. 
2273 */ 2274 static int intel_idle_cpu_init(unsigned int cpu) 2275 { 2276 struct cpuidle_device *dev; 2277 2278 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); 2279 dev->cpu = cpu; 2280 2281 if (cpuidle_register_device(dev)) { 2282 pr_debug("cpuidle_register_device %d failed!\n", cpu); 2283 return -EIO; 2284 } 2285 2286 if (auto_demotion_disable_flags) 2287 auto_demotion_disable(); 2288 2289 if (c1e_promotion == C1E_PROMOTION_ENABLE) 2290 c1e_promotion_enable(); 2291 else if (c1e_promotion == C1E_PROMOTION_DISABLE) 2292 c1e_promotion_disable(); 2293 2294 return 0; 2295 } 2296 2297 static int intel_idle_cpu_online(unsigned int cpu) 2298 { 2299 struct cpuidle_device *dev; 2300 2301 if (!boot_cpu_has(X86_FEATURE_ARAT)) 2302 tick_broadcast_enable(); 2303 2304 /* 2305 * Some systems can hotplug a cpu at runtime after 2306 * the kernel has booted, we have to initialize the 2307 * driver in this case 2308 */ 2309 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); 2310 if (!dev->registered) 2311 return intel_idle_cpu_init(cpu); 2312 2313 return 0; 2314 } 2315 2316 /** 2317 * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices. 
 */
static void __init intel_idle_cpuidle_devices_uninit(void)
{
	int i;

	for_each_online_cpu(i)
		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
}

/*
 * intel_idle_init - Probe the CPU, build the driver and register everything.
 *
 * Bail out if another idle method was selected on the command line, the CPU
 * is not supported, or MWAIT is unusable.  Otherwise pick the native or
 * ACPI-derived state table, register the cpuidle driver and set up a CPU
 * hotplug callback to bring each CPU online with it.
 */
static int __init intel_idle_init(void)
{
	const struct x86_cpu_id *id;
	unsigned int eax, ebx, ecx;
	int retval;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	/* intel_idle.max_cstate=0 disables the driver entirely. */
	if (max_cstate == 0) {
		pr_debug("disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (id) {
		if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
			pr_debug("Please enable MWAIT in BIOS SETUP\n");
			return -ENODEV;
		}
	} else {
		id = x86_match_cpu(intel_mwait_ids);
		if (!id)
			return -ENODEV;
	}

	cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates);

	/* MWAIT C-state extensions and interrupt break-event are required. */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug("MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	if (icpu && ignore_native()) {
		pr_debug("ignoring native CPU idle states\n");
		icpu = NULL;
	}
	if (icpu) {
		if (icpu->state_table)
			cpuidle_state_table = icpu->state_table;
		else if (!intel_idle_acpi_cst_extract())
			return -ENODEV;

		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
		if (icpu->disable_promotion_to_c1e)
			c1e_promotion = C1E_PROMOTION_DISABLE;
		if (icpu->use_acpi || force_use_acpi)
			intel_idle_acpi_cst_extract();
	} else if (!intel_idle_acpi_cst_extract()) {
		return -ENODEV;
	}

	pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
		 boot_cpu_data.x86_model);

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (!intel_idle_cpuidle_devices)
		return -ENOMEM;

	intel_idle_cpuidle_driver_init(&intel_idle_driver);

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		/* Another cpuidle driver got there first. */
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
		       drv ? drv->name : "none");
		goto init_driver_fail;
	}

	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
				   intel_idle_cpu_online, NULL);
	if (retval < 0)
		goto hp_setup_fail;

	pr_debug("Local APIC timer is reliable in %s\n",
		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");

	return 0;

hp_setup_fail:
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);
init_driver_fail:
	free_percpu(intel_idle_cpuidle_devices);
	return retval;
}
device_initcall(intel_idle_init);

/*
 * We are not really modular, but we used to support that.  Meaning we also
 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
 * is the easiest way (currently) to continue doing that.
 */
module_param(max_cstate, int, 0444);
/*
 * The positions of the bits that are set in this number are the indices of the
 * idle states to be disabled by default (as reflected by the names of the
 * corresponding idle state directories in sysfs, "state0", "state1" ...
 * "state<i>" ..., where <i> is the index of the given state).
 */
module_param_named(states_off, disabled_states_mask, uint, 0444);
MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
/*
 * Some platforms come with mutually exclusive C-states, so that if one is
 * enabled, the other C-states must not be used.  Example: C1 and C1E on
 * Sapphire Rapids platform.  This parameter allows for selecting the
 * preferred C-states among the groups of mutually exclusive C-states - the
 * selected C-states will be registered, the other C-states from the mutually
 * exclusive group won't be registered.  If the platform has no mutually
 * exclusive C-states, this parameter has no effect.
 */
module_param_named(preferred_cstates, preferred_states_mask, uint, 0444);
MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
/*
 * Debugging option that forces the driver to enter all C-states with
 * interrupts enabled.  Does not apply to C-states with
 * 'CPUIDLE_FLAG_INIT_XSTATE' and 'CPUIDLE_FLAG_IBRS' flags.
 */
module_param(force_irq_on, bool, 0444);
/*
 * Force the disabling of IBRS when X86_FEATURE_KERNEL_IBRS is on and
 * CPUIDLE_FLAG_IRQ_ENABLE isn't set.
 */
module_param(ibrs_off, bool, 0444);
MODULE_PARM_DESC(ibrs_off, "Disable IBRS when idle");