/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2010, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * intel_idle is a cpuidle driver that loads on specific Intel processors
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have the same idle states as the boot CPU.
 *
 * The chipset BM_STS (bus master status) bit is a NOP
 * for preventing entry into deep C-states.
 */

/*
 * Known limitations
 *
 * The driver currently initializes for_each_online_cpu() upon modprobe.
 * It is unaware of processors subsequently hot-added to the system.
 * This means that if you boot with maxcpus=n and later online
 * processors above n, those processors will use C1 only.
 *
 * ACPI has a .suspend hack to turn off deep C-states during suspend
 * to avoid complications with the LAPIC timer workaround.
 * Have not seen issues with suspend, but may need the same workaround here.
 *
 * There is currently no kernel-based automatic probing/loading mechanism
 * if the driver is built as a module.
 */

/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>	/* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/mwait.h>
#include <asm/msr.h>

#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;

static unsigned int mwait_substates;

#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
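/*
 * Illustration (derived from the code above, not from a hardware spec):
 * lapic_timer_reliable_states is a per-C-state bitmask.  Bit n set means
 * the local APIC timer keeps ticking in Cn, so no broadcast-timer
 * handover is needed on entry.  E.g. the default (1 << 1) == 0x2 trusts
 * only C1, while a CPU with ARAT gets LAPIC_TIMER_ALWAYS_RELIABLE ==
 * 0xFFFFFFFF, trusting every state.
 */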
struct idle_cpu {
	struct cpuidle_state *state_table;

	/*
	 * Hardware C-state auto-demotion may not always be optimal.
	 * Indicate which enable bits to clear here.
	 */
	unsigned long auto_demotion_disable_flags;
};

static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);

static struct cpuidle_state *cpuidle_state_table;

/*
 * Set this flag for states where the HW flushes the TLB for us
 * and so we don't need cross-calls to keep it consistent.
 * If this flag is set, SW flushes the TLB, so even if the
 * HW doesn't do the flushing, this flag is safe to use.
 */
#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "C1-NHM",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "C3-NHM",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "C6-NHM",
		.desc = "MWAIT 0x20",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle },
};

static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "C1-SNB",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "C3-SNB",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "C6-SNB",
		.desc = "MWAIT 0x20",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle },
	{ /* MWAIT C4 */
		.name = "C7-SNB",
		.desc = "MWAIT 0x30",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle },
};

static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "C1-ATM",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "C2-ATM",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */ },
	{ /* MWAIT C4 */
		.name = "C4-ATM",
		.desc = "MWAIT 0x30",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle },
	{ /* MWAIT C5 */ },
	{ /* MWAIT C6 */
		.name = "C6-ATM",
		.desc = "MWAIT 0x52",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle },
};
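/*
 * For illustration: an MWAIT hint in EAX encodes the target state as
 * ((cstate - 1) << MWAIT_SUBSTATE_SIZE) | substate, in 4-bit fields.
 * So "MWAIT 0x10" above requests C2 (C-state field 1, substate 0), and
 * Atom C6 uses the non-obvious hint 0x52 (C-state field 5, substate 2).
 * get_driver_data() below returns these hints by cstate index.
 */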
static long get_driver_data(int cstate)
{
	int driver_data;
	switch (cstate) {

	case 1:	/* MWAIT C1 */
		driver_data = 0x00;
		break;
	case 2:	/* MWAIT C2 */
		driver_data = 0x10;
		break;
	case 3:	/* MWAIT C3 */
		driver_data = 0x20;
		break;
	case 4:	/* MWAIT C4 */
		driver_data = 0x30;
		break;
	case 5:	/* MWAIT C5 */
		driver_data = 0x40;
		break;
	case 6:	/* MWAIT C6 */
		driver_data = 0x52;
		break;
	default:
		driver_data = 0x00;
	}
	return driver_data;
}

/**
 * intel_idle
 * @dev: cpuidle_device
 * @drv: cpuidle driver
 * @index: index of cpuidle state
 *
 * Must be called under local_irq_disable().
 */
static int intel_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLB's associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
	if (!need_resched()) {

		/*
		 * Arm the monitor on this thread's flags word, then
		 * re-check need_resched(): a TIF_NEED_RESCHED write that
		 * raced the first check aborts the MWAIT instead of
		 * sleeping through the wakeup.
		 */
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	/* Update cpuidle counters */
	dev->last_residency = (int)usec_delta;

	return index;
}
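/*
 * Sketch of the timer handover this driver relies on: when a CPU enters
 * a C-state whose bit is clear in lapic_timer_reliable_states, the
 * BROADCAST_ENTER notification above hands its timer duties to the
 * broadcast clockevent device, and BROADCAST_EXIT takes them back on
 * wakeup.  __setup_broadcast_timer() below switches that machinery on
 * (or off) per CPU via CLOCK_EVT_NOTIFY_BROADCAST_ON/OFF.
 */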
static void __setup_broadcast_timer(void *arg)
{
	unsigned long reason = (unsigned long)arg;
	int cpu = smp_processor_id();

	reason = reason ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &cpu);
}

static int cpu_hotplug_notify(struct notifier_block *n,
			      unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & 0xf) {
	case CPU_ONLINE:

		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
			smp_call_function_single(hotcpu, __setup_broadcast_timer,
						 (void *)true, 1);

		/*
		 * Some systems can hotplug a cpu at runtime after
		 * the kernel has booted; we have to initialize the
		 * driver in this case.
		 */
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
		if (!dev->registered)
			intel_idle_cpu_init(hotcpu);

		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_hotplug_notifier = {
	.notifier_call = cpu_hotplug_notify,
};

static void auto_demotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
	msr_bits &= ~(icpu->auto_demotion_disable_flags);
	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}

static const struct idle_cpu idle_cpu_nehalem = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_atom = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_lincroft = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb = {
	.state_table = snb_cstates,
};

#define ICPU(model, cpu) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

static const struct x86_cpu_id intel_idle_ids[] = {
	ICPU(0x1a, idle_cpu_nehalem),
	ICPU(0x1e, idle_cpu_nehalem),
	ICPU(0x1f, idle_cpu_nehalem),
	ICPU(0x25, idle_cpu_nehalem),
	ICPU(0x2c, idle_cpu_nehalem),
	ICPU(0x2e, idle_cpu_nehalem),
	ICPU(0x1c, idle_cpu_atom),
	ICPU(0x26, idle_cpu_lincroft),
	ICPU(0x2f, idle_cpu_nehalem),
	ICPU(0x2a, idle_cpu_snb),
	ICPU(0x2d, idle_cpu_snb),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);

/*
 * intel_idle_probe()
 */
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;
	const struct x86_cpu_id *id;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (!id) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 6)
			pr_debug(PREFIX "does not run on family %d model %d\n",
				boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	cpuidle_state_table = icpu->state_table;

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
	else
		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);

	register_cpu_notifier(&cpu_hotplug_notifier);

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);
	return 0;
}
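/*
 * Worked example (values illustrative, not from a specific part):
 * CPUID.05H:EDX packs the number of MWAIT substates into 4-bit fields,
 * one nibble per C-state.  If mwait_substates == 0x1120, then
 * (0x1120 >> (1 * 4)) & MWAIT_SUBSTATE_MASK == 2 substates for C1 and
 * (0x1120 >> (3 * 4)) & MWAIT_SUBSTATE_MASK == 1 substate for C3;
 * the init paths below skip any C-state whose nibble is zero.
 */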
/*
 * intel_idle_cpuidle_devices_uninit()
 * unregister, free cpuidle_devices
 */
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
}

/*
 * intel_idle_cpuidle_driver_init()
 * allocate, initialize cpuidle_states
 */
static int intel_idle_cpuidle_driver_init(void)
{
	int cstate;
	struct cpuidle_driver *drv = &intel_idle_driver;

	drv->state_count = 1;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates;

		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n",
				max_cstate);
			break;
		}

		/* does the state exist in CPUID.MWAIT? */
		num_substates = (mwait_substates >> ((cstate) * 4))
					& MWAIT_SUBSTATE_MASK;
		if (num_substates == 0)
			continue;
		/* is the state not enabled? */
		if (cpuidle_state_table[cstate].enter == NULL) {
			/* does the driver not know about the state? */
			if (*cpuidle_state_table[cstate].name == '\0')
				pr_debug(PREFIX "unaware of model 0x%x"
					" MWAIT %d please"
					" contact lenb@kernel.org\n",
					boot_cpu_data.x86_model, cstate);
			continue;
		}

		if ((cstate > 2) &&
			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[cstate];

		drv->state_count += 1;
	}

	if (icpu->auto_demotion_disable_flags)
		on_each_cpu(auto_demotion_disable, NULL, 1);

	return 0;
}

/*
 * intel_idle_cpu_init()
 * allocate, initialize, register cpuidle_devices
 * @cpu: cpu/core to initialize
 */
static int intel_idle_cpu_init(int cpu)
{
	int cstate;
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);

	dev->state_count = 1;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates;

		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n", max_cstate);
			break;
		}

		/* does the state exist in CPUID.MWAIT? */
		num_substates = (mwait_substates >> ((cstate) * 4))
					& MWAIT_SUBSTATE_MASK;
		if (num_substates == 0)
			continue;
		/* is the state not enabled? */
		if (cpuidle_state_table[cstate].enter == NULL)
			continue;

		dev->states_usage[dev->state_count].driver_data =
			(void *)get_driver_data(cstate);

		dev->state_count += 1;
	}

	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
		intel_idle_cpuidle_devices_uninit();
		return -EIO;
	}

	if (icpu->auto_demotion_disable_flags)
		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);

	return 0;
}
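/*
 * Usage note (behavior implied by the module_param below): max_cstate
 * is read-only at runtime (mode 0444), so it must be set at load time,
 * e.g. "intel_idle.max_cstate=1" on the kernel command line to limit
 * the driver to C1, or "max_cstate=0" via modprobe to disable it.
 */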
static int __init intel_idle_init(void)
{
	int retval, i;

	/* Do not load intel_idle at all for now if idle= is passed */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	intel_idle_cpuidle_driver_init();
	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s\n",
			cpuidle_get_driver()->name);
		return retval;
	}

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL) {
		cpuidle_unregister_driver(&intel_idle_driver);
		return -ENOMEM;
	}

	for_each_online_cpu(i) {
		retval = intel_idle_cpu_init(i);
		if (retval) {
			cpuidle_unregister_driver(&intel_idle_driver);
			return retval;
		}
	}

	return 0;
}

static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
	unregister_cpu_notifier(&cpu_hotplug_notifier);
}

module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");