/*
 * intel_idle.c - native hardware idle loop for modern Intel processors
 *
 * Copyright (c) 2010, Intel Corporation.
 * Len Brown <len.brown@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * intel_idle is a cpuidle driver that loads on specific Intel processors
 * in lieu of the legacy ACPI processor_idle driver.  The intent is to
 * make Linux more efficient on these processors, as intel_idle knows
 * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
 */

/*
 * Design Assumptions
 *
 * All CPUs have same idle states as boot CPU
 *
 * Chipset BM_STS (bus master status) bit is a NOP
 *	for preventing entry into deep C-states
 */

/*
 * Known limitations
 *
 * The driver currently sets up cpuidle devices for_each_online_cpu()
 * at modprobe time.  It is unaware of subsequent processors hot-added
 * to the system.  This means that if you boot with maxcpus=n and later
 * online processors above n, those processors will use C1 only.
 *
 * ACPI has a .suspend hack to turn off deep C-states during suspend
 * to avoid complications with the lapic timer workaround.
 * Have not seen issues with suspend, but may need same workaround here.
 *
 * There is currently no kernel-based automatic probing/loading mechanism
 * if the driver is built as a module.
 */

/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>	/* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
#include <asm/mwait.h>

#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables the driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;

static unsigned int mwait_substates;

/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */

static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);

static struct cpuidle_state *cpuidle_state_table;

/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */
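/*
 * Each table entry's .driver_data is the raw MWAIT hint: bits 7:4 hold
 * (C-state - 1) and bits 3:0 hold the sub-state, which is why intel_idle()
 * below recovers the C-state number with
 * ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1.
 * For example, hint 0x10 requests MWAIT C2, advertised as "NHM-C3" below.
 */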
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "NHM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "NHM-C3",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "NHM-C6",
		.desc = "MWAIT 0x20",
		.driver_data = (void *) 0x20,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle },
};

static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "SNB-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "SNB-C3",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 160,
		.enter = &intel_idle },
	{ /* MWAIT C3 */
		.name = "SNB-C6",
		.desc = "MWAIT 0x20",
		.driver_data = (void *) 0x20,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 208,
		.enter = &intel_idle },
	{ /* MWAIT C4 */
		.name = "SNB-C7",
		.desc = "MWAIT 0x30",
		.driver_data = (void *) 0x30,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 300,
		.enter = &intel_idle },
};

static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "ATM-C1",
		.desc = "MWAIT 0x00",
		.driver_data = (void *) 0x00,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "ATM-C2",
		.desc = "MWAIT 0x10",
		.driver_data = (void *) 0x10,
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */ },
	{ /* MWAIT C4 */
		.name = "ATM-C4",
		.desc = "MWAIT 0x30",
		.driver_data = (void *) 0x30,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle },
	{ /* MWAIT C5 */ },
	{ /* MWAIT C6 */
		.name = "ATM-C6",
		.desc = "MWAIT 0x52",
		.driver_data = (void *) 0x52,
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle },
};

/**
 * intel_idle - enter the C-state described by @state via MONITOR/MWAIT
 * @dev: cpuidle_device for this CPU
 * @state: cpuidle state to enter; its driver_data is the MWAIT hint
 *
 * Returns the number of microseconds spent in the state.
 */
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	local_irq_disable();

	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLBs associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
#ifndef MODULE
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
#endif
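	/*
	 * MONITOR is armed on the current thread_info flags word, so a
	 * wakeup that sets TIF_NEED_RESCHED writes to the monitored line
	 * and causes MWAIT to return.  need_resched() is re-checked after
	 * MONITOR to close the race with a reschedule request that arrived
	 * between the first check and arming the monitor.
	 */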
	if (!need_resched()) {

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	return usec_delta;
}

/*
 * intel_idle_probe()
 */
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return -ENODEV;

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
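	/*
	 * CPUID leaf 5 (MONITOR/MWAIT): ECX advertises the MWAIT extensions
	 * checked below, and EDX (saved in mwait_substates) packs the number
	 * of sub-states supported for each C-state into 4-bit fields, which
	 * intel_idle_cpuidle_devices_init() consults before exposing a state.
	 */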
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
		lapic_timer_reliable_states = 0xFFFFFFFF;

	if (boot_cpu_data.x86 != 6)	/* family 6 */
		return -ENODEV;

	switch (boot_cpu_data.x86_model) {

	case 0x1A:	/* Core i7, Xeon 5500 series */
	case 0x1E:	/* Core i7 and i5 Processor - Lynnfield Jasper Forest */
	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
	case 0x2E:	/* Nehalem-EX Xeon */
	case 0x2F:	/* Westmere-EX Xeon */
		lapic_timer_reliable_states = (1 << 1);	/* C1 */
		/* fall through: these models use the Nehalem state table too */

	case 0x25:	/* Westmere */
	case 0x2C:	/* Westmere */
		cpuidle_state_table = nehalem_cstates;
		break;

	case 0x1C:	/* 28 - Atom Processor */
	case 0x26:	/* 38 - Lincroft Atom Processor */
		lapic_timer_reliable_states = (1 << 1); /* C1 */
		cpuidle_state_table = atom_cstates;
		break;

	case 0x2A:	/* SNB */
	case 0x2D:	/* SNB Xeon */
		cpuidle_state_table = snb_cstates;
		break;
#ifdef FUTURE_USE
	case 0x17:	/* 23 - Core 2 Duo */
		lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
#endif

	default:
		pr_debug(PREFIX "does not run on family %d model %d\n",
			boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		lapic_timer_reliable_states);
	return 0;
}

/*
 * intel_idle_cpuidle_devices_uninit()
 * unregister, free cpuidle_devices
 */
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
	return;
}
/*
 * intel_idle_cpuidle_devices_init()
 * allocate, initialize, register cpuidle_devices
 */
static int intel_idle_cpuidle_devices_init(void)
{
	int i, cstate;
	struct cpuidle_device *dev;

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);

		dev->state_count = 1;

		for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
			int num_substates;

			if (cstate > max_cstate) {
				printk(PREFIX "max_cstate %d reached\n",
					max_cstate);
				break;
			}

			/* does the state exist in CPUID.MWAIT? */
			num_substates = (mwait_substates >> ((cstate) * 4))
						& MWAIT_SUBSTATE_MASK;
			if (num_substates == 0)
				continue;
			/* is the state not enabled? */
			if (cpuidle_state_table[cstate].enter == NULL) {
				/* does the driver not know about the state? */
				if (*cpuidle_state_table[cstate].name == '\0')
					pr_debug(PREFIX "unaware of model 0x%x"
						" MWAIT %d please"
						" contact lenb@kernel.org",
						boot_cpu_data.x86_model, cstate);
				continue;
			}

			if ((cstate > 2) &&
				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

			dev->states[dev->state_count] =	/* structure copy */
				cpuidle_state_table[cstate];

			dev->state_count += 1;
		}

		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
				 i);
			intel_idle_cpuidle_devices_uninit();
			return -EIO;
		}
	}

	return 0;
}

static int __init intel_idle_init(void)
{
	int retval;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
			cpuidle_get_driver()->name);
		return retval;
	}

	retval = intel_idle_cpuidle_devices_init();
	if (retval) {
		cpuidle_unregister_driver(&intel_idle_driver);
		return retval;
	}

	return 0;
}

static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	return;
}

module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");