/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i, dead_state = -1;
	int power_usage = -1;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->power_usage < power_usage && s->enter_dead) {
			power_usage = s->power_usage;
			dead_state = i;
		}
	}

	if (dead_state != -1)
		return drv->states[dead_state].enter_dead(dev, dead_state);

	return -ENODEV;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int next_state)
{
	int entered_state;

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	trace_power_end_rcuidle(dev->cpu);
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index,
				int (*enter)(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int index))
{
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	index = enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv = cpuidle_get_driver();

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;
	if (!drv || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	cpuidle_enter_ops = drv->en_core_tk_irqen ?
		cpuidle_enter_tk : cpuidle_enter;

	poll_idle_init(drv);

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(cpu_dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(cpu_dev);
	wait_for_completion(&dev->kobj_unregister);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(cpuidle_driver->owner);
	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
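/*
 * Illustrative sketch (not part of cpuidle.c): roughly how a platform
 * driver might feed the registration path implemented above.  All "my_*"
 * names are hypothetical, the idle entry is a stub, and
 * cpuidle_register_driver() is provided by driver.c rather than this file.
 */
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* A real driver would execute its low-power wait here. */
	return index;		/* report the state actually entered */
}

static struct cpuidle_driver my_idle_driver = {
	.name			= "my_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,	/* core does timekeeping/IRQ enable */
	.states[0] = {
		.name			= "WFI",
		.desc			= "hypothetical shallow idle state",
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.enter			= my_idle_enter,
	},
	.state_count = 1,
};

static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);

static int __init my_idle_init(void)
{
	int cpu, ret;

	ret = cpuidle_register_driver(&my_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);

		dev->cpu = cpu;
		/* Runs __cpuidle_register_device() + cpuidle_enable_device() */
		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(my_idle_init);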