/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no state supports off-lining.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

/**
 * cpuidle_enabled - check if the cpuidle framework is ready
 * @drv: cpuidle driver for this cpu
 * @dev: cpuidle device for this cpu
 *
 * Return 0 on success, otherwise:
 * -ENODEV : the cpuidle framework is not available or not initialized
 * -EBUSY  : the driver or device is missing, or the device is not enabled
 */
int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	if (off || !initialized)
		return -ENODEV;

	if (!drv || !dev || !dev->enabled)
		return -EBUSY;

	return 0;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update the cpuidle counters.  This could be done in the
		 * driver's enter routine instead, but that would result in
		 * multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
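/*
 * Illustrative sketch (not part of this file): the kind of driver
 * ->enter() callback that cpuidle_enter_state() invokes above.  It runs
 * with interrupts disabled and returns the index of the state actually
 * entered; for non-coupled states, cpuidle_enter_state() re-enables
 * interrupts afterwards.  my_idle_enter() and my_arch_wfi() are
 * hypothetical names.
 *
 *	static int my_idle_enter(struct cpuidle_device *dev,
 *				 struct cpuidle_driver *drv, int index)
 *	{
 *		my_arch_wfi();		// arch-specific wait-for-interrupt
 *		return index;		// report the state actually entered
 *	}
 */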
/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 * @drv: the cpuidle driver tied with the cpu
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state table, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what state we were in
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
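/*
 * Illustrative sketch (hypothetical caller, not part of this file) of the
 * pairings above: cpuidle_pause()/cpuidle_resume() around a transition
 * that must not race the idle loop, and cpuidle_pause_and_lock()/
 * cpuidle_resume_and_unlock() when per-device calls such as
 * cpuidle_disable_device() below must run under cpuidle_lock:
 *
 *	cpuidle_pause();
 *	// ... platform suspend-style transition ...
 *	cpuidle_resume();
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	// ... reconfigure the device ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */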
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
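/*
 * Illustrative sketch (not part of this file): the "manual" registration
 * path using cpuidle_register_device() directly, as opposed to the
 * cpuidle_register() helper below.  my_driver and my_devices[] are
 * hypothetical; cleanup on failure is elided.
 *
 *	ret = cpuidle_register_driver(&my_driver);
 *	if (ret)
 *		return ret;
 *
 *	for_each_possible_cpu(cpu) {
 *		my_devices[cpu].cpu = cpu;
 *		ret = cpuidle_register_device(&my_devices[cpu]);
 *		if (ret)
 *			break;
 *	}
 */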
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices
 * @drv: a valid pointer to a struct cpuidle_driver
 *
 * This function can only be used if the driver has been previously
 * registered through the cpuidle_register() function.
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter
 * @drv: a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * This function handles the common initialization pattern found in the
 * arch-specific drivers.  The devices are globally defined in this file.
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it.  Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
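/*
 * Illustrative sketch (hypothetical driver, not part of this file) of the
 * common pattern cpuidle_register() supports: define the driver and its
 * states once, then register the driver and the per-cpu devices in a
 * single call.  my_idle_driver and my_idle_enter() are hypothetical.
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name			= "my_idle",
 *		.owner			= THIS_MODULE,
 *		.states[0]		= {
 *			.enter			= my_idle_enter,
 *			.exit_latency		= 2,
 *			.target_residency	= 10,
 *			.name			= "WFI",
 *			.desc			= "wait for interrupt",
 *		},
 *		.state_count		= 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */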
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their
 * C-state, and then recalculate a new suitable C-state.  Just do a
 * cross-cpu IPI; that wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
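/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * PM QoS side that triggers cpuidle_latency_notify() above.  Adding,
 * updating or removing a PM_QOS_CPU_DMA_LATENCY request fires the
 * notifier chain, kicking every CPU out of its current C-state so the
 * governor can pick one that honors the new bound (20 us here).
 *
 *	static struct pm_qos_request my_qos_req;
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	// ... latency-sensitive section ...
 *	pm_qos_remove_request(&my_qos_req);
 */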