/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;

        /* check if the device is ready */
        if (!dev || !dev->enabled) {
                if (pm_idle_old)
                        pm_idle_old();
                else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
                        default_idle();
#else
                        local_irq_enable();
#endif
                return;
        }

        /*
         * run any timers that can be run now, at this point
         * before calculating the idle duration etc.
         */
        hrtimer_peek_ahead_timers();

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched())
                return;
        target_state = &dev->states[next_state];

        /* enter the state and update stats */
        dev->last_state = target_state;
        dev->last_residency = target_state->enter(dev, target_state);

        /*
         * the driver may have updated last_state to reflect the state it
         * actually entered (e.g. after demoting); account against that one
         */
        if (dev->last_state)
                target_state = dev->last_state;

        target_state->time += (unsigned long long)dev->last_residency;
        target_state->usage++;

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                pm_idle = cpuidle_idle_call;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
                pm_idle = pm_idle_old;
                cpuidle_kick_cpus();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
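/*
 * Usage sketch (hypothetical caller, illustrative only - "dev" here is
 * whatever cpuidle_device the caller owns): an external user rewriting a
 * CPU's C-state table at run time is expected to bracket the update with
 * the pair above, roughly:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... rebuild dev->states[] ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 *
 * Pausing uninstalls cpuidle_idle_call() and kicks all CPUs out of idle
 * via cpuidle_kick_cpus(), so no CPU can still be referencing the state
 * table while it is being rewritten.
 */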
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;

        if (dev->enabled)
                return 0;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                return -EINVAL;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states[i].usage = 0;
                dev->states[i].time = 0;
        }
        dev->last_residency = 0;
        dev->last_state = NULL;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
        ktime_t t1, t2;
        s64 diff;
        int ret;

        t1 = ktime_get();
        local_irq_enable();
        /* busy-wait with interrupts enabled until a reschedule is needed */
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        /* clamp the measured residency to what the int return can carry */
        if (diff > INT_MAX)
                diff = INT_MAX;

        ret = (int) diff;
        return ret;
}

static void poll_idle_init(struct cpuidle_device *dev)
{
        struct cpuidle_state *state = &dev->states[0];

        cpuidle_set_statedata(state, NULL);

        snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = CPUIDLE_FLAG_POLL;
        state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
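/*
 * Registration sketch (hypothetical driver code, not part of this file;
 * "cpu", "my_c1_enter" and the values are made up): before calling
 * cpuidle_register_device() below, a driver fills in the per-CPU state
 * table, roughly:
 *
 *	dev->cpu = cpu;
 *	dev->state_count = 2;
 *	snprintf(dev->states[1].name, CPUIDLE_NAME_LEN, "C1");
 *	dev->states[1].exit_latency = 1;	(microseconds)
 *	dev->states[1].target_residency = 4;	(microseconds)
 *	dev->states[1].enter = my_c1_enter;	(driver callback)
 *	ret = cpuidle_register_device(dev);
 *
 * On architectures with CONFIG_ARCH_HAS_CPU_RELAX, poll_idle_init() above
 * overwrites states[0] with the polling loop, so hardware C-states begin
 * at index 1 there.
 */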
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (!sys_dev)
                return -EINVAL;
        if (!try_module_get(cpuidle_curr_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        poll_idle_init(dev);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(sys_dev))) {
                module_put(cpuidle_curr_driver->owner);
                return ret;
        }

        dev->registered = 1;
        return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(sys_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_resume_and_unlock();

        module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        pm_idle_old = pm_idle;

        ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

core_initcall(cpuidle_init);
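/*
 * PM QoS sketch (hypothetical driver, illustrative only; "mydrv" and the
 * 20 us value are made up): the latency notifier registered by
 * cpuidle_init() fires whenever the aggregate PM_QOS_CPU_DMA_LATENCY
 * target changes, e.g.:
 *
 *	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 20);
 *	...
 *	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv");
 *
 * On SMP the change triggers cpuidle_latency_notify(), whose cross-CPU
 * IPI kicks every processor out of its current C-state so the governor
 * re-selects with the new latency bound in effect.
 */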