/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
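
/**
 * find_deepest_state - Find the deepest (highest exit latency) usable state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @freeze: If true, only consider states that provide ->enter_freeze.
 *
 * States disabled by the driver or through sysfs are skipped.  Returns the
 * index of the deepest remaining state; if no state qualifies, returns -1
 * when @freeze is set and CPUIDLE_DRIVER_STATE_START - 1 otherwise.
 */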
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev, bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

#ifdef CONFIG_SUSPEND
/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, false);
}
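
/*
 * For reference, a state is usable for suspend-to-idle only if the driver
 * provides an ->enter_freeze callback for it.  A hypothetical state table
 * entry (all names and numbers made up for illustration) might look like:
 *
 *	.name			= "C2",
 *	.exit_latency		= 200,
 *	.target_residency	= 800,
 *	.enter			= foo_enter_c2,
 *	.enter_freeze		= foo_enter_freeze_c2,
 *
 * where foo_enter_freeze_c2() must not re-enable interrupts, as required by
 * enter_freeze_proper() below.
 */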
static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * last CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, true);
	if (index >= 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter())
		return -EBUSY;

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * drivers' enter routines, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
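
/*
 * Note for drivers: a state in which the CPU's local timer stops is expected
 * to be tagged with CPUIDLE_FLAG_TIMER_STOP in its definition (for example,
 * a hypothetical ".flags = CPUIDLE_FLAG_TIMER_STOP"), so that
 * cpuidle_enter_state() above brackets it with tick_broadcast_enter() and
 * tick_broadcast_exit().
 */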

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what was the state
 * we were in
 *
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, index);
}
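
/*
 * A simplified sketch (not the authoritative implementation) of how the idle
 * loop is expected to drive the three hooks above on each idle entry:
 *
 *	next_state = cpuidle_select(drv, dev);
 *	entered_state = cpuidle_enter(drv, dev, next_state);
 *	cpuidle_reflect(dev, entered_state);
 *
 * See cpuidle_idle_call() in kernel/sched/idle.c for the actual caller.
 */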

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
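
/*
 * External users of cpuidle_enable_device()/cpuidle_disable_device() below
 * are expected to wrap the calls in the pause/resume pair, roughly:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the device or its states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */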

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern present in the arch specific drivers. The
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use them. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
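
/*
 * A minimal, hypothetical example of the registration pattern handled by
 * cpuidle_register() above (driver name, state values and callbacks are
 * made up for illustration):
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name		= "foo_idle",
 *		.owner		= THIS_MODULE,
 *		.states[0]	= {
 *			.name			= "WFI",
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.enter			= foo_enter_wfi,
 *		},
 *		.state_count	= 1,
 *	};
 *
 *	ret = cpuidle_register(&foo_idle_driver, NULL);
 */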

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);