/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

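/*
 * Illustrative sketch (not part of the original file): a driver opts a state
 * into cpuidle_play_dead() by providing an ->enter_dead callback, e.g.:
 *
 *	static int my_enter_dead(struct cpuidle_device *dev, int index)
 *	{
 *		... put the CPU into its deepest offline-capable state ...
 *	}
 *
 *	.states[1] = { .name = "DEEP", .enter_dead = my_enter_dead, ... },
 *
 * "my_enter_dead" and the state layout are hypothetical; only the callback
 * arguments are taken from the call site above.
 */
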
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev, bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, false);
}

static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume(), which will be called by tick_unfreeze() on the
	 * last CPU executing it, calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, true);
	if (index >= 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}

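/*
 * Illustrative sketch (not part of the original file): the ->enter_freeze
 * callback invoked above takes the same arguments as ->enter but returns
 * nothing and must not re-enable interrupts, e.g.:
 *
 *	static void my_enter_freeze(struct cpuidle_device *dev,
 *				    struct cpuidle_driver *drv, int index)
 *	{
 *		... enter the hardware state without touching the tick ...
 *	}
 *
 * "my_enter_freeze" is hypothetical; the parameter list is taken from the
 * drv->states[index].enter_freeze(dev, drv, index) call site above.
 */
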
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter())
		return -EBUSY;

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * driver's enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

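/*
 * Illustrative sketch (not part of the original file): a driver state entry
 * that cpuidle_enter_state() can handle, including the CPUIDLE_FLAG_TIMER_STOP
 * flag which triggers the tick_broadcast_enter()/_exit() calls above:
 *
 *	static int my_enter(struct cpuidle_device *dev,
 *			    struct cpuidle_driver *drv, int index)
 *	{
 *		... enter the hardware state ...
 *		return index;	// index of the state actually entered
 *	}
 *
 *	.states[1] = {
 *		.name	= "C2",
 *		.flags	= CPUIDLE_FLAG_TIMER_STOP,
 *		.enter	= my_enter,
 *	},
 *
 * "my_enter" and the field values are hypothetical; the ->enter arguments,
 * its return convention and the flag are taken from the code above.
 */
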
/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor about the idle state
 * we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, index);
}

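/*
 * Illustrative sketch (not part of the original file): the expected calling
 * sequence for the three helpers above, as seen from an idle loop:
 *
 *	index = cpuidle_select(drv, dev);
 *	entered = cpuidle_enter(drv, dev, index);
 *	cpuidle_reflect(dev, entered);
 *
 * The actual caller (e.g. the scheduler's idle loop) lives outside this file.
 */
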
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the new idle handler */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at the idle states they may still reference.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in the suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in the suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

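/*
 * Illustrative sketch (not part of the original file): as the kernel-doc above
 * notes, external callers bracket cpuidle_enable_device() (and
 * cpuidle_disable_device() below) with the pause/resume helpers:
 *
 *	cpuidle_pause_and_lock();
 *	ret = cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 *
 * "dev" stands for some already-registered struct cpuidle_device.
 */
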
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 *				 and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and its devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern shared by the arch specific drivers. The cpu
 * device structures are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, coupled idle state support can
		 * be enabled even if this particular cpuidle driver does not
		 * use it.  Note that coupled_cpus is copied by value.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

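/*
 * Illustrative sketch (not part of the original file): an arch driver built
 * around the helper above registers its driver and per-cpu devices in one
 * call:
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name		= "my_idle",
 *		.owner		= THIS_MODULE,
 *		.states		= { ... },
 *		.state_count	= 2,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 *
 * "my_idle_driver" and its state table are hypothetical; a cpumask is passed
 * instead of NULL only when coupled states are used.
 */
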
#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
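
/*
 * Illustrative note (not part of the original file): since this code is built
 * in, module_param(off, ...) above is expected to surface as the "cpuidle.off"
 * boot parameter; booting with cpuidle.off=1 sets the "off" flag read by
 * cpuidle_disabled() and keeps the framework from initializing.
 */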