/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        struct cpuidle_state *target_state = &drv->states[index];

        return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int i, dead_state = -1;
        int power_usage = -1;

        /* Find lowest-power state that supports long-term idle */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (s->power_usage < power_usage && s->enter_dead) {
                        power_usage = s->power_usage;
                        dead_state = i;
                }
        }

        if (dead_state != -1)
                return drv->states[dead_state].enter_dead(dev, dead_state);

        return -ENODEV;
}
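/*
 * Illustrative sketch (not part of the original file): a platform idle
 * driver opts a state into cpuidle_play_dead() by filling in ->enter_dead
 * for that state.  The "myplat" names and the power-off helper below are
 * hypothetical; only the cpuidle_state fields themselves are real.
 *
 *      static int myplat_enter_dead(struct cpuidle_device *dev, int index)
 *      {
 *              myplat_cpu_power_off();         // hypothetical SoC helper
 *              return 0;                       // not reached on success
 *      }
 *
 *      // in the driver's state table:
 *      //      .name       = "C2",
 *      //      .enter      = myplat_enter_c2,
 *      //      .enter_dead = myplat_enter_dead,
 *
 * cpuidle_play_dead() then picks the lowest-power registered state that
 * provides ->enter_dead and calls it from the CPU-offline path.
 */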
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

#if 0
        /* shows regressions, re-enable for 2.6.29 */
        /*
         * run any timers that can be run now, at this point
         * before calculating the idle duration etc.
         */
        hrtimer_peek_ahead_timers();
#endif

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                local_irq_enable();
                return 0;
        }

        trace_power_start(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle(next_state, dev->cpu);

        entered_state = cpuidle_enter_ops(dev, drv, next_state);

        trace_power_end(dev->cpu);
        trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

        if (entered_state >= 0) {
                /*
                 * Update the cpuidle counters.  This could be moved into the
                 * driver's enter routine, but that would duplicate the same
                 * code in every driver.
                 */
                dev->states_usage[entered_state].time +=
                                (unsigned long long)dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                cpuidle_kick_cpus();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
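/*
 * Illustrative sketch (not from this file): code outside the cpuidle core
 * that reconfigures a device is expected to bracket the work with the pair
 * above, e.g. in a CPU hotplug or ACPI C-state update path:
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      ... update the device/driver state tables ...
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 *
 * The pair takes cpuidle_lock and clears "initialized", so cpuidle_idle_call()
 * bails out early and CPUs fall back to the default idle loop while the
 * update is in progress.
 */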
/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index,
                                int (*enter)(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv, int index))
{
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        index = enter(dev, drv, index);

        time_end = ktime_get();

        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
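/*
 * Illustrative sketch (not from this file): a driver that wants the core to
 * do the residency timekeeping and the local_irq_enable() above sets
 * en_core_tk_irqen at registration time; cpuidle_enable_device() then points
 * cpuidle_enter_ops at cpuidle_enter_tk, which wraps every state entry in
 * cpuidle_wrap_enter().  The driver name is hypothetical.
 *
 *      static struct cpuidle_driver myplat_idle_driver = {
 *              .name             = "myplat_idle",
 *              .owner            = THIS_MODULE,
 *              .en_core_tk_irqen = 1,
 *              // states[] and state_count filled in as usual
 *      };
 *
 *      ret = cpuidle_register_driver(&myplat_idle_driver);
 */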
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;
        struct cpuidle_driver *drv = cpuidle_get_driver();

        if (dev->enabled)
                return 0;
        if (!drv || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                dev->state_count = drv->state_count;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        cpuidle_enter_ops = drv->en_core_tk_irqen ?
                cpuidle_enter_tk : cpuidle_enter;

        poll_idle_init(drv);

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states_usage[i].usage = 0;
                dev->states_usage[i].time = 0;
        }
        dev->last_residency = 0;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_get_driver() || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal registration function, called before
 * the register and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (!dev)
                return -EINVAL;
        if (!try_module_get(cpuidle_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(cpu_dev))) {
                module_put(cpuidle_driver->owner);
                return ret;
        }

        dev->registered = 1;
        return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
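/*
 * Illustrative sketch (not from this file): a typical driver registers the
 * cpuidle_driver once and then one cpuidle_device per possible CPU.  The
 * per-CPU variable and init function are hypothetical; unwinding of devices
 * already registered before a failure is omitted for brevity.
 *
 *      static DEFINE_PER_CPU(struct cpuidle_device, myplat_idle_dev);
 *
 *      static int __init myplat_idle_init(void)
 *      {
 *              int cpu, ret;
 *
 *              ret = cpuidle_register_driver(&myplat_idle_driver);
 *              if (ret)
 *                      return ret;
 *
 *              for_each_possible_cpu(cpu) {
 *                      struct cpuidle_device *dev =
 *                                      &per_cpu(myplat_idle_dev, cpu);
 *
 *                      dev->cpu = cpu;
 *                      ret = cpuidle_register_device(dev);
 *                      if (ret) {
 *                              cpuidle_unregister_driver(&myplat_idle_driver);
 *                              return ret;
 *                      }
 *              }
 *              return 0;
 *      }
 */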
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(cpu_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_resume_and_unlock();

        module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
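/*
 * Illustrative sketch (not from this file): the notifier above fires whenever
 * a PM_QOS_CPU_DMA_LATENCY request changes, e.g. when a driver temporarily
 * needs fast wakeups.  The request variable and the 20 usec bound are
 * hypothetical.
 *
 *      static struct pm_qos_request myplat_qos;
 *
 *      pm_qos_add_request(&myplat_qos, PM_QOS_CPU_DMA_LATENCY, 20);
 *      ...
 *      pm_qos_remove_request(&myplat_qos);
 *
 * Each add/update/remove that changes the aggregate constraint kicks every
 * CPU out of idle via the IPI, so the governors re-evaluate their state
 * choice against the new latency bound.
 */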
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

/* Exposed as the read-only "cpuidle.off=1" boot parameter; see cpuidle_disabled() */
module_param(off, int, 0444);
core_initcall(cpuidle_init);