/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

/* Per-cpu registered cpuidle device; NULL until registration completes. */
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

/* Protects registration/enable/disable of devices and the list below. */
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

/* Count of currently enabled devices; gates the idle-handler install. */
static int enabled_devices;
/* Set via the "off" module parameter or disable_cpuidle(); checked lockless. */
static int off __read_mostly;
/* Nonzero once cpuidle_install_idle_handler() has published readiness. */
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
/* Force all CPUs out of their current idle routine. */
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/* Raw state entry: call the driver's enter() for the chosen state index. */
static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

/* State entry with core-provided timekeeping and irq re-enable around it. */
static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index);

/*
 * Selected in cpuidle_enable_device() based on drv->en_core_tk_irqen:
 * either cpuidle_enter_tk or plain cpuidle_enter.
 */
static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		/* Work became pending while selecting; don't idle at all. */
		local_irq_enable();
		return 0;
	}

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	/* May return a different (shallower) state index, or <0 on failure. */
	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		/* kick idling CPUs so they notice initialized == 0 */
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 *
 * Pair with cpuidle_resume_and_unlock(); cpuidle_lock is held in between.
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

Brown 1714f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock); 1724f86d3a8SLen Brown 1734f86d3a8SLen Brown /** 1744f86d3a8SLen Brown * cpuidle_resume_and_unlock - resumes CPUIDLE operation 1754f86d3a8SLen Brown */ 1764f86d3a8SLen Brown void cpuidle_resume_and_unlock(void) 1774f86d3a8SLen Brown { 1784f86d3a8SLen Brown cpuidle_install_idle_handler(); 1794f86d3a8SLen Brown mutex_unlock(&cpuidle_lock); 1804f86d3a8SLen Brown } 1814f86d3a8SLen Brown 1824f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 1834f86d3a8SLen Brown 184e1689795SRobert Lee /** 185e1689795SRobert Lee * cpuidle_wrap_enter - performs timekeeping and irqen around enter function 186e1689795SRobert Lee * @dev: pointer to a valid cpuidle_device object 187e1689795SRobert Lee * @drv: pointer to a valid cpuidle_driver object 188e1689795SRobert Lee * @index: index of the target cpuidle state. 189e1689795SRobert Lee */ 190e1689795SRobert Lee int cpuidle_wrap_enter(struct cpuidle_device *dev, 191e1689795SRobert Lee struct cpuidle_driver *drv, int index, 192e1689795SRobert Lee int (*enter)(struct cpuidle_device *dev, 193e1689795SRobert Lee struct cpuidle_driver *drv, int index)) 194e1689795SRobert Lee { 195e1689795SRobert Lee ktime_t time_start, time_end; 196e1689795SRobert Lee s64 diff; 197e1689795SRobert Lee 198e1689795SRobert Lee time_start = ktime_get(); 199e1689795SRobert Lee 200e1689795SRobert Lee index = enter(dev, drv, index); 201e1689795SRobert Lee 202e1689795SRobert Lee time_end = ktime_get(); 203e1689795SRobert Lee 204e1689795SRobert Lee local_irq_enable(); 205e1689795SRobert Lee 206e1689795SRobert Lee diff = ktime_to_us(ktime_sub(time_end, time_start)); 207e1689795SRobert Lee if (diff > INT_MAX) 208e1689795SRobert Lee diff = INT_MAX; 209e1689795SRobert Lee 210e1689795SRobert Lee dev->last_residency = (int) diff; 211e1689795SRobert Lee 212e1689795SRobert Lee return index; 213e1689795SRobert Lee } 214e1689795SRobert Lee 215d8c216cfSRafael J. 
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
/*
 * Busy-wait "idle" state: spin with cpu_relax() until work is pending.
 * Records its own residency (clamped to INT_MAX us) and echoes @index back.
 */
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t start, stop;
	s64 elapsed_us;

	start = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	stop = ktime_get();

	elapsed_us = ktime_to_us(ktime_sub(stop, start));
	if (elapsed_us > INT_MAX)
		elapsed_us = INT_MAX;
	dev->last_residency = (int) elapsed_us;

	return index;
}

/* Install the polling loop above as state 0 of @drv. */
static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *st = &drv->states[0];

	snprintf(st->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(st->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	st->exit_latency = 0;
	st->target_residency = 0;
	st->power_usage = -1;
	st->flags = 0;
	st->enter = poll_idle;
	st->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

Wysocki 2544f86d3a8SLen Brown /** 2554f86d3a8SLen Brown * cpuidle_enable_device - enables idle PM for a CPU 2564f86d3a8SLen Brown * @dev: the CPU 2574f86d3a8SLen Brown * 2584f86d3a8SLen Brown * This function must be called between cpuidle_pause_and_lock and 2594f86d3a8SLen Brown * cpuidle_resume_and_unlock when used externally. 2604f86d3a8SLen Brown */ 2614f86d3a8SLen Brown int cpuidle_enable_device(struct cpuidle_device *dev) 2624f86d3a8SLen Brown { 2634f86d3a8SLen Brown int ret, i; 264e1689795SRobert Lee struct cpuidle_driver *drv = cpuidle_get_driver(); 2654f86d3a8SLen Brown 2664f86d3a8SLen Brown if (dev->enabled) 2674f86d3a8SLen Brown return 0; 268e1689795SRobert Lee if (!drv || !cpuidle_curr_governor) 2694f86d3a8SLen Brown return -EIO; 2704f86d3a8SLen Brown if (!dev->state_count) 271*fc850f39SDaniel Lezcano dev->state_count = drv->state_count; 2724f86d3a8SLen Brown 273dcb84f33SVenkatesh Pallipadi if (dev->registered == 0) { 274dcb84f33SVenkatesh Pallipadi ret = __cpuidle_register_device(dev); 275dcb84f33SVenkatesh Pallipadi if (ret) 276dcb84f33SVenkatesh Pallipadi return ret; 277dcb84f33SVenkatesh Pallipadi } 278dcb84f33SVenkatesh Pallipadi 279e1689795SRobert Lee cpuidle_enter_ops = drv->en_core_tk_irqen ? 280e1689795SRobert Lee cpuidle_enter_tk : cpuidle_enter; 281e1689795SRobert Lee 282e1689795SRobert Lee poll_idle_init(drv); 283d8c216cfSRafael J. 
Wysocki 2844f86d3a8SLen Brown if ((ret = cpuidle_add_state_sysfs(dev))) 2854f86d3a8SLen Brown return ret; 2864f86d3a8SLen Brown 2874f86d3a8SLen Brown if (cpuidle_curr_governor->enable && 288e1689795SRobert Lee (ret = cpuidle_curr_governor->enable(drv, dev))) 2894f86d3a8SLen Brown goto fail_sysfs; 2904f86d3a8SLen Brown 2914f86d3a8SLen Brown for (i = 0; i < dev->state_count; i++) { 2924202735eSDeepthi Dharwar dev->states_usage[i].usage = 0; 2934202735eSDeepthi Dharwar dev->states_usage[i].time = 0; 2944f86d3a8SLen Brown } 2954f86d3a8SLen Brown dev->last_residency = 0; 2964f86d3a8SLen Brown 2974f86d3a8SLen Brown smp_wmb(); 2984f86d3a8SLen Brown 2994f86d3a8SLen Brown dev->enabled = 1; 3004f86d3a8SLen Brown 3014f86d3a8SLen Brown enabled_devices++; 3024f86d3a8SLen Brown return 0; 3034f86d3a8SLen Brown 3044f86d3a8SLen Brown fail_sysfs: 3054f86d3a8SLen Brown cpuidle_remove_state_sysfs(dev); 3064f86d3a8SLen Brown 3074f86d3a8SLen Brown return ret; 3084f86d3a8SLen Brown } 3094f86d3a8SLen Brown 3104f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_enable_device); 3114f86d3a8SLen Brown 3124f86d3a8SLen Brown /** 3134f86d3a8SLen Brown * cpuidle_disable_device - disables idle PM for a CPU 3144f86d3a8SLen Brown * @dev: the CPU 3154f86d3a8SLen Brown * 3164f86d3a8SLen Brown * This function must be called between cpuidle_pause_and_lock and 3174f86d3a8SLen Brown * cpuidle_resume_and_unlock when used externally. 
3184f86d3a8SLen Brown */ 3194f86d3a8SLen Brown void cpuidle_disable_device(struct cpuidle_device *dev) 3204f86d3a8SLen Brown { 3214f86d3a8SLen Brown if (!dev->enabled) 3224f86d3a8SLen Brown return; 323752138dfSLen Brown if (!cpuidle_get_driver() || !cpuidle_curr_governor) 3244f86d3a8SLen Brown return; 3254f86d3a8SLen Brown 3264f86d3a8SLen Brown dev->enabled = 0; 3274f86d3a8SLen Brown 3284f86d3a8SLen Brown if (cpuidle_curr_governor->disable) 32946bcfad7SDeepthi Dharwar cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); 3304f86d3a8SLen Brown 3314f86d3a8SLen Brown cpuidle_remove_state_sysfs(dev); 3324f86d3a8SLen Brown enabled_devices--; 3334f86d3a8SLen Brown } 3344f86d3a8SLen Brown 3354f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_disable_device); 3364f86d3a8SLen Brown 3374f86d3a8SLen Brown /** 338dcb84f33SVenkatesh Pallipadi * __cpuidle_register_device - internal register function called before register 339dcb84f33SVenkatesh Pallipadi * and enable routines 3404f86d3a8SLen Brown * @dev: the cpu 341dcb84f33SVenkatesh Pallipadi * 342dcb84f33SVenkatesh Pallipadi * cpuidle_lock mutex must be held before this is called 3434f86d3a8SLen Brown */ 344dcb84f33SVenkatesh Pallipadi static int __cpuidle_register_device(struct cpuidle_device *dev) 3454f86d3a8SLen Brown { 3464f86d3a8SLen Brown int ret; 3478a25a2fdSKay Sievers struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 348752138dfSLen Brown struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 3494f86d3a8SLen Brown 3508a25a2fdSKay Sievers if (!dev) 3514f86d3a8SLen Brown return -EINVAL; 352752138dfSLen Brown if (!try_module_get(cpuidle_driver->owner)) 3534f86d3a8SLen Brown return -EINVAL; 3544f86d3a8SLen Brown 3554f86d3a8SLen Brown init_completion(&dev->kobj_unregister); 3564f86d3a8SLen Brown 3574f86d3a8SLen Brown per_cpu(cpuidle_devices, dev->cpu) = dev; 3584f86d3a8SLen Brown list_add(&dev->device_list, &cpuidle_detected_devices); 3598a25a2fdSKay Sievers if ((ret = cpuidle_add_sysfs(cpu_dev))) { 
360752138dfSLen Brown module_put(cpuidle_driver->owner); 3614f86d3a8SLen Brown return ret; 3624f86d3a8SLen Brown } 3634f86d3a8SLen Brown 364dcb84f33SVenkatesh Pallipadi dev->registered = 1; 365dcb84f33SVenkatesh Pallipadi return 0; 366dcb84f33SVenkatesh Pallipadi } 367dcb84f33SVenkatesh Pallipadi 368dcb84f33SVenkatesh Pallipadi /** 369dcb84f33SVenkatesh Pallipadi * cpuidle_register_device - registers a CPU's idle PM feature 370dcb84f33SVenkatesh Pallipadi * @dev: the cpu 371dcb84f33SVenkatesh Pallipadi */ 372dcb84f33SVenkatesh Pallipadi int cpuidle_register_device(struct cpuidle_device *dev) 373dcb84f33SVenkatesh Pallipadi { 374dcb84f33SVenkatesh Pallipadi int ret; 375dcb84f33SVenkatesh Pallipadi 376dcb84f33SVenkatesh Pallipadi mutex_lock(&cpuidle_lock); 377dcb84f33SVenkatesh Pallipadi 378dcb84f33SVenkatesh Pallipadi if ((ret = __cpuidle_register_device(dev))) { 379dcb84f33SVenkatesh Pallipadi mutex_unlock(&cpuidle_lock); 380dcb84f33SVenkatesh Pallipadi return ret; 381dcb84f33SVenkatesh Pallipadi } 382dcb84f33SVenkatesh Pallipadi 3834f86d3a8SLen Brown cpuidle_enable_device(dev); 3844f86d3a8SLen Brown cpuidle_install_idle_handler(); 3854f86d3a8SLen Brown 3864f86d3a8SLen Brown mutex_unlock(&cpuidle_lock); 3874f86d3a8SLen Brown 3884f86d3a8SLen Brown return 0; 3894f86d3a8SLen Brown 3904f86d3a8SLen Brown } 3914f86d3a8SLen Brown 3924f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_register_device); 3934f86d3a8SLen Brown 3944f86d3a8SLen Brown /** 3954f86d3a8SLen Brown * cpuidle_unregister_device - unregisters a CPU's idle PM feature 3964f86d3a8SLen Brown * @dev: the cpu 3974f86d3a8SLen Brown */ 3984f86d3a8SLen Brown void cpuidle_unregister_device(struct cpuidle_device *dev) 3994f86d3a8SLen Brown { 4008a25a2fdSKay Sievers struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 401752138dfSLen Brown struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 4024f86d3a8SLen Brown 403dcb84f33SVenkatesh Pallipadi if (dev->registered == 0) 404dcb84f33SVenkatesh 
Pallipadi return; 405dcb84f33SVenkatesh Pallipadi 4064f86d3a8SLen Brown cpuidle_pause_and_lock(); 4074f86d3a8SLen Brown 4084f86d3a8SLen Brown cpuidle_disable_device(dev); 4094f86d3a8SLen Brown 4108a25a2fdSKay Sievers cpuidle_remove_sysfs(cpu_dev); 4114f86d3a8SLen Brown list_del(&dev->device_list); 4124f86d3a8SLen Brown wait_for_completion(&dev->kobj_unregister); 4134f86d3a8SLen Brown per_cpu(cpuidle_devices, dev->cpu) = NULL; 4144f86d3a8SLen Brown 4154f86d3a8SLen Brown cpuidle_resume_and_unlock(); 4164f86d3a8SLen Brown 417752138dfSLen Brown module_put(cpuidle_driver->owner); 4184f86d3a8SLen Brown } 4194f86d3a8SLen Brown 4204f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_unregister_device); 4214f86d3a8SLen Brown 4224f86d3a8SLen Brown #ifdef CONFIG_SMP 4234f86d3a8SLen Brown 4244f86d3a8SLen Brown static void smp_callback(void *v) 4254f86d3a8SLen Brown { 4264f86d3a8SLen Brown /* we already woke the CPU up, nothing more to do */ 4274f86d3a8SLen Brown } 4284f86d3a8SLen Brown 4294f86d3a8SLen Brown /* 4304f86d3a8SLen Brown * This function gets called when a part of the kernel has a new latency 4314f86d3a8SLen Brown * requirement. This means we need to get all processors out of their C-state, 4324f86d3a8SLen Brown * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that 4334f86d3a8SLen Brown * wakes them all right up. 
4344f86d3a8SLen Brown */ 4354f86d3a8SLen Brown static int cpuidle_latency_notify(struct notifier_block *b, 4364f86d3a8SLen Brown unsigned long l, void *v) 4374f86d3a8SLen Brown { 4388691e5a8SJens Axboe smp_call_function(smp_callback, NULL, 1); 4394f86d3a8SLen Brown return NOTIFY_OK; 4404f86d3a8SLen Brown } 4414f86d3a8SLen Brown 4424f86d3a8SLen Brown static struct notifier_block cpuidle_latency_notifier = { 4434f86d3a8SLen Brown .notifier_call = cpuidle_latency_notify, 4444f86d3a8SLen Brown }; 4454f86d3a8SLen Brown 446d82b3518SMark Gross static inline void latency_notifier_init(struct notifier_block *n) 447d82b3518SMark Gross { 448d82b3518SMark Gross pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n); 449d82b3518SMark Gross } 4504f86d3a8SLen Brown 4514f86d3a8SLen Brown #else /* CONFIG_SMP */ 4524f86d3a8SLen Brown 4534f86d3a8SLen Brown #define latency_notifier_init(x) do { } while (0) 4544f86d3a8SLen Brown 4554f86d3a8SLen Brown #endif /* CONFIG_SMP */ 4564f86d3a8SLen Brown 4574f86d3a8SLen Brown /** 4584f86d3a8SLen Brown * cpuidle_init - core initializer 4594f86d3a8SLen Brown */ 4604f86d3a8SLen Brown static int __init cpuidle_init(void) 4614f86d3a8SLen Brown { 4624f86d3a8SLen Brown int ret; 4634f86d3a8SLen Brown 46462027aeaSLen Brown if (cpuidle_disabled()) 46562027aeaSLen Brown return -ENODEV; 46662027aeaSLen Brown 4678a25a2fdSKay Sievers ret = cpuidle_add_interface(cpu_subsys.dev_root); 4684f86d3a8SLen Brown if (ret) 4694f86d3a8SLen Brown return ret; 4704f86d3a8SLen Brown 4714f86d3a8SLen Brown latency_notifier_init(&cpuidle_latency_notifier); 4724f86d3a8SLen Brown 4734f86d3a8SLen Brown return 0; 4744f86d3a8SLen Brown } 4754f86d3a8SLen Brown 47662027aeaSLen Brown module_param(off, int, 0444); 4774f86d3a8SLen Brown core_initcall(cpuidle_init); 478