/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

/* Per-cpu registered cpuidle device, NULL until registration. */
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

/* Protects registration/enable state and the detected-devices list. */
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

/* Count of currently enabled devices; gates the idle handler. */
static int enabled_devices;
/* Set via the "off" module parameter or disable_cpuidle(). */
static int off __read_mostly;
/* Non-zero once cpuidle_install_idle_handler() has run. */
static int initialized __read_mostly;

/**
 * cpuidle_disabled - report whether cpuidle has been globally disabled
 */
int cpuidle_disabled(void)
{
	return off;
}

/**
 * disable_cpuidle - globally disable the cpuidle idle loop
 */
void disable_cpuidle(void)
{
	off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
/* Force all CPUs out of their current idle routine. */
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/* Invoke the target state's enter method directly (driver does its own
 * timekeeping and irq enabling). */
static inline int cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	return target_state->enter(dev, drv, index);
}

/* Invoke the state's enter method with core-provided timekeeping and
 * interrupt re-enabling (see cpuidle_wrap_enter below). */
static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index);

/* Selected in cpuidle_enable_device() from drv->en_core_tk_irqen. */
static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		/* A reschedule is already pending: don't idle at all. */
		local_irq_enable();
		return 0;
	}

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = cpuidle_enter_ops(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		/* Enter failed: no residency to account. */
		dev->last_residency = 0;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		/* Kick CPUs that may still be inside the old idle routine. */
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}
Brown 1714f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock); 1724f86d3a8SLen Brown 1734f86d3a8SLen Brown /** 1744f86d3a8SLen Brown * cpuidle_resume_and_unlock - resumes CPUIDLE operation 1754f86d3a8SLen Brown */ 1764f86d3a8SLen Brown void cpuidle_resume_and_unlock(void) 1774f86d3a8SLen Brown { 1784f86d3a8SLen Brown cpuidle_install_idle_handler(); 1794f86d3a8SLen Brown mutex_unlock(&cpuidle_lock); 1804f86d3a8SLen Brown } 1814f86d3a8SLen Brown 1824f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 1834f86d3a8SLen Brown 184*e1689795SRobert Lee /** 185*e1689795SRobert Lee * cpuidle_wrap_enter - performs timekeeping and irqen around enter function 186*e1689795SRobert Lee * @dev: pointer to a valid cpuidle_device object 187*e1689795SRobert Lee * @drv: pointer to a valid cpuidle_driver object 188*e1689795SRobert Lee * @index: index of the target cpuidle state. 189*e1689795SRobert Lee */ 190*e1689795SRobert Lee int cpuidle_wrap_enter(struct cpuidle_device *dev, 191*e1689795SRobert Lee struct cpuidle_driver *drv, int index, 192*e1689795SRobert Lee int (*enter)(struct cpuidle_device *dev, 193*e1689795SRobert Lee struct cpuidle_driver *drv, int index)) 194*e1689795SRobert Lee { 195*e1689795SRobert Lee ktime_t time_start, time_end; 196*e1689795SRobert Lee s64 diff; 197*e1689795SRobert Lee 198*e1689795SRobert Lee time_start = ktime_get(); 199*e1689795SRobert Lee 200*e1689795SRobert Lee index = enter(dev, drv, index); 201*e1689795SRobert Lee 202*e1689795SRobert Lee time_end = ktime_get(); 203*e1689795SRobert Lee 204*e1689795SRobert Lee local_irq_enable(); 205*e1689795SRobert Lee 206*e1689795SRobert Lee diff = ktime_to_us(ktime_sub(time_end, time_start)); 207*e1689795SRobert Lee if (diff > INT_MAX) 208*e1689795SRobert Lee diff = INT_MAX; 209*e1689795SRobert Lee 210*e1689795SRobert Lee dev->last_residency = (int) diff; 211*e1689795SRobert Lee 212*e1689795SRobert Lee return index; 213*e1689795SRobert Lee } 214*e1689795SRobert Lee 215d8c216cfSRafael J. 
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
/*
 * poll_idle - busy-wait pseudo idle state: spin with interrupts enabled
 * until a reschedule is needed, accounting the spun time as residency.
 */
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t start, stop;
	s64 elapsed_us;

	start = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	stop = ktime_get();

	elapsed_us = ktime_to_us(ktime_sub(stop, start));
	if (elapsed_us > INT_MAX)
		elapsed_us = INT_MAX;
	dev->last_residency = (int)elapsed_us;

	return index;
}

/* Install the polling state as entry 0 of the driver's state table. */
static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *poll_state = &drv->states[0];

	snprintf(poll_state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(poll_state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	poll_state->enter = poll_idle;
	poll_state->exit_latency = 0;
	poll_state->target_residency = 0;
	poll_state->power_usage = -1;
	poll_state->flags = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
Wysocki 2534f86d3a8SLen Brown /** 2544f86d3a8SLen Brown * cpuidle_enable_device - enables idle PM for a CPU 2554f86d3a8SLen Brown * @dev: the CPU 2564f86d3a8SLen Brown * 2574f86d3a8SLen Brown * This function must be called between cpuidle_pause_and_lock and 2584f86d3a8SLen Brown * cpuidle_resume_and_unlock when used externally. 2594f86d3a8SLen Brown */ 2604f86d3a8SLen Brown int cpuidle_enable_device(struct cpuidle_device *dev) 2614f86d3a8SLen Brown { 2624f86d3a8SLen Brown int ret, i; 263*e1689795SRobert Lee struct cpuidle_driver *drv = cpuidle_get_driver(); 2644f86d3a8SLen Brown 2654f86d3a8SLen Brown if (dev->enabled) 2664f86d3a8SLen Brown return 0; 267*e1689795SRobert Lee if (!drv || !cpuidle_curr_governor) 2684f86d3a8SLen Brown return -EIO; 2694f86d3a8SLen Brown if (!dev->state_count) 2704f86d3a8SLen Brown return -EINVAL; 2714f86d3a8SLen Brown 272dcb84f33SVenkatesh Pallipadi if (dev->registered == 0) { 273dcb84f33SVenkatesh Pallipadi ret = __cpuidle_register_device(dev); 274dcb84f33SVenkatesh Pallipadi if (ret) 275dcb84f33SVenkatesh Pallipadi return ret; 276dcb84f33SVenkatesh Pallipadi } 277dcb84f33SVenkatesh Pallipadi 278*e1689795SRobert Lee cpuidle_enter_ops = drv->en_core_tk_irqen ? 279*e1689795SRobert Lee cpuidle_enter_tk : cpuidle_enter; 280*e1689795SRobert Lee 281*e1689795SRobert Lee poll_idle_init(drv); 282d8c216cfSRafael J. 
Wysocki 2834f86d3a8SLen Brown if ((ret = cpuidle_add_state_sysfs(dev))) 2844f86d3a8SLen Brown return ret; 2854f86d3a8SLen Brown 2864f86d3a8SLen Brown if (cpuidle_curr_governor->enable && 287*e1689795SRobert Lee (ret = cpuidle_curr_governor->enable(drv, dev))) 2884f86d3a8SLen Brown goto fail_sysfs; 2894f86d3a8SLen Brown 2904f86d3a8SLen Brown for (i = 0; i < dev->state_count; i++) { 2914202735eSDeepthi Dharwar dev->states_usage[i].usage = 0; 2924202735eSDeepthi Dharwar dev->states_usage[i].time = 0; 2934f86d3a8SLen Brown } 2944f86d3a8SLen Brown dev->last_residency = 0; 2954f86d3a8SLen Brown 2964f86d3a8SLen Brown smp_wmb(); 2974f86d3a8SLen Brown 2984f86d3a8SLen Brown dev->enabled = 1; 2994f86d3a8SLen Brown 3004f86d3a8SLen Brown enabled_devices++; 3014f86d3a8SLen Brown return 0; 3024f86d3a8SLen Brown 3034f86d3a8SLen Brown fail_sysfs: 3044f86d3a8SLen Brown cpuidle_remove_state_sysfs(dev); 3054f86d3a8SLen Brown 3064f86d3a8SLen Brown return ret; 3074f86d3a8SLen Brown } 3084f86d3a8SLen Brown 3094f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_enable_device); 3104f86d3a8SLen Brown 3114f86d3a8SLen Brown /** 3124f86d3a8SLen Brown * cpuidle_disable_device - disables idle PM for a CPU 3134f86d3a8SLen Brown * @dev: the CPU 3144f86d3a8SLen Brown * 3154f86d3a8SLen Brown * This function must be called between cpuidle_pause_and_lock and 3164f86d3a8SLen Brown * cpuidle_resume_and_unlock when used externally. 
3174f86d3a8SLen Brown */ 3184f86d3a8SLen Brown void cpuidle_disable_device(struct cpuidle_device *dev) 3194f86d3a8SLen Brown { 3204f86d3a8SLen Brown if (!dev->enabled) 3214f86d3a8SLen Brown return; 322752138dfSLen Brown if (!cpuidle_get_driver() || !cpuidle_curr_governor) 3234f86d3a8SLen Brown return; 3244f86d3a8SLen Brown 3254f86d3a8SLen Brown dev->enabled = 0; 3264f86d3a8SLen Brown 3274f86d3a8SLen Brown if (cpuidle_curr_governor->disable) 32846bcfad7SDeepthi Dharwar cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); 3294f86d3a8SLen Brown 3304f86d3a8SLen Brown cpuidle_remove_state_sysfs(dev); 3314f86d3a8SLen Brown enabled_devices--; 3324f86d3a8SLen Brown } 3334f86d3a8SLen Brown 3344f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_disable_device); 3354f86d3a8SLen Brown 3364f86d3a8SLen Brown /** 337dcb84f33SVenkatesh Pallipadi * __cpuidle_register_device - internal register function called before register 338dcb84f33SVenkatesh Pallipadi * and enable routines 3394f86d3a8SLen Brown * @dev: the cpu 340dcb84f33SVenkatesh Pallipadi * 341dcb84f33SVenkatesh Pallipadi * cpuidle_lock mutex must be held before this is called 3424f86d3a8SLen Brown */ 343dcb84f33SVenkatesh Pallipadi static int __cpuidle_register_device(struct cpuidle_device *dev) 3444f86d3a8SLen Brown { 3454f86d3a8SLen Brown int ret; 3468a25a2fdSKay Sievers struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 347752138dfSLen Brown struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 3484f86d3a8SLen Brown 3498a25a2fdSKay Sievers if (!dev) 3504f86d3a8SLen Brown return -EINVAL; 351752138dfSLen Brown if (!try_module_get(cpuidle_driver->owner)) 3524f86d3a8SLen Brown return -EINVAL; 3534f86d3a8SLen Brown 3544f86d3a8SLen Brown init_completion(&dev->kobj_unregister); 3554f86d3a8SLen Brown 3564f86d3a8SLen Brown per_cpu(cpuidle_devices, dev->cpu) = dev; 3574f86d3a8SLen Brown list_add(&dev->device_list, &cpuidle_detected_devices); 3588a25a2fdSKay Sievers if ((ret = cpuidle_add_sysfs(cpu_dev))) { 
359752138dfSLen Brown module_put(cpuidle_driver->owner); 3604f86d3a8SLen Brown return ret; 3614f86d3a8SLen Brown } 3624f86d3a8SLen Brown 363dcb84f33SVenkatesh Pallipadi dev->registered = 1; 364dcb84f33SVenkatesh Pallipadi return 0; 365dcb84f33SVenkatesh Pallipadi } 366dcb84f33SVenkatesh Pallipadi 367dcb84f33SVenkatesh Pallipadi /** 368dcb84f33SVenkatesh Pallipadi * cpuidle_register_device - registers a CPU's idle PM feature 369dcb84f33SVenkatesh Pallipadi * @dev: the cpu 370dcb84f33SVenkatesh Pallipadi */ 371dcb84f33SVenkatesh Pallipadi int cpuidle_register_device(struct cpuidle_device *dev) 372dcb84f33SVenkatesh Pallipadi { 373dcb84f33SVenkatesh Pallipadi int ret; 374dcb84f33SVenkatesh Pallipadi 375dcb84f33SVenkatesh Pallipadi mutex_lock(&cpuidle_lock); 376dcb84f33SVenkatesh Pallipadi 377dcb84f33SVenkatesh Pallipadi if ((ret = __cpuidle_register_device(dev))) { 378dcb84f33SVenkatesh Pallipadi mutex_unlock(&cpuidle_lock); 379dcb84f33SVenkatesh Pallipadi return ret; 380dcb84f33SVenkatesh Pallipadi } 381dcb84f33SVenkatesh Pallipadi 3824f86d3a8SLen Brown cpuidle_enable_device(dev); 3834f86d3a8SLen Brown cpuidle_install_idle_handler(); 3844f86d3a8SLen Brown 3854f86d3a8SLen Brown mutex_unlock(&cpuidle_lock); 3864f86d3a8SLen Brown 3874f86d3a8SLen Brown return 0; 3884f86d3a8SLen Brown 3894f86d3a8SLen Brown } 3904f86d3a8SLen Brown 3914f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_register_device); 3924f86d3a8SLen Brown 3934f86d3a8SLen Brown /** 3944f86d3a8SLen Brown * cpuidle_unregister_device - unregisters a CPU's idle PM feature 3954f86d3a8SLen Brown * @dev: the cpu 3964f86d3a8SLen Brown */ 3974f86d3a8SLen Brown void cpuidle_unregister_device(struct cpuidle_device *dev) 3984f86d3a8SLen Brown { 3998a25a2fdSKay Sievers struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); 400752138dfSLen Brown struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 4014f86d3a8SLen Brown 402dcb84f33SVenkatesh Pallipadi if (dev->registered == 0) 403dcb84f33SVenkatesh 
Pallipadi return; 404dcb84f33SVenkatesh Pallipadi 4054f86d3a8SLen Brown cpuidle_pause_and_lock(); 4064f86d3a8SLen Brown 4074f86d3a8SLen Brown cpuidle_disable_device(dev); 4084f86d3a8SLen Brown 4098a25a2fdSKay Sievers cpuidle_remove_sysfs(cpu_dev); 4104f86d3a8SLen Brown list_del(&dev->device_list); 4114f86d3a8SLen Brown wait_for_completion(&dev->kobj_unregister); 4124f86d3a8SLen Brown per_cpu(cpuidle_devices, dev->cpu) = NULL; 4134f86d3a8SLen Brown 4144f86d3a8SLen Brown cpuidle_resume_and_unlock(); 4154f86d3a8SLen Brown 416752138dfSLen Brown module_put(cpuidle_driver->owner); 4174f86d3a8SLen Brown } 4184f86d3a8SLen Brown 4194f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_unregister_device); 4204f86d3a8SLen Brown 4214f86d3a8SLen Brown #ifdef CONFIG_SMP 4224f86d3a8SLen Brown 4234f86d3a8SLen Brown static void smp_callback(void *v) 4244f86d3a8SLen Brown { 4254f86d3a8SLen Brown /* we already woke the CPU up, nothing more to do */ 4264f86d3a8SLen Brown } 4274f86d3a8SLen Brown 4284f86d3a8SLen Brown /* 4294f86d3a8SLen Brown * This function gets called when a part of the kernel has a new latency 4304f86d3a8SLen Brown * requirement. This means we need to get all processors out of their C-state, 4314f86d3a8SLen Brown * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that 4324f86d3a8SLen Brown * wakes them all right up. 
4334f86d3a8SLen Brown */ 4344f86d3a8SLen Brown static int cpuidle_latency_notify(struct notifier_block *b, 4354f86d3a8SLen Brown unsigned long l, void *v) 4364f86d3a8SLen Brown { 4378691e5a8SJens Axboe smp_call_function(smp_callback, NULL, 1); 4384f86d3a8SLen Brown return NOTIFY_OK; 4394f86d3a8SLen Brown } 4404f86d3a8SLen Brown 4414f86d3a8SLen Brown static struct notifier_block cpuidle_latency_notifier = { 4424f86d3a8SLen Brown .notifier_call = cpuidle_latency_notify, 4434f86d3a8SLen Brown }; 4444f86d3a8SLen Brown 445d82b3518SMark Gross static inline void latency_notifier_init(struct notifier_block *n) 446d82b3518SMark Gross { 447d82b3518SMark Gross pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n); 448d82b3518SMark Gross } 4494f86d3a8SLen Brown 4504f86d3a8SLen Brown #else /* CONFIG_SMP */ 4514f86d3a8SLen Brown 4524f86d3a8SLen Brown #define latency_notifier_init(x) do { } while (0) 4534f86d3a8SLen Brown 4544f86d3a8SLen Brown #endif /* CONFIG_SMP */ 4554f86d3a8SLen Brown 4564f86d3a8SLen Brown /** 4574f86d3a8SLen Brown * cpuidle_init - core initializer 4584f86d3a8SLen Brown */ 4594f86d3a8SLen Brown static int __init cpuidle_init(void) 4604f86d3a8SLen Brown { 4614f86d3a8SLen Brown int ret; 4624f86d3a8SLen Brown 46362027aeaSLen Brown if (cpuidle_disabled()) 46462027aeaSLen Brown return -ENODEV; 46562027aeaSLen Brown 4668a25a2fdSKay Sievers ret = cpuidle_add_interface(cpu_subsys.dev_root); 4674f86d3a8SLen Brown if (ret) 4684f86d3a8SLen Brown return ret; 4694f86d3a8SLen Brown 4704f86d3a8SLen Brown latency_notifier_init(&cpuidle_latency_notifier); 4714f86d3a8SLen Brown 4724f86d3a8SLen Brown return 0; 4734f86d3a8SLen Brown } 4744f86d3a8SLen Brown 47562027aeaSLen Brown module_param(off, int, 0444); 4764f86d3a8SLen Brown core_initcall(cpuidle_init); 477