/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/power.h>

#include "cpuidle.h"

/* Per-CPU pointer to that CPU's registered cpuidle device (or NULL). */
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

/* Serializes registration/enable/disable and the device list below. */
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

/*
 * Idle handler that was installed before cpuidle took over pm_idle;
 * restored by cpuidle_uninstall_idle_handler().
 */
static void (*pm_idle_old)(void);

/* Count of enabled devices; gates installing cpuidle_idle_call as pm_idle. */
static int enabled_devices;

/*
 * cpuidle_kick_cpus - make every CPU leave the previously installed idle
 * handler after pm_idle has been switched back, by waiting for all CPUs
 * to pass through the idle loop (cpu_idle_wait).  On UP there is nothing
 * to kick; SMP arches without cpu_idle_wait() cannot safely switch.
 */
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static
int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		/*
		 * No usable cpuidle device on this CPU: fall back to the
		 * previous idle handler, the arch default, or just re-enable
		 * interrupts (we are entered with them disabled).
		 */
		if (pm_idle_old)
			pm_idle_old();
		else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
			default_idle();
#else
			local_irq_enable();
#endif
		return;
	}

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/*
	 * Call the device's prepare function before calling the
	 * governor's select function. ->prepare gives the device's
	 * cpuidle driver a chance to update any dynamic information
	 * of its cpuidle states for the current idle period, e.g.
	 * state availability, latencies, residencies, etc.
	 */
	if (dev->prepare)
		dev->prepare(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		/* work arrived while selecting; leave with irqs enabled */
		local_irq_enable();
		return;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;
	dev->last_residency = target_state->enter(dev, target_state);
	/*
	 * The driver may update dev->last_state in ->enter() to report
	 * the state actually entered (e.g. on demotion); account against
	 * that state rather than the one the governor asked for.
	 */
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
	trace_power_end(smp_processor_id());
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
		/* restore the saved handler, then get CPUs off the old one */
		pm_idle = pm_idle_old;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 *
 * Takes cpuidle_lock; must be paired with cpuidle_resume_and_unlock().
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 *
 * Counterpart of cpuidle_pause_and_lock(); drops cpuidle_lock.
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	/* need both a driver and a governor before anything can be enabled */
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	/* late registration path: device not yet registered when enabled */
	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	/* reset per-state statistics for this enable cycle */
	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

	/* publish the stats resets before the idle path sees enabled == 1 */
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
/*
 * poll_idle - busy-wait "C0" state: spin with interrupts enabled until
 * there is work to do.  Returns the time spent polling, in microseconds,
 * clamped to INT_MAX.
 */
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	ktime_t t1, t2;
	s64 diff;
	int ret;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	ret = (int) diff;
	return ret;
}

/* Set up states[0] as the polling pseudo-state described above. */
static void poll_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	cpuidle_set_statedata(state, NULL);

	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	/* -1 == "most power"; real C-states get lower values at register time */
	state->power_usage = -1;
	state->flags = CPUIDLE_FLAG_POLL;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * __cpuidle_register_device - internal register function called before register
 *                             and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
285752138dfSLen Brown struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); 2864f86d3a8SLen Brown 2874f86d3a8SLen Brown if (!sys_dev) 2884f86d3a8SLen Brown return -EINVAL; 289752138dfSLen Brown if (!try_module_get(cpuidle_driver->owner)) 2904f86d3a8SLen Brown return -EINVAL; 2914f86d3a8SLen Brown 2924f86d3a8SLen Brown init_completion(&dev->kobj_unregister); 2934f86d3a8SLen Brown 2949a0b8415Svenkatesh.pallipadi@intel.com poll_idle_init(dev); 2959a0b8415Svenkatesh.pallipadi@intel.com 296*71abbbf8SAi Li /* 297*71abbbf8SAi Li * cpuidle driver should set the dev->power_specified bit 298*71abbbf8SAi Li * before registering the device if the driver provides 299*71abbbf8SAi Li * power_usage numbers. 300*71abbbf8SAi Li * 301*71abbbf8SAi Li * For those devices whose ->power_specified is not set, 302*71abbbf8SAi Li * we fill in power_usage with decreasing values as the 303*71abbbf8SAi Li * cpuidle code has an implicit assumption that state Cn 304*71abbbf8SAi Li * uses less power than C(n-1). 305*71abbbf8SAi Li * 306*71abbbf8SAi Li * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned 307*71abbbf8SAi Li * an power value of -1. So we use -2, -3, etc, for other 308*71abbbf8SAi Li * c-states. 
309*71abbbf8SAi Li */ 310*71abbbf8SAi Li if (!dev->power_specified) { 311*71abbbf8SAi Li int i; 312*71abbbf8SAi Li for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) 313*71abbbf8SAi Li dev->states[i].power_usage = -1 - i; 314*71abbbf8SAi Li } 315*71abbbf8SAi Li 3164f86d3a8SLen Brown per_cpu(cpuidle_devices, dev->cpu) = dev; 3174f86d3a8SLen Brown list_add(&dev->device_list, &cpuidle_detected_devices); 3184f86d3a8SLen Brown if ((ret = cpuidle_add_sysfs(sys_dev))) { 319752138dfSLen Brown module_put(cpuidle_driver->owner); 3204f86d3a8SLen Brown return ret; 3214f86d3a8SLen Brown } 3224f86d3a8SLen Brown 323dcb84f33SVenkatesh Pallipadi dev->registered = 1; 324dcb84f33SVenkatesh Pallipadi return 0; 325dcb84f33SVenkatesh Pallipadi } 326dcb84f33SVenkatesh Pallipadi 327dcb84f33SVenkatesh Pallipadi /** 328dcb84f33SVenkatesh Pallipadi * cpuidle_register_device - registers a CPU's idle PM feature 329dcb84f33SVenkatesh Pallipadi * @dev: the cpu 330dcb84f33SVenkatesh Pallipadi */ 331dcb84f33SVenkatesh Pallipadi int cpuidle_register_device(struct cpuidle_device *dev) 332dcb84f33SVenkatesh Pallipadi { 333dcb84f33SVenkatesh Pallipadi int ret; 334dcb84f33SVenkatesh Pallipadi 335dcb84f33SVenkatesh Pallipadi mutex_lock(&cpuidle_lock); 336dcb84f33SVenkatesh Pallipadi 337dcb84f33SVenkatesh Pallipadi if ((ret = __cpuidle_register_device(dev))) { 338dcb84f33SVenkatesh Pallipadi mutex_unlock(&cpuidle_lock); 339dcb84f33SVenkatesh Pallipadi return ret; 340dcb84f33SVenkatesh Pallipadi } 341dcb84f33SVenkatesh Pallipadi 3424f86d3a8SLen Brown cpuidle_enable_device(dev); 3434f86d3a8SLen Brown cpuidle_install_idle_handler(); 3444f86d3a8SLen Brown 3454f86d3a8SLen Brown mutex_unlock(&cpuidle_lock); 3464f86d3a8SLen Brown 3474f86d3a8SLen Brown return 0; 3484f86d3a8SLen Brown 3494f86d3a8SLen Brown } 3504f86d3a8SLen Brown 3514f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_register_device); 3524f86d3a8SLen Brown 3534f86d3a8SLen Brown /** 3544f86d3a8SLen Brown * cpuidle_unregister_device - 
unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	/* nothing to do if registration never completed */
	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	/* wait for the sysfs kobject release before clearing the per-cpu slot */
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	/* drop the driver-module reference taken at registration */
	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

/* Register for PM QoS DMA-latency changes (SMP only). */
static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 *
 * Saves the current pm_idle handler, creates the sysfs class entries,
 * and hooks the latency notifier.  Runs at core_initcall time.
 */
static int __init cpuidle_init(void)
{
	int ret;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

core_initcall(cpuidle_init);