xref: /linux/drivers/cpuidle/cpuidle.c (revision 2e94d1f71f7e4404d997e6fb4f1618aa147d76f9)
14f86d3a8SLen Brown /*
24f86d3a8SLen Brown  * cpuidle.c - core cpuidle infrastructure
34f86d3a8SLen Brown  *
44f86d3a8SLen Brown  * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
54f86d3a8SLen Brown  *               Shaohua Li <shaohua.li@intel.com>
64f86d3a8SLen Brown  *               Adam Belay <abelay@novell.com>
74f86d3a8SLen Brown  *
84f86d3a8SLen Brown  * This code is licenced under the GPL.
94f86d3a8SLen Brown  */
104f86d3a8SLen Brown 
114f86d3a8SLen Brown #include <linux/kernel.h>
124f86d3a8SLen Brown #include <linux/mutex.h>
134f86d3a8SLen Brown #include <linux/sched.h>
144f86d3a8SLen Brown #include <linux/notifier.h>
15d82b3518SMark Gross #include <linux/pm_qos_params.h>
164f86d3a8SLen Brown #include <linux/cpu.h>
174f86d3a8SLen Brown #include <linux/cpuidle.h>
189a0b8415Svenkatesh.pallipadi@intel.com #include <linux/ktime.h>
19*2e94d1f7SArjan van de Ven #include <linux/hrtimer.h>
204f86d3a8SLen Brown 
214f86d3a8SLen Brown #include "cpuidle.h"
224f86d3a8SLen Brown 
234f86d3a8SLen Brown DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
244f86d3a8SLen Brown 
254f86d3a8SLen Brown DEFINE_MUTEX(cpuidle_lock);
264f86d3a8SLen Brown LIST_HEAD(cpuidle_detected_devices);
274f86d3a8SLen Brown static void (*pm_idle_old)(void);
284f86d3a8SLen Brown 
294f86d3a8SLen Brown static int enabled_devices;
304f86d3a8SLen Brown 
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
/*
 * Force all CPUs out of their current idle handler so that a changed
 * pm_idle pointer takes effect, using the architecture's cpu_idle_wait().
 */
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
/* An SMP arch without cpu_idle_wait() cannot safely switch idle handlers. */
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
/* Uniprocessor: the only CPU is the caller itself, nothing to kick. */
static void cpuidle_kick_cpus(void) {}
#endif
41a6869cc4SVenki Pallipadi 
42dcb84f33SVenkatesh Pallipadi static int __cpuidle_register_device(struct cpuidle_device *dev);
43dcb84f33SVenkatesh Pallipadi 
/**
 * cpuidle_idle_call - the main idle loop
 *
 * Called via the pm_idle hook with interrupts disabled.
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		/* fall back to the idle handler cpuidle replaced, if any */
		if (pm_idle_old)
			pm_idle_old();
		else
			/* no fallback: re-enable interrupts before returning */
			local_irq_enable();
		return;
	}

	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	/* bail out if work arrived while the governor was deciding */
	if (need_resched())
		return;
	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_residency = target_state->enter(dev, target_state);
	dev->last_state = target_state;
	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}
864f86d3a8SLen Brown 
874f86d3a8SLen Brown /**
884f86d3a8SLen Brown  * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
894f86d3a8SLen Brown  */
904f86d3a8SLen Brown void cpuidle_install_idle_handler(void)
914f86d3a8SLen Brown {
924f86d3a8SLen Brown 	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
934f86d3a8SLen Brown 		/* Make sure all changes finished before we switch to new idle */
944f86d3a8SLen Brown 		smp_wmb();
954f86d3a8SLen Brown 		pm_idle = cpuidle_idle_call;
964f86d3a8SLen Brown 	}
974f86d3a8SLen Brown }
984f86d3a8SLen Brown 
994f86d3a8SLen Brown /**
1004f86d3a8SLen Brown  * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
1014f86d3a8SLen Brown  */
1024f86d3a8SLen Brown void cpuidle_uninstall_idle_handler(void)
1034f86d3a8SLen Brown {
104b032bf70SThomas Gleixner 	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
1054f86d3a8SLen Brown 		pm_idle = pm_idle_old;
106a6869cc4SVenki Pallipadi 		cpuidle_kick_cpus();
1074f86d3a8SLen Brown 	}
1084f86d3a8SLen Brown }
1094f86d3a8SLen Brown 
/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 *
 * Takes cpuidle_lock and restores the previous idle handler; must be
 * paired with cpuidle_resume_and_unlock(), which drops the lock.
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
1204f86d3a8SLen Brown 
/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 *
 * Counterpart of cpuidle_pause_and_lock(): re-installs the cpuidle idle
 * handler (if any device is enabled) and releases cpuidle_lock.
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
1314f86d3a8SLen Brown 
1324f86d3a8SLen Brown /**
1334f86d3a8SLen Brown  * cpuidle_enable_device - enables idle PM for a CPU
1344f86d3a8SLen Brown  * @dev: the CPU
1354f86d3a8SLen Brown  *
1364f86d3a8SLen Brown  * This function must be called between cpuidle_pause_and_lock and
1374f86d3a8SLen Brown  * cpuidle_resume_and_unlock when used externally.
1384f86d3a8SLen Brown  */
1394f86d3a8SLen Brown int cpuidle_enable_device(struct cpuidle_device *dev)
1404f86d3a8SLen Brown {
1414f86d3a8SLen Brown 	int ret, i;
1424f86d3a8SLen Brown 
1434f86d3a8SLen Brown 	if (dev->enabled)
1444f86d3a8SLen Brown 		return 0;
1454f86d3a8SLen Brown 	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
1464f86d3a8SLen Brown 		return -EIO;
1474f86d3a8SLen Brown 	if (!dev->state_count)
1484f86d3a8SLen Brown 		return -EINVAL;
1494f86d3a8SLen Brown 
150dcb84f33SVenkatesh Pallipadi 	if (dev->registered == 0) {
151dcb84f33SVenkatesh Pallipadi 		ret = __cpuidle_register_device(dev);
152dcb84f33SVenkatesh Pallipadi 		if (ret)
153dcb84f33SVenkatesh Pallipadi 			return ret;
154dcb84f33SVenkatesh Pallipadi 	}
155dcb84f33SVenkatesh Pallipadi 
1564f86d3a8SLen Brown 	if ((ret = cpuidle_add_state_sysfs(dev)))
1574f86d3a8SLen Brown 		return ret;
1584f86d3a8SLen Brown 
1594f86d3a8SLen Brown 	if (cpuidle_curr_governor->enable &&
1604f86d3a8SLen Brown 	    (ret = cpuidle_curr_governor->enable(dev)))
1614f86d3a8SLen Brown 		goto fail_sysfs;
1624f86d3a8SLen Brown 
1634f86d3a8SLen Brown 	for (i = 0; i < dev->state_count; i++) {
1644f86d3a8SLen Brown 		dev->states[i].usage = 0;
1654f86d3a8SLen Brown 		dev->states[i].time = 0;
1664f86d3a8SLen Brown 	}
1674f86d3a8SLen Brown 	dev->last_residency = 0;
1684f86d3a8SLen Brown 	dev->last_state = NULL;
1694f86d3a8SLen Brown 
1704f86d3a8SLen Brown 	smp_wmb();
1714f86d3a8SLen Brown 
1724f86d3a8SLen Brown 	dev->enabled = 1;
1734f86d3a8SLen Brown 
1744f86d3a8SLen Brown 	enabled_devices++;
1754f86d3a8SLen Brown 	return 0;
1764f86d3a8SLen Brown 
1774f86d3a8SLen Brown fail_sysfs:
1784f86d3a8SLen Brown 	cpuidle_remove_state_sysfs(dev);
1794f86d3a8SLen Brown 
1804f86d3a8SLen Brown 	return ret;
1814f86d3a8SLen Brown }
1824f86d3a8SLen Brown 
1834f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_enable_device);
1844f86d3a8SLen Brown 
1854f86d3a8SLen Brown /**
1864f86d3a8SLen Brown  * cpuidle_disable_device - disables idle PM for a CPU
1874f86d3a8SLen Brown  * @dev: the CPU
1884f86d3a8SLen Brown  *
1894f86d3a8SLen Brown  * This function must be called between cpuidle_pause_and_lock and
1904f86d3a8SLen Brown  * cpuidle_resume_and_unlock when used externally.
1914f86d3a8SLen Brown  */
1924f86d3a8SLen Brown void cpuidle_disable_device(struct cpuidle_device *dev)
1934f86d3a8SLen Brown {
1944f86d3a8SLen Brown 	if (!dev->enabled)
1954f86d3a8SLen Brown 		return;
1964f86d3a8SLen Brown 	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
1974f86d3a8SLen Brown 		return;
1984f86d3a8SLen Brown 
1994f86d3a8SLen Brown 	dev->enabled = 0;
2004f86d3a8SLen Brown 
2014f86d3a8SLen Brown 	if (cpuidle_curr_governor->disable)
2024f86d3a8SLen Brown 		cpuidle_curr_governor->disable(dev);
2034f86d3a8SLen Brown 
2044f86d3a8SLen Brown 	cpuidle_remove_state_sysfs(dev);
2054f86d3a8SLen Brown 	enabled_devices--;
2064f86d3a8SLen Brown }
2074f86d3a8SLen Brown 
2084f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_disable_device);
2094f86d3a8SLen Brown 
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
/*
 * poll_idle - busy-wait pseudo C-state: spin with cpu_relax() until a
 * reschedule is pending, then report the microseconds spent, clamped
 * to INT_MAX.
 */
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	ktime_t start, stop;
	s64 elapsed;

	start = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	stop = ktime_get();
	elapsed = ktime_to_us(ktime_sub(stop, start));
	if (elapsed > INT_MAX)
		elapsed = INT_MAX;

	return (int)elapsed;
}

/* Install poll_idle as state 0 ("C0") of @dev. */
static void poll_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	cpuidle_set_statedata(state, NULL);

	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = CPUIDLE_FLAG_POLL;
	state->enter = poll_idle;
}
#else
/* Without a cheap cpu_relax(), no polling state is offered. */
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
2489a0b8415Svenkatesh.pallipadi@intel.com 
2494f86d3a8SLen Brown /**
250dcb84f33SVenkatesh Pallipadi  * __cpuidle_register_device - internal register function called before register
251dcb84f33SVenkatesh Pallipadi  * and enable routines
2524f86d3a8SLen Brown  * @dev: the cpu
253dcb84f33SVenkatesh Pallipadi  *
254dcb84f33SVenkatesh Pallipadi  * cpuidle_lock mutex must be held before this is called
2554f86d3a8SLen Brown  */
256dcb84f33SVenkatesh Pallipadi static int __cpuidle_register_device(struct cpuidle_device *dev)
2574f86d3a8SLen Brown {
2584f86d3a8SLen Brown 	int ret;
2594f86d3a8SLen Brown 	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
2604f86d3a8SLen Brown 
2614f86d3a8SLen Brown 	if (!sys_dev)
2624f86d3a8SLen Brown 		return -EINVAL;
2634f86d3a8SLen Brown 	if (!try_module_get(cpuidle_curr_driver->owner))
2644f86d3a8SLen Brown 		return -EINVAL;
2654f86d3a8SLen Brown 
2664f86d3a8SLen Brown 	init_completion(&dev->kobj_unregister);
2674f86d3a8SLen Brown 
2689a0b8415Svenkatesh.pallipadi@intel.com 	poll_idle_init(dev);
2699a0b8415Svenkatesh.pallipadi@intel.com 
2704f86d3a8SLen Brown 	per_cpu(cpuidle_devices, dev->cpu) = dev;
2714f86d3a8SLen Brown 	list_add(&dev->device_list, &cpuidle_detected_devices);
2724f86d3a8SLen Brown 	if ((ret = cpuidle_add_sysfs(sys_dev))) {
2734f86d3a8SLen Brown 		module_put(cpuidle_curr_driver->owner);
2744f86d3a8SLen Brown 		return ret;
2754f86d3a8SLen Brown 	}
2764f86d3a8SLen Brown 
277dcb84f33SVenkatesh Pallipadi 	dev->registered = 1;
278dcb84f33SVenkatesh Pallipadi 	return 0;
279dcb84f33SVenkatesh Pallipadi }
280dcb84f33SVenkatesh Pallipadi 
281dcb84f33SVenkatesh Pallipadi /**
282dcb84f33SVenkatesh Pallipadi  * cpuidle_register_device - registers a CPU's idle PM feature
283dcb84f33SVenkatesh Pallipadi  * @dev: the cpu
284dcb84f33SVenkatesh Pallipadi  */
285dcb84f33SVenkatesh Pallipadi int cpuidle_register_device(struct cpuidle_device *dev)
286dcb84f33SVenkatesh Pallipadi {
287dcb84f33SVenkatesh Pallipadi 	int ret;
288dcb84f33SVenkatesh Pallipadi 
289dcb84f33SVenkatesh Pallipadi 	mutex_lock(&cpuidle_lock);
290dcb84f33SVenkatesh Pallipadi 
291dcb84f33SVenkatesh Pallipadi 	if ((ret = __cpuidle_register_device(dev))) {
292dcb84f33SVenkatesh Pallipadi 		mutex_unlock(&cpuidle_lock);
293dcb84f33SVenkatesh Pallipadi 		return ret;
294dcb84f33SVenkatesh Pallipadi 	}
295dcb84f33SVenkatesh Pallipadi 
2964f86d3a8SLen Brown 	cpuidle_enable_device(dev);
2974f86d3a8SLen Brown 	cpuidle_install_idle_handler();
2984f86d3a8SLen Brown 
2994f86d3a8SLen Brown 	mutex_unlock(&cpuidle_lock);
3004f86d3a8SLen Brown 
3014f86d3a8SLen Brown 	return 0;
3024f86d3a8SLen Brown 
3034f86d3a8SLen Brown }
3044f86d3a8SLen Brown 
3054f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_register_device);
3064f86d3a8SLen Brown 
3074f86d3a8SLen Brown /**
3084f86d3a8SLen Brown  * cpuidle_unregister_device - unregisters a CPU's idle PM feature
3094f86d3a8SLen Brown  * @dev: the cpu
3104f86d3a8SLen Brown  */
3114f86d3a8SLen Brown void cpuidle_unregister_device(struct cpuidle_device *dev)
3124f86d3a8SLen Brown {
3134f86d3a8SLen Brown 	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
3144f86d3a8SLen Brown 
315dcb84f33SVenkatesh Pallipadi 	if (dev->registered == 0)
316dcb84f33SVenkatesh Pallipadi 		return;
317dcb84f33SVenkatesh Pallipadi 
3184f86d3a8SLen Brown 	cpuidle_pause_and_lock();
3194f86d3a8SLen Brown 
3204f86d3a8SLen Brown 	cpuidle_disable_device(dev);
3214f86d3a8SLen Brown 
3224f86d3a8SLen Brown 	cpuidle_remove_sysfs(sys_dev);
3234f86d3a8SLen Brown 	list_del(&dev->device_list);
3244f86d3a8SLen Brown 	wait_for_completion(&dev->kobj_unregister);
3254f86d3a8SLen Brown 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
3264f86d3a8SLen Brown 
3274f86d3a8SLen Brown 	cpuidle_resume_and_unlock();
3284f86d3a8SLen Brown 
3294f86d3a8SLen Brown 	module_put(cpuidle_curr_driver->owner);
3304f86d3a8SLen Brown }
3314f86d3a8SLen Brown 
3324f86d3a8SLen Brown EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
3334f86d3a8SLen Brown 
3344f86d3a8SLen Brown #ifdef CONFIG_SMP
3354f86d3a8SLen Brown 
/* IPI handler: the interrupt itself kicked the CPU out of its C-state. */
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}
3404f86d3a8SLen Brown 
/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	/* last argument (wait=1): block until every CPU ran the callback */
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

/* Hooked into the PM QoS CPU/DMA latency notifier chain below. */
static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};
3574f86d3a8SLen Brown 
/* Subscribe @n to PM QoS CPU/DMA latency requirement changes. */
static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

/* UP: no other CPUs to kick, so latency changes need no notifier. */
#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
3684f86d3a8SLen Brown 
3694f86d3a8SLen Brown /**
3704f86d3a8SLen Brown  * cpuidle_init - core initializer
3714f86d3a8SLen Brown  */
3724f86d3a8SLen Brown static int __init cpuidle_init(void)
3734f86d3a8SLen Brown {
3744f86d3a8SLen Brown 	int ret;
3754f86d3a8SLen Brown 
3764f86d3a8SLen Brown 	pm_idle_old = pm_idle;
3774f86d3a8SLen Brown 
3784f86d3a8SLen Brown 	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
3794f86d3a8SLen Brown 	if (ret)
3804f86d3a8SLen Brown 		return ret;
3814f86d3a8SLen Brown 
3824f86d3a8SLen Brown 	latency_notifier_init(&cpuidle_latency_notifier);
3834f86d3a8SLen Brown 
3844f86d3a8SLen Brown 	return 0;
3854f86d3a8SLen Brown }
3864f86d3a8SLen Brown 
3874f86d3a8SLen Brown core_initcall(cpuidle_init);
388