xref: /linux/drivers/cpuidle/cpuidle.c (revision 8691e5a8f691cc2a4fda0651e8d307aaba0e7d68)
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;

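/*
 * cpuidle_kick_cpus() nudges all online CPUs out of whatever idle handler
 * they are currently running. It is called after pm_idle is switched back
 * to the previous handler so that no CPU keeps executing cpuidle_idle_call().
 * On architectures that provide it, cpu_idle_wait() both wakes the CPUs and
 * waits until each of them has left the old idle routine.
 */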
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
			local_irq_enable();
		return;
	}

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched())
		return;
	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_residency = target_state->enter(dev, target_state);
	dev->last_state = target_state;
	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}
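
/*
 * Summary of the callback contract as exercised by the loop above:
 *
 *   governor->select(dev)   - returns an index into dev->states[]
 *   state->enter(dev, st)   - enters the idle state and returns the time
 *                             spent there in microseconds, stored in
 *                             dev->last_residency
 *   governor->reflect(dev)  - optional; lets the governor account for how
 *                             its previous prediction worked out
 */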

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && (pm_idle != pm_idle_old)) {
		pm_idle = pm_idle_old;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
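
/*
 * Typical external usage, per the kernel-doc above (illustrative sketch,
 * not copied from an in-tree caller):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reprogram dev->states[] ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 *
 * The pause/resume pair swaps pm_idle away from cpuidle_idle_call() and
 * holds cpuidle_lock, so the device can be modified while no CPU is
 * executing its states.
 */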

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
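/*
 * poll_idle() implements the "state 0" polling loop installed by
 * poll_idle_init() below: it enables interrupts and spins on cpu_relax()
 * until a reschedule is pending, then returns the time spent polling in
 * microseconds so the core can record it as last_residency.
 */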
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	ktime_t	t1, t2;
	s64 diff;
	int ret;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	ret = (int) diff;
	return ret;
}

static void poll_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	cpuidle_set_statedata(state, NULL);

	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = CPUIDLE_FLAG_POLL;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_curr_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	poll_idle_init(dev);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		module_put(cpuidle_curr_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
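
/*
 * Rough shape of a driver-side caller (illustrative sketch only; the enter
 * callback and values below are hypothetical, and field names follow the
 * cpuidle API of this era -- see the ACPI processor idle driver for a real
 * user):
 *
 *	dev->cpu = cpu;				// target CPU id
 *	dev->states[1].enter = my_enter_c1;	// hypothetical enter callback
 *	snprintf(dev->states[1].name, CPUIDLE_NAME_LEN, "C1");
 *	dev->states[1].exit_latency = 1;	// microseconds
 *	dev->states[1].target_residency = 2;	// microseconds
 *	dev->state_count = 2;			// state 0 is overwritten by
 *						// poll_idle_init() on arches
 *						// with CONFIG_ARCH_HAS_CPU_RELAX
 *
 *	if (cpuidle_register_device(dev))
 *		goto fail;			// handle failure
 *
 * Registration enables the device and installs cpuidle_idle_call() as
 * pm_idle once at least one device is enabled.
 */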

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
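
/*
 * Background: kernel code expresses latency constraints through the PM QoS
 * framework (linux/pm_qos_params.h). A sketch of a requester, with the
 * requirement name and values chosen here purely for illustration:
 *
 *	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver", 50);
 *	...
 *	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver", 20);
 *	...
 *	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver");
 *
 * Each change fires the notifier registered above, which IPIs all CPUs so
 * the governors re-evaluate their C-state choice against the new bound.
 */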

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

core_initcall(cpuidle_init);