xref: /linux/drivers/cpuidle/cpuidle.c (revision 8a25a2fd126c621f44f3aeaef80d51f00fc11639)
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

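/*
 * cpuidle_uninstall_idle_handler() needs every CPU to drop out of the idle
 * handler it may currently be running.  Architectures that provide
 * cpu_idle_wait() give us exactly that; a uniprocessor build needs no kick
 * at all, and an SMP build without cpu_idle_wait() cannot be supported.
 */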
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_state *target_state;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	target_state = &drv->states[next_state];

	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	entered_state = target_state->enter(dev, drv, next_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This can be moved to within driver enter routine
		 * but that results in multiple copies of same code.
		 */
		dev->states_usage[entered_state].time +=
				(unsigned long long)dev->last_residency;
		dev->states_usage[entered_state].usage++;
	}

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
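
/*
 * Sketch of the intended caller: an architecture's idle loop tries cpuidle
 * first and falls back to its own default idle routine when the call above
 * fails (arch_default_idle below is an illustrative name, not a real API):
 *
 *	while (!need_resched()) {
 *		if (cpuidle_idle_call())
 *			arch_default_idle();
 *	}
 */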

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

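/*
 * State 0 of the registered driver is (re)initialised as a "polling" state
 * on architectures that provide cpu_relax(): it never stops the CPU, so it
 * advertises zero exit latency and zero target residency and simply spins
 * with interrupts enabled until the scheduler needs the CPU again.
 */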
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t	t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(cpuidle_get_driver());

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

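	/*
	 * Make the zeroed statistics above globally visible before
	 * dev->enabled is set: the idle path reads dev->enabled without
	 * taking cpuidle_lock.
	 */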
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct device *cpu_dev;
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	/* check dev before it is dereferenced for the per-CPU device lookup */
	if (!dev)
		return -EINVAL;

	cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(cpu_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
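
/*
 * Simplified sketch of how a cpuidle driver typically uses the interface
 * above: register a struct cpuidle_driver once, then one cpuidle_device per
 * CPU.  Every name beginning with "my_" is hypothetical and only meant to
 * illustrate the call sequence.
 *
 *	static struct cpuidle_driver my_driver = {
 *		.name		= "my_idle",
 *		.owner		= THIS_MODULE,
 *		.states[0]	= {
 *			.name			= "C1",
 *			.enter			= my_enter,
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *		},
 *		.state_count	= 1,
 *	};
 *
 *	cpuidle_register_driver(&my_driver);
 *	for_each_possible_cpu(cpu) {
 *		struct cpuidle_device *dev = &per_cpu(my_device, cpu);
 *
 *		dev->cpu = cpu;
 *		dev->state_count = 1;
 *		cpuidle_register_device(dev);
 *	}
 */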

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(cpu_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

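/* Built in, "off" becomes the "cpuidle.off=1" boot parameter (0444: read-only via sysfs). */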
module_param(off, int, 0444);
core_initcall(cpuidle_init);