/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

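/*
 * find_deepest_state - return the index of the deepest usable idle state
 *
 * Walk the driver's state table and pick the state with the highest exit
 * latency that is not disabled, whose exit latency does not exceed
 * @max_latency, whose flags contain none of @forbidden_flags and, if
 * @freeze is set, which implements the ->enter_freeze callback.
 * Returns -ENXIO if no state qualifies.
 */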
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = -ENXIO;

	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

#ifdef CONFIG_SUSPEND
/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume(), which will be called by tick_unfreeze() on the
	 * last CPU executing it, calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with the timer tick frozen.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index >= 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
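		/*
		 * The broadcast timer is not available, so fall back to the
		 * deepest state that does not need the local timer to be
		 * stopped and is not deeper than the originally chosen state.
		 */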
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be done in the
		 * driver's enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied to the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or a negative value in case
 * of an error.  The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(dev, drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor which idle state the cpu
 * was in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at the idle states they were pointed to.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
3468651f97bSPreeti U Murthy 
3474f86d3a8SLen Brown /**
3484f86d3a8SLen Brown  * cpuidle_enable_device - enables idle PM for a CPU
3494f86d3a8SLen Brown  * @dev: the CPU
3504f86d3a8SLen Brown  *
3514f86d3a8SLen Brown  * This function must be called between cpuidle_pause_and_lock and
3524f86d3a8SLen Brown  * cpuidle_resume_and_unlock when used externally.
3534f86d3a8SLen Brown  */
3544f86d3a8SLen Brown int cpuidle_enable_device(struct cpuidle_device *dev)
3554f86d3a8SLen Brown {
3565df0aa73SDaniel Lezcano 	int ret;
357bf4d1b5dSDaniel Lezcano 	struct cpuidle_driver *drv;
3584f86d3a8SLen Brown 
3591b0a0e9aSSrivatsa S. Bhat 	if (!dev)
3601b0a0e9aSSrivatsa S. Bhat 		return -EINVAL;
3611b0a0e9aSSrivatsa S. Bhat 
3624f86d3a8SLen Brown 	if (dev->enabled)
3634f86d3a8SLen Brown 		return 0;
364bf4d1b5dSDaniel Lezcano 
365bf4d1b5dSDaniel Lezcano 	drv = cpuidle_get_cpu_driver(dev);
366bf4d1b5dSDaniel Lezcano 
367e1689795SRobert Lee 	if (!drv || !cpuidle_curr_governor)
3684f86d3a8SLen Brown 		return -EIO;
369bf4d1b5dSDaniel Lezcano 
37010b9d3f8SDaniel Lezcano 	if (!dev->registered)
37110b9d3f8SDaniel Lezcano 		return -EINVAL;
37210b9d3f8SDaniel Lezcano 
373bf4d1b5dSDaniel Lezcano 	ret = cpuidle_add_device_sysfs(dev);
374bf4d1b5dSDaniel Lezcano 	if (ret)
3754f86d3a8SLen Brown 		return ret;
3764f86d3a8SLen Brown 
3774f86d3a8SLen Brown 	if (cpuidle_curr_governor->enable &&
378e1689795SRobert Lee 	    (ret = cpuidle_curr_governor->enable(drv, dev)))
3794f86d3a8SLen Brown 		goto fail_sysfs;
3804f86d3a8SLen Brown 
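	/* Make sure the device is fully set up before it is marked enabled. */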
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
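
/*
 * Drivers that use the cpuidle_register() helper below do not need to call
 * cpuidle_register_device() directly: the helper registers the driver and
 * then calls this function for each CPU it handles.
 */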

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern shared by the arch specific drivers. The
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, the coupled idle states could
		 * be enabled in the kernel even if the cpuidle driver does not
		 * use them. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

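/*
 * Illustrative use of cpuidle_register() from an arch driver.  The driver
 * below is hypothetical and only sketches the usual pattern; real drivers
 * fill in their own states and enter callbacks:
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name = "foo_idle",
 *		.owner = THIS_MODULE,
 *		.states[0] = {
 *			.enter            = foo_enter_lowpower,
 *			.exit_latency     = 1,
 *			.target_residency = 1,
 *			.name             = "WFI",
 *			.desc             = "CPU wait-for-interrupt",
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&foo_idle_driver, NULL);
 */
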
#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);