/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/percpu-defs.h>
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/mmu_context.h>
#include <linux/context_tracking.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no cpuidle driver is present;
 * on success this function does not return.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (drv->states[i].enter_dead)
			drv->states[i].enter_dead(dev, i);
	}

	/*
	 * If an ->enter_dead() callback is successful, it will never return,
	 * so reaching here means that all of them failed above or were not
	 * present.
	 */
	return -ENODEV;
}

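/*
 * find_deepest_state - pick the deepest usable state from the state table.
 *
 * Scan the states in index order, skipping the polling state 0, and
 * remember the qualifying state with the greatest exit latency: one that
 * is enabled, whose exit latency does not exceed @max_latency_ns, that
 * carries none of @forbidden_flags and, when @s2idle is set, that provides
 * an ->enter_s2idle() callback.  Returns 0 if no state qualifies.
 */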
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      u64 max_latency_ns,
			      unsigned int forbidden_flags,
			      bool s2idle)
{
	u64 latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable ||
		    s->exit_latency_ns <= latency_req ||
		    s->exit_latency_ns > max_latency_ns ||
		    (s->flags & forbidden_flags) ||
		    (s2idle && !s->enter_s2idle))
			continue;

		latency_req = s->exit_latency_ns;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/unset governor override mode.
 * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
 *
 * If @latency_limit_ns is nonzero, make the current CPU use the deepest idle
 * state whose exit latency is within @latency_limit_ns, overriding the
 * governor from that point on.  If it is zero, restore governor-based state
 * selection.
 */
void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->forced_idle_latency_limit_ns = latency_limit_ns;
	preempt_enable();
}
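
/*
 * Illustrative use, modeled on forced-idle callers such as
 * play_idle_precise() in kernel/sched/idle.c:
 *
 *	cpuidle_use_deepest_state(latency_ns);	// override the governor
 *	...					// injected idle period
 *	cpuidle_use_deepest_state(0);		// back to governor selection
 */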

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @latency_limit_ns: Idle state exit latency limit.
 *
 * Return: the index of the deepest available idle state.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev,
			       u64 latency_limit_ns)
{
	return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}
#ifdef CONFIG_SUSPEND
static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
					struct cpuidle_device *dev, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;

	instrumentation_begin();

	time_start = ns_to_ktime(local_clock_noinstr());

	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		ct_cpuidle_enter();
		/* Annotate away the indirect call */
		instrumentation_begin();
	}
	target_state->enter_s2idle(dev, drv, index);
	if (WARN_ON_ONCE(!irqs_disabled()))
		raw_local_irq_disable();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		instrumentation_end();
		ct_cpuidle_exit();
	}
	tick_unfreeze();
	start_critical_timings();

	time_end = ns_to_ktime(local_clock_noinstr());

	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
	dev->states_usage[index].s2idle_usage++;
	instrumentation_end();
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @latency_limit_ns: Idle state exit latency limit.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			 u64 latency_limit_ns)
{
	int index;

	/*
	 * Find the deepest state with ->enter_s2idle present that meets the
	 * specified latency limit, which guarantees that interrupts won't be
	 * enabled when it exits and allows the tick to be frozen safely.
	 */
	index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
	if (index > 0) {
		enter_s2idle_proper(drv, dev, index);
		local_irq_enable();
	}
	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;

	instrumentation_begin();

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
					   CPUIDLE_FLAG_TIMER_STOP, false);

		target_state = &drv->states[index];
		broadcast = false;
	}

	if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm();

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock_noinstr());

	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		ct_cpuidle_enter();
		/* Annotate away the indirect call */
		instrumentation_begin();
	}

	/*
	 * NOTE!!
	 *
	 * For cpuidle_state::enter() methods that do *NOT* set
	 * CPUIDLE_FLAG_RCU_IDLE, RCU will be disabled here and these functions
	 * must be marked either noinstr or __cpuidle.
	 *
	 * For cpuidle_state::enter() methods that *DO* set
	 * CPUIDLE_FLAG_RCU_IDLE this isn't required, but they must mark the
	 * function calling ct_cpuidle_enter() as noinstr/__cpuidle and all
	 * functions called within the RCU-idle region.
	 */
	entered_state = target_state->enter(dev, drv, index);

	if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
		raw_local_irq_disable();

	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
		instrumentation_end();
		ct_cpuidle_exit();
	}
	start_critical_timings();

	sched_clock_idle_wakeup_event();
	time_end = ns_to_ktime(local_clock_noinstr());
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast)
		tick_broadcast_exit();

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	if (entered_state >= 0) {
		s64 diff, delay = drv->states[entered_state].exit_latency_ns;
		int i;

		/*
		 * Update cpuidle counters.
		 * This could be moved into the driver's enter routine, but
		 * that would result in multiple copies of the same code.
		 */
		diff = ktime_sub(time_end, time_start);

		dev->last_residency_ns = diff;
		dev->states_usage[entered_state].time_ns += diff;
		dev->states_usage[entered_state].usage++;

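		/*
		 * Idle-miss accounting (with illustrative numbers): if this
		 * state's target residency is 200 us but the CPU was only
		 * idle for 50 us while a shallower state was enabled, the
		 * state chosen was too deep and 'above' is incremented.  If
		 * instead the idle time exceeded this state's exit latency
		 * by at least a deeper enabled state's target residency, the
		 * state chosen was too shallow and 'below' is incremented.
		 */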
		if (diff < drv->states[entered_state].target_residency_ns) {
			for (i = entered_state - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				/* Shallower states are enabled, so update. */
				dev->states_usage[entered_state].above++;
				trace_cpu_idle_miss(dev->cpu, entered_state, false);
				break;
			}
		} else if (diff > delay) {
			for (i = entered_state + 1; i < drv->state_count; i++) {
				if (dev->states_usage[i].disable)
					continue;

				/*
				 * Update if a deeper state would have been a
				 * better match for the observed idle duration.
				 */
				if (diff - delay >= drv->states[i].target_residency_ns) {
					dev->states_usage[entered_state].below++;
					trace_cpu_idle_miss(dev->cpu, entered_state, true);
				}

				break;
			}
		}
	} else {
		dev->last_residency_ns = 0;
		dev->states_usage[index].rejected++;
	}

	instrumentation_end();

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is expected to be set to
 * 'false' if the scheduler tick should not be stopped before entering the
 * returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
{
	/*
	 * If there is only a single idle state (or none), there is nothing
	 * meaningful for the governor to choose. Skip the governor and
	 * always use state 0 with the tick running.
	 */
	if (drv->state_count <= 1) {
		*stop_tick = false;
		return 0;
	}

	return cpuidle_curr_governor->select(drv, dev, stop_tick);
}
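
/*
 * Sketch of the expected calling sequence, simplified from the idle loop
 * in kernel/sched/idle.c (tick handling and error paths omitted):
 *
 *	bool stop_tick = true;
 *	int index = cpuidle_select(drv, dev, &stop_tick);
 *	int entered;
 *
 *	if (stop_tick)
 *		tick_nohz_idle_stop_tick();
 *	entered = cpuidle_enter(drv, dev, index);
 *	cpuidle_reflect(dev, entered);
 */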

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the idle state entered, or < 0 in case of error.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	int ret = 0;

	/*
	 * Store the next hrtimer, which becomes either the next tick or the
	 * next timer event, whichever expires first. Additionally, to make
	 * this data useful for consumers outside cpuidle, we rely on the
	 * governor's ->select() callback having already decided whether or
	 * not to stop the tick.
	 */
	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

	if (cpuidle_state_is_coupled(drv, index))
		ret = cpuidle_enter_state_coupled(dev, drv, index);
	else
		ret = cpuidle_enter_state(dev, drv, index);

	WRITE_ONCE(dev->next_hrtimer, 0);
	return ret;
}

/**
 * cpuidle_reflect - tell the underlying governor what state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 *
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/*
 * The minimum polling interval of 10 usec is a guess. It assumes that for
 * most users, a single ping-pong iteration of a workload like perf bench
 * pipe generally completes within 10 usec, but this is hardware dependent.
 * The actual time can be estimated with
 *
 * perf bench sched pipe -l 10000
 *
 * Run multiple times to avoid cpufreq effects.
 */
#define CPUIDLE_POLL_MIN 10000
#define CPUIDLE_POLL_MAX (TICK_NSEC / 16)
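
/*
 * Worked example (assuming HZ=250, i.e. TICK_NSEC = 4,000,000 ns):
 * CPUIDLE_POLL_MAX = 4,000,000 / 16 = 250,000 ns, so polling is capped at
 * 250 usec and never allowed below CPUIDLE_POLL_MIN = 10,000 ns = 10 usec.
 */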

/**
 * cpuidle_poll_time - return the amount of time to poll for;
 * governors can override dev->poll_limit_ns if necessary
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 *
 */
__cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	int i;
	u64 limit_ns;

	BUILD_BUG_ON(CPUIDLE_POLL_MIN > CPUIDLE_POLL_MAX);

	if (dev->poll_limit_ns)
		return dev->poll_limit_ns;

	limit_ns = CPUIDLE_POLL_MAX;
	for (i = 1; i < drv->state_count; i++) {
		u64 state_limit;

		if (dev->states_usage[i].disable)
			continue;

		state_limit = drv->states[i].target_residency_ns;
		if (state_limit < CPUIDLE_POLL_MIN)
			continue;

		limit_ns = min_t(u64, state_limit, CPUIDLE_POLL_MAX);
		break;
	}

	dev->poll_limit_ns = limit_ns;

	return dev->poll_limit_ns;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at the pointed-to idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
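
/*
 * Illustrative bracketing, a sketch of how external callers (e.g. the ACPI
 * processor driver) are expected to pause cpuidle around reconfiguration:
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	...			// update the idle states
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */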

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	if (!cpuidle_curr_governor)
		return -EIO;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

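	/*
	 * Make sure the setup above is visible before the idle path, which
	 * reads dev->enabled without holding cpuidle_lock, can see the
	 * device as enabled.
	 */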
	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}
EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency_ns = 0;
	dev->next_hrtimer = 0;
}

/**
 * __cpuidle_register_device - internal register function called before
 * the register and enable routines
 * @dev: the cpu
 *
 * The cpuidle_lock mutex must be held before this is called.
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	unsigned int cpu = dev->cpu;
	int i, ret;

	if (per_cpu(cpuidle_devices, cpu)) {
		pr_info("CPU%d: cpuidle device already registered\n", cpu);
		return -EEXIST;
	}

	if (!try_module_get(drv->owner))
		return -EINVAL;

	for (i = 0; i < drv->state_count; i++) {
		if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;

		if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
	}

	per_cpu(cpuidle_devices, cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function handles the common
 * initialization pattern found in the arch-specific drivers. The devices
 * are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, coupled idle states may be
		 * enabled even if the cpuidle driver does not use them.
		 * Note that coupled_cpus is copied by value.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
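
/*
 * Minimal registration sketch (illustrative only: the names are made up
 * and a real driver also fills in exit latencies, target residencies,
 * flags, etc.):
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name = "my_idle",
 *		.owner = THIS_MODULE,
 *		.states = {
 *			{
 *				.name = "WFI",
 *				.enter = my_enter_wfi,
 *				.exit_latency = 1,
 *				.target_residency = 1,
 *			},
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */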

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	if (cpuidle_disabled())
		return -ENODEV;

	return cpuidle_add_interface();
}

module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);