/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					   state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					   usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 *
 * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
 * macro, which uses the provided callbacks for both runtime PM and system
 * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
 * and pm_runtime_force_resume() for its system sleep callbacks.
 *
 * If the underlying dev_pm_ops struct symbol has to be exported, use
 * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
 */
#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
			   pm_runtime_force_resume, suspend_fn, \
			   resume_fn, idle_fn)

#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_GPL_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
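
/*
 * Example (illustrative sketch; the "foo" names are hypothetical, not part of
 * this header): a platform driver can wire its runtime PM callbacks up through
 * DEFINE_RUNTIME_DEV_PM_OPS() and reference the resulting ops with pm_ptr(),
 * so that they are dropped from the build when CONFIG_PM is unset:
 *
 *	static int foo_runtime_suspend(struct device *dev) { ... }
 *	static int foo_runtime_resume(struct device *dev) { ... }
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= pm_ptr(&foo_pm_ops),
 *		},
 *	};
 */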

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern bool pm_runtime_need_not_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern bool pm_runtime_block_if_disabled(struct device *dev);
extern void pm_runtime_unblock(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);

int devm_pm_runtime_set_active_enabled(struct device *dev);
extern int devm_pm_runtime_enable(struct device *dev);
int devm_pm_runtime_get_noresume(struct device *dev);

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}
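
/*
 * Example (illustrative, hypothetical driver code): since these two helpers
 * only adjust the usage counter, they are typically paired to block runtime
 * suspend across a section where the device is already known to be active:
 *
 *	pm_runtime_get_noresume(dev);
 *	... touch the already-powered device ...
 *	pm_runtime_put_noidle(dev);
 */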

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_blocked - Check if runtime PM enabling is blocked.
 * @dev: Target device.
 *
 * Do not call this function outside system suspend/resume code paths.
 */
static inline bool pm_runtime_blocked(struct device *dev)
{
	return dev->power.last_status == RPM_BLOCKED;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; }
static inline void pm_runtime_unblock(struct device *dev) {}
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
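
/*
 * Example (illustrative, hypothetical driver code): unlike pm_runtime_get_sync(),
 * pm_runtime_resume_and_get() drops the usage counter again on failure, so the
 * error path needs no extra pm_runtime_put_noidle():
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	... carry out I/O while the device stays powered up ...
 *
 *	pm_runtime_put(dev);
 */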

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

DEFINE_FREE(pm_runtime_put, struct device *, if (_T) pm_runtime_put(_T))

/**
 * __pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int __pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
 * of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
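
/*
 * Example (illustrative, hypothetical probe code): because the status may only
 * be changed while runtime PM is disabled, a driver that powers its device up
 * during probe typically declares it active before enabling runtime PM:
 *
 *	foo_power_up(dev);		(hypothetical helper)
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);		(or devm_pm_runtime_enable(dev))
 */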

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev by incrementing its
 * "disable" counter.
 *
 * If the counter is zero when this function runs and there is a pending runtime
 * resume request for @dev, it will be resumed.  If the counter is still zero at
 * that point, all of the pending runtime PM requests for @dev will be canceled
 * and all runtime PM operations in progress involving it will be waited for to
 * complete.
 *
 * For each invocation of this function for @dev, there must be a matching
 * pm_runtime_enable() call, so that runtime PM is eventually enabled for it
 * again.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 *
 * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
 * at driver exit time unless your driver initially enabled pm_runtime
 * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}
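
/*
 * Example (illustrative, hypothetical driver code): a common autosuspend setup
 * at probe time, to be undone with pm_runtime_dont_use_autosuspend() at driver
 * removal unless devm_pm_runtime_enable() was used (which, as noted above,
 * handles that automatically):
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	(delay value is arbitrary)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * with the matching pattern at the end of an I/O path:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */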

#endif