1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
4 *
5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7 */
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <linux/rculist.h>
15 #include <trace/events/rpm.h>
16
17 #include "../base.h"
18 #include "power.h"
19
20 typedef int (*pm_callback_t)(struct device *);
21
22 static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
23 {
24 return *(pm_callback_t *)(start + offset);
25 }
26
27 static pm_callback_t __rpm_get_driver_callback(struct device *dev,
28 size_t cb_offset)
29 {
30 if (dev->driver && dev->driver->pm)
31 return get_callback_ptr(dev->driver->pm, cb_offset);
32
33 return NULL;
34 }
35
36 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
37 {
38 const struct dev_pm_ops *ops;
39 pm_callback_t cb = NULL;
40
41 if (dev->pm_domain)
42 ops = &dev->pm_domain->ops;
43 else if (dev->type && dev->type->pm)
44 ops = dev->type->pm;
45 else if (dev->class && dev->class->pm)
46 ops = dev->class->pm;
47 else if (dev->bus && dev->bus->pm)
48 ops = dev->bus->pm;
49 else
50 ops = NULL;
51
52 if (ops)
53 cb = get_callback_ptr(ops, cb_offset);
54
55 if (!cb)
56 cb = __rpm_get_driver_callback(dev, cb_offset);
57
58 return cb;
59 }
60
61 #define RPM_GET_CALLBACK(dev, callback) \
62 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
63
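/*
 * Illustrative note: RPM_GET_CALLBACK() selects a struct dev_pm_ops member
 * by its byte offset, so a lookup such as
 *
 *	pm_callback_t cb = RPM_GET_CALLBACK(dev, runtime_suspend);
 *
 * expands to __rpm_get_callback(dev, offsetof(struct dev_pm_ops,
 * runtime_suspend)), which searches pm_domain, type, class and bus ops in
 * that order before falling back to the driver's own dev_pm_ops.
 */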
64 static int rpm_resume(struct device *dev, int rpmflags);
65 static int rpm_suspend(struct device *dev, int rpmflags);
66
67 /**
68 * update_pm_runtime_accounting - Update the time accounting of power states
69 * @dev: Device to update the accounting for
70 *
71 * In order to be able to have time accounting of the various power states
72 * (as used by programs such as PowerTOP to show the effectiveness of runtime
73 * PM), we need to track the time spent in each state.
74 * update_pm_runtime_accounting must be called each time before the
75 * runtime_status field is updated, to account the time in the old state
76 * correctly.
77 */
78 static void update_pm_runtime_accounting(struct device *dev)
79 {
80 u64 now, last, delta;
81
82 if (dev->power.disable_depth > 0)
83 return;
84
85 last = dev->power.accounting_timestamp;
86
87 now = ktime_get_mono_fast_ns();
88 dev->power.accounting_timestamp = now;
89
90 /*
91 * Because ktime_get_mono_fast_ns() is not monotonic during
92 * timekeeping updates, ensure that 'now' is after the last saved
93 * timestamp.
94 */
95 if (now < last)
96 return;
97
98 delta = now - last;
99
100 if (dev->power.runtime_status == RPM_SUSPENDED)
101 dev->power.suspended_time += delta;
102 else
103 dev->power.active_time += delta;
104 }
105
106 static void __update_runtime_status(struct device *dev, enum rpm_status status)
107 {
108 update_pm_runtime_accounting(dev);
109 trace_rpm_status(dev, status);
110 dev->power.runtime_status = status;
111 }
112
113 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
114 {
115 u64 time;
116 unsigned long flags;
117
118 spin_lock_irqsave(&dev->power.lock, flags);
119
120 update_pm_runtime_accounting(dev);
121 time = suspended ? dev->power.suspended_time : dev->power.active_time;
122
123 spin_unlock_irqrestore(&dev->power.lock, flags);
124
125 return time;
126 }
127
128 u64 pm_runtime_active_time(struct device *dev)
129 {
130 return rpm_get_accounted_time(dev, false);
131 }
132
133 u64 pm_runtime_suspended_time(struct device *dev)
134 {
135 return rpm_get_accounted_time(dev, true);
136 }
137 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
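/*
 * Usage sketch (hypothetical device pointer, illustrative only): the
 * accounted times are in nanoseconds, so a caller could report how long a
 * device has been runtime-suspended like this:
 *
 *	u64 ns = pm_runtime_suspended_time(dev);
 *
 *	dev_dbg(dev, "suspended for %llu ms\n",
 *		(unsigned long long)(ns / NSEC_PER_MSEC));
 */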
138
139 /**
140 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
141 * @dev: Device to handle.
142 */
143 static void pm_runtime_deactivate_timer(struct device *dev)
144 {
145 if (dev->power.timer_expires > 0) {
146 hrtimer_try_to_cancel(&dev->power.suspend_timer);
147 dev->power.timer_expires = 0;
148 }
149 }
150
151 /**
152 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
153 * @dev: Device to handle.
154 */
155 static void pm_runtime_cancel_pending(struct device *dev)
156 {
157 pm_runtime_deactivate_timer(dev);
158 /*
159 * In case there's a request pending, make sure its work function will
160 * return without doing anything.
161 */
162 dev->power.request = RPM_REQ_NONE;
163 }
164
165 /*
166 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
167 * @dev: Device to handle.
168 *
169 * Compute the autosuspend-delay expiration time based on the device's
170 * power.last_busy time. If the delay has already expired or is disabled
171 * (negative) or the power.use_autosuspend flag isn't set, return 0.
172 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
173 *
174 * This function may be called either with or without dev->power.lock held.
175 * Either way it can be racy, since power.last_busy may be updated at any time.
176 */
177 u64 pm_runtime_autosuspend_expiration(struct device *dev)
178 {
179 int autosuspend_delay;
180 u64 expires;
181
182 if (!dev->power.use_autosuspend)
183 return 0;
184
185 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
186 if (autosuspend_delay < 0)
187 return 0;
188
189 expires = READ_ONCE(dev->power.last_busy);
190 expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
191 if (expires > ktime_get_mono_fast_ns())
192 return expires; /* Expires in the future */
193
194 return 0;
195 }
196 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
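/*
 * Worked example (illustrative numbers): with power.autosuspend_delay set
 * to 2000 ms and power.last_busy == 10,000,000,000 ns, the expiration
 * computed above is 10,000,000,000 + 2000 * NSEC_PER_MSEC =
 * 12,000,000,000 ns. That value is returned only while it is still in the
 * future; otherwise 0 is returned.
 */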
197
198 static int dev_memalloc_noio(struct device *dev, void *data)
199 {
200 return dev->power.memalloc_noio;
201 }
202
203 /*
204 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
205 * @dev: Device to handle.
206 * @enable: True for setting the flag and False for clearing the flag.
207 *
208 * Set the flag for all devices in the path from the device to the
209 * root device in the device tree if @enable is true, otherwise clear
210 * the flag for devices in the path whose siblings don't set the flag.
211 *
212 * The function should only be called by block device or network
213 * device drivers to solve the deadlock problem during runtime
214 * resume/suspend:
215 *
216 * If memory allocation with GFP_KERNEL is called inside the runtime
217 * resume/suspend callback of any one of its ancestors (or the
218 * block device itself), a deadlock may be triggered inside the
219 * memory allocation since it might not complete until the block
220 * device becomes active and the involved page I/O finishes. This
221 * situation was first pointed out by Alan Stern. Network devices
222 * are involved in the iSCSI kind of situation.
223 *
224 * The lock of dev_hotplug_mutex is held in the function for handling
225 * hotplug race because pm_runtime_set_memalloc_noio() may be called
226 * in async probe().
227 *
228 * The function should be called between device_add() and device_del()
229 * on the affected device(block/network device).
230 */
231 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
232 {
233 static DEFINE_MUTEX(dev_hotplug_mutex);
234
235 mutex_lock(&dev_hotplug_mutex);
236 for (;;) {
237 bool enabled;
238
239 /* hold power lock since bitfield is not SMP-safe. */
240 spin_lock_irq(&dev->power.lock);
241 enabled = dev->power.memalloc_noio;
242 dev->power.memalloc_noio = enable;
243 spin_unlock_irq(&dev->power.lock);
244
245 /*
246 * No need to set the flag on the ancestors any more if it
247 * was already set on this device.
248 */
249 if (enabled && enable)
250 break;
251
252 dev = dev->parent;
253
254 /*
255 * Clear the flag of the parent device only if none of
256 * its children has it set, because an ancestor's flag
257 * may have been set by any one of its descendants.
258 */
259 if (!dev || (!enable &&
260 device_for_each_child(dev, NULL, dev_memalloc_noio)))
261 break;
262 }
263 mutex_unlock(&dev_hotplug_mutex);
264 }
265 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
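/*
 * Usage sketch (hypothetical block-device driver, illustrative only): as
 * the kernel-doc above requires, the flag is set after device_add() and
 * cleared again before device_del():
 *
 *	err = device_add(dev);
 *	if (err)
 *		return err;
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */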
266
267 /**
268 * rpm_check_suspend_allowed - Test whether a device may be suspended.
269 * @dev: Device to test.
270 */
271 static int rpm_check_suspend_allowed(struct device *dev)
272 {
273 int retval = 0;
274
275 if (dev->power.runtime_error)
276 retval = -EINVAL;
277 else if (dev->power.disable_depth > 0)
278 retval = -EACCES;
279 else if (atomic_read(&dev->power.usage_count))
280 retval = -EAGAIN;
281 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
282 retval = -EBUSY;
283
284 /* Pending resume requests take precedence over suspends. */
285 else if ((dev->power.deferred_resume &&
286 dev->power.runtime_status == RPM_SUSPENDING) ||
287 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
288 retval = -EAGAIN;
289 else if (__dev_pm_qos_resume_latency(dev) == 0)
290 retval = -EPERM;
291 else if (dev->power.runtime_status == RPM_SUSPENDED)
292 retval = 1;
293
294 return retval;
295 }
296
297 static int rpm_get_suppliers(struct device *dev)
298 {
299 struct device_link *link;
300
301 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
302 device_links_read_lock_held()) {
303 int retval;
304
305 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
306 continue;
307
308 retval = pm_runtime_get_sync(link->supplier);
309 /* Ignore suppliers with disabled runtime PM. */
310 if (retval < 0 && retval != -EACCES) {
311 pm_runtime_put_noidle(link->supplier);
312 return retval;
313 }
314 refcount_inc(&link->rpm_active);
315 }
316 return 0;
317 }
318
319 /**
320 * pm_runtime_release_supplier - Drop references to device link's supplier.
321 * @link: Target device link.
322 *
323 * Drop all runtime PM references associated with @link to its supplier device.
324 */
325 void pm_runtime_release_supplier(struct device_link *link)
326 {
327 struct device *supplier = link->supplier;
328
329 /*
330 * The additional power.usage_count check is a safety net in case
331 * the rpm_active refcount becomes saturated, in which case
332 * refcount_dec_not_one() would return true forever, but it is not
333 * strictly necessary.
334 */
335 while (refcount_dec_not_one(&link->rpm_active) &&
336 atomic_read(&supplier->power.usage_count) > 0)
337 pm_runtime_put_noidle(supplier);
338 }
339
340 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
341 {
342 struct device_link *link;
343
344 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
345 device_links_read_lock_held()) {
346 pm_runtime_release_supplier(link);
347 if (try_to_suspend)
348 pm_request_idle(link->supplier);
349 }
350 }
351
352 static void rpm_put_suppliers(struct device *dev)
353 {
354 __rpm_put_suppliers(dev, true);
355 }
356
357 static void rpm_suspend_suppliers(struct device *dev)
358 {
359 struct device_link *link;
360 int idx = device_links_read_lock();
361
362 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
363 device_links_read_lock_held())
364 pm_request_idle(link->supplier);
365
366 device_links_read_unlock(idx);
367 }
368
369 /**
370 * __rpm_callback - Run a given runtime PM callback for a given device.
371 * @cb: Runtime PM callback to run.
372 * @dev: Device to run the callback for.
373 */
374 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
375 __releases(&dev->power.lock) __acquires(&dev->power.lock)
376 {
377 int retval = 0, idx;
378 bool use_links = dev->power.links_count > 0;
379
380 if (dev->power.irq_safe) {
381 spin_unlock(&dev->power.lock);
382 } else {
383 spin_unlock_irq(&dev->power.lock);
384
385 /*
386 * Resume suppliers if necessary.
387 *
388 * The device's runtime PM status cannot change until this
389 * routine returns, so it is safe to read the status outside of
390 * the lock.
391 */
392 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
393 idx = device_links_read_lock();
394
395 retval = rpm_get_suppliers(dev);
396 if (retval) {
397 rpm_put_suppliers(dev);
398 goto fail;
399 }
400
401 device_links_read_unlock(idx);
402 }
403 }
404
405 if (cb)
406 retval = cb(dev);
407
408 if (dev->power.irq_safe) {
409 spin_lock(&dev->power.lock);
410 } else {
411 /*
412 * If the device is suspending and the callback has returned
413 * success, drop the usage counters of the suppliers that have
414 * been reference counted on its resume.
415 *
416 * Do that if resume fails too.
417 */
418 if (use_links &&
419 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
420 (dev->power.runtime_status == RPM_RESUMING && retval))) {
421 idx = device_links_read_lock();
422
423 __rpm_put_suppliers(dev, false);
424
425 fail:
426 device_links_read_unlock(idx);
427 }
428
429 spin_lock_irq(&dev->power.lock);
430 }
431
432 return retval;
433 }
434
435 /**
436 * rpm_callback - Run a given runtime PM callback for a given device.
437 * @cb: Runtime PM callback to run.
438 * @dev: Device to run the callback for.
439 */
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
441 {
442 int retval;
443
444 if (dev->power.memalloc_noio) {
445 unsigned int noio_flag;
446
447 /*
448 * A deadlock might be caused if a memory allocation with
449 * GFP_KERNEL happens inside the runtime_suspend or
450 * runtime_resume callback of a block device's ancestor
451 * or of the block device itself. A network device might
452 * be thought of as part of an iSCSI block device, so
453 * network devices and their ancestors should be marked
454 * as memalloc_noio too.
455 */
456 noio_flag = memalloc_noio_save();
457 retval = __rpm_callback(cb, dev);
458 memalloc_noio_restore(noio_flag);
459 } else {
460 retval = __rpm_callback(cb, dev);
461 }
462
463 /*
464 * Since -EACCES means that runtime PM is disabled for the given device,
465 * it should not be returned by runtime PM callbacks. If it is returned
466 * nevertheless, assume it to be a transient error and convert it to
467 * -EAGAIN.
468 */
469 if (retval == -EACCES)
470 retval = -EAGAIN;
471
472 if (retval != -EAGAIN && retval != -EBUSY)
473 dev->power.runtime_error = retval;
474
475 return retval;
476 }
477
478 /**
479 * rpm_idle - Notify device bus type if the device can be suspended.
480 * @dev: Device to notify the bus type about.
481 * @rpmflags: Flag bits.
482 *
483 * Check if the device's runtime PM status allows it to be suspended. If
484 * another idle notification has been started earlier, return immediately. If
485 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
486 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
487 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
488 *
489 * This function must be called under dev->power.lock with interrupts disabled.
490 */
491 static int rpm_idle(struct device *dev, int rpmflags)
492 {
493 int (*callback)(struct device *);
494 int retval;
495
496 trace_rpm_idle(dev, rpmflags);
497 retval = rpm_check_suspend_allowed(dev);
498 if (retval < 0)
499 ; /* Conditions are wrong. */
500
501 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
502 else if (dev->power.runtime_status != RPM_ACTIVE)
503 retval = -EAGAIN;
504
505 /*
506 * Any pending request other than an idle notification takes
507 * precedence over us, except that the timer may be running.
508 */
509 else if (dev->power.request_pending &&
510 dev->power.request > RPM_REQ_IDLE)
511 retval = -EAGAIN;
512
513 /* Act as though RPM_NOWAIT is always set. */
514 else if (dev->power.idle_notification)
515 retval = -EINPROGRESS;
516
517 if (retval)
518 goto out;
519
520 /* Pending requests need to be canceled. */
521 dev->power.request = RPM_REQ_NONE;
522
523 callback = RPM_GET_CALLBACK(dev, runtime_idle);
524
525 /* If no callback assume success. */
526 if (!callback || dev->power.no_callbacks)
527 goto out;
528
529 /* Carry out an asynchronous or a synchronous idle notification. */
530 if (rpmflags & RPM_ASYNC) {
531 dev->power.request = RPM_REQ_IDLE;
532 if (!dev->power.request_pending) {
533 dev->power.request_pending = true;
534 queue_work(pm_wq, &dev->power.work);
535 }
536 trace_rpm_return_int(dev, _THIS_IP_, 0);
537 return 0;
538 }
539
540 dev->power.idle_notification = true;
541
542 if (dev->power.irq_safe)
543 spin_unlock(&dev->power.lock);
544 else
545 spin_unlock_irq(&dev->power.lock);
546
547 retval = callback(dev);
548
549 if (dev->power.irq_safe)
550 spin_lock(&dev->power.lock);
551 else
552 spin_lock_irq(&dev->power.lock);
553
554 dev->power.idle_notification = false;
555 wake_up_all(&dev->power.wait_queue);
556
557 out:
558 trace_rpm_return_int(dev, _THIS_IP_, retval);
559 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
560 }
561
562 /**
563 * rpm_suspend - Carry out runtime suspend of given device.
564 * @dev: Device to suspend.
565 * @rpmflags: Flag bits.
566 *
567 * Check if the device's runtime PM status allows it to be suspended.
568 * Cancel a pending idle notification, autosuspend or suspend. If
569 * another suspend has been started earlier, either return immediately
570 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
571 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
572 * otherwise run the ->runtime_suspend() callback directly. If
573 * ->runtime_suspend() succeeds and a deferred resume was requested while
574 * the callback was running, carry out that resume; otherwise send an idle
575 * notification to the device's parent (provided the suspend succeeded and
576 * neither ignore_children of parent->power nor irq_safe of dev->power is set).
577 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
578 * flag is set and the next autosuspend-delay expiration time is in the
579 * future, schedule another autosuspend attempt.
580 *
581 * This function must be called under dev->power.lock with interrupts disabled.
582 */
583 static int rpm_suspend(struct device *dev, int rpmflags)
584 __releases(&dev->power.lock) __acquires(&dev->power.lock)
585 {
586 int (*callback)(struct device *);
587 struct device *parent = NULL;
588 int retval;
589
590 trace_rpm_suspend(dev, rpmflags);
591
592 repeat:
593 retval = rpm_check_suspend_allowed(dev);
594 if (retval < 0)
595 goto out; /* Conditions are wrong. */
596
597 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
598 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
599 retval = -EAGAIN;
600
601 if (retval)
602 goto out;
603
604 /* If the autosuspend_delay time hasn't expired yet, reschedule. */
605 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
606 u64 expires = pm_runtime_autosuspend_expiration(dev);
607
608 if (expires != 0) {
609 /* Pending requests need to be canceled. */
610 dev->power.request = RPM_REQ_NONE;
611
612 /*
613 * Optimization: If the timer is already running and is
614 * set to expire at or before the autosuspend delay,
615 * avoid the overhead of resetting it. Just let it
616 * expire; pm_suspend_timer_fn() will take care of the
617 * rest.
618 */
619 if (!(dev->power.timer_expires &&
620 dev->power.timer_expires <= expires)) {
621 /*
622 * We add a slack of 25% to gather wakeups
623 * without sacrificing the granularity.
624 */
625 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
626 (NSEC_PER_MSEC >> 2);
627
628 dev->power.timer_expires = expires;
629 hrtimer_start_range_ns(&dev->power.suspend_timer,
630 ns_to_ktime(expires),
631 slack,
632 HRTIMER_MODE_ABS);
633 }
634 dev->power.timer_autosuspends = 1;
635 goto out;
636 }
637 }
638
639 /* Other scheduled or pending requests need to be canceled. */
640 pm_runtime_cancel_pending(dev);
641
642 if (dev->power.runtime_status == RPM_SUSPENDING) {
643 DEFINE_WAIT(wait);
644
645 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
646 retval = -EINPROGRESS;
647 goto out;
648 }
649
650 if (dev->power.irq_safe) {
651 spin_unlock(&dev->power.lock);
652
653 cpu_relax();
654
655 spin_lock(&dev->power.lock);
656 goto repeat;
657 }
658
659 /* Wait for the other suspend running in parallel with us. */
660 for (;;) {
661 prepare_to_wait(&dev->power.wait_queue, &wait,
662 TASK_UNINTERRUPTIBLE);
663 if (dev->power.runtime_status != RPM_SUSPENDING)
664 break;
665
666 spin_unlock_irq(&dev->power.lock);
667
668 schedule();
669
670 spin_lock_irq(&dev->power.lock);
671 }
672 finish_wait(&dev->power.wait_queue, &wait);
673 goto repeat;
674 }
675
676 if (dev->power.no_callbacks)
677 goto no_callback; /* Assume success. */
678
679 /* Carry out an asynchronous or a synchronous suspend. */
680 if (rpmflags & RPM_ASYNC) {
681 dev->power.request = (rpmflags & RPM_AUTO) ?
682 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
683 if (!dev->power.request_pending) {
684 dev->power.request_pending = true;
685 queue_work(pm_wq, &dev->power.work);
686 }
687 goto out;
688 }
689
690 __update_runtime_status(dev, RPM_SUSPENDING);
691
692 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
693
694 dev_pm_enable_wake_irq_check(dev, true);
695 retval = rpm_callback(callback, dev);
696 if (retval)
697 goto fail;
698
699 dev_pm_enable_wake_irq_complete(dev);
700
701 no_callback:
702 __update_runtime_status(dev, RPM_SUSPENDED);
703 pm_runtime_deactivate_timer(dev);
704
705 if (dev->parent) {
706 parent = dev->parent;
707 atomic_add_unless(&parent->power.child_count, -1, 0);
708 }
709 wake_up_all(&dev->power.wait_queue);
710
711 if (dev->power.deferred_resume) {
712 dev->power.deferred_resume = false;
713 rpm_resume(dev, 0);
714 retval = -EAGAIN;
715 goto out;
716 }
717
718 if (dev->power.irq_safe)
719 goto out;
720
721 /* Maybe the parent is now able to suspend. */
722 if (parent && !parent->power.ignore_children) {
723 spin_unlock(&dev->power.lock);
724
725 spin_lock(&parent->power.lock);
726 rpm_idle(parent, RPM_ASYNC);
727 spin_unlock(&parent->power.lock);
728
729 spin_lock(&dev->power.lock);
730 }
731 /* Maybe the suppliers are now able to suspend. */
732 if (dev->power.links_count > 0) {
733 spin_unlock_irq(&dev->power.lock);
734
735 rpm_suspend_suppliers(dev);
736
737 spin_lock_irq(&dev->power.lock);
738 }
739
740 out:
741 trace_rpm_return_int(dev, _THIS_IP_, retval);
742
743 return retval;
744
745 fail:
746 dev_pm_disable_wake_irq_check(dev, true);
747 __update_runtime_status(dev, RPM_ACTIVE);
748 dev->power.deferred_resume = false;
749 wake_up_all(&dev->power.wait_queue);
750
751 /*
752 * On transient errors, if the callback routine failed an autosuspend,
753 * and if the last_busy time has been updated so that there is a new
754 * autosuspend expiration time, automatically reschedule another
755 * autosuspend.
756 */
757 if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
758 pm_runtime_autosuspend_expiration(dev) != 0)
759 goto repeat;
760
761 pm_runtime_cancel_pending(dev);
762
763 goto out;
764 }
765
766 /**
767 * rpm_resume - Carry out runtime resume of given device.
768 * @dev: Device to resume.
769 * @rpmflags: Flag bits.
770 *
771 * Check if the device's runtime PM status allows it to be resumed. Cancel
772 * any scheduled or pending requests. If another resume has been started
773 * earlier, either return immediately or wait for it to finish, depending on the
774 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
775 * parallel with this function, either tell the other process to resume after
776 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
777 * flag is set then queue a resume request; otherwise run the
778 * ->runtime_resume() callback directly. Queue an idle notification for the
779 * device if the resume succeeded.
780 *
781 * This function must be called under dev->power.lock with interrupts disabled.
782 */
783 static int rpm_resume(struct device *dev, int rpmflags)
784 __releases(&dev->power.lock) __acquires(&dev->power.lock)
785 {
786 int (*callback)(struct device *);
787 struct device *parent = NULL;
788 int retval = 0;
789
790 trace_rpm_resume(dev, rpmflags);
791
792 repeat:
793 if (dev->power.runtime_error) {
794 retval = -EINVAL;
795 } else if (dev->power.disable_depth > 0) {
796 if (dev->power.runtime_status == RPM_ACTIVE &&
797 dev->power.last_status == RPM_ACTIVE)
798 retval = 1;
799 else
800 retval = -EACCES;
801 }
802 if (retval)
803 goto out;
804
805 /*
806 * Other scheduled or pending requests need to be canceled. Small
807 * optimization: If an autosuspend timer is running, leave it running
808 * rather than cancelling it now only to restart it again in the near
809 * future.
810 */
811 dev->power.request = RPM_REQ_NONE;
812 if (!dev->power.timer_autosuspends)
813 pm_runtime_deactivate_timer(dev);
814
815 if (dev->power.runtime_status == RPM_ACTIVE) {
816 retval = 1;
817 goto out;
818 }
819
820 if (dev->power.runtime_status == RPM_RESUMING ||
821 dev->power.runtime_status == RPM_SUSPENDING) {
822 DEFINE_WAIT(wait);
823
824 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
825 if (dev->power.runtime_status == RPM_SUSPENDING) {
826 dev->power.deferred_resume = true;
827 if (rpmflags & RPM_NOWAIT)
828 retval = -EINPROGRESS;
829 } else {
830 retval = -EINPROGRESS;
831 }
832 goto out;
833 }
834
835 if (dev->power.irq_safe) {
836 spin_unlock(&dev->power.lock);
837
838 cpu_relax();
839
840 spin_lock(&dev->power.lock);
841 goto repeat;
842 }
843
844 /* Wait for the operation carried out in parallel with us. */
845 for (;;) {
846 prepare_to_wait(&dev->power.wait_queue, &wait,
847 TASK_UNINTERRUPTIBLE);
848 if (dev->power.runtime_status != RPM_RESUMING &&
849 dev->power.runtime_status != RPM_SUSPENDING)
850 break;
851
852 spin_unlock_irq(&dev->power.lock);
853
854 schedule();
855
856 spin_lock_irq(&dev->power.lock);
857 }
858 finish_wait(&dev->power.wait_queue, &wait);
859 goto repeat;
860 }
861
862 /*
863 * See if we can skip waking up the parent. This is safe only if
864 * power.no_callbacks is set, because otherwise we don't know whether
865 * the resume will actually succeed.
866 */
867 if (dev->power.no_callbacks && !parent && dev->parent) {
868 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
869 if (dev->parent->power.disable_depth > 0 ||
870 dev->parent->power.ignore_children ||
871 dev->parent->power.runtime_status == RPM_ACTIVE) {
872 atomic_inc(&dev->parent->power.child_count);
873 spin_unlock(&dev->parent->power.lock);
874 retval = 1;
875 goto no_callback; /* Assume success. */
876 }
877 spin_unlock(&dev->parent->power.lock);
878 }
879
880 /* Carry out an asynchronous or a synchronous resume. */
881 if (rpmflags & RPM_ASYNC) {
882 dev->power.request = RPM_REQ_RESUME;
883 if (!dev->power.request_pending) {
884 dev->power.request_pending = true;
885 queue_work(pm_wq, &dev->power.work);
886 }
887 retval = 0;
888 goto out;
889 }
890
891 if (!parent && dev->parent) {
892 /*
893 * Increment the parent's usage counter and resume it if
894 * necessary. Not needed if dev is irq-safe; then the
895 * parent is permanently resumed.
896 */
897 parent = dev->parent;
898 if (dev->power.irq_safe)
899 goto skip_parent;
900
901 spin_unlock(&dev->power.lock);
902
903 pm_runtime_get_noresume(parent);
904
905 spin_lock(&parent->power.lock);
906 /*
907 * Resume the parent if it has runtime PM enabled and has not
908 * been set to ignore its children.
909 */
910 if (!parent->power.disable_depth &&
911 !parent->power.ignore_children) {
912 rpm_resume(parent, 0);
913 if (parent->power.runtime_status != RPM_ACTIVE)
914 retval = -EBUSY;
915 }
916 spin_unlock(&parent->power.lock);
917
918 spin_lock(&dev->power.lock);
919 if (retval)
920 goto out;
921
922 goto repeat;
923 }
924 skip_parent:
925
926 if (dev->power.no_callbacks)
927 goto no_callback; /* Assume success. */
928
929 __update_runtime_status(dev, RPM_RESUMING);
930
931 callback = RPM_GET_CALLBACK(dev, runtime_resume);
932
933 dev_pm_disable_wake_irq_check(dev, false);
934 retval = rpm_callback(callback, dev);
935 if (retval) {
936 __update_runtime_status(dev, RPM_SUSPENDED);
937 pm_runtime_cancel_pending(dev);
938 dev_pm_enable_wake_irq_check(dev, false);
939 } else {
940 no_callback:
941 __update_runtime_status(dev, RPM_ACTIVE);
942 pm_runtime_mark_last_busy(dev);
943 if (parent)
944 atomic_inc(&parent->power.child_count);
945 }
946 wake_up_all(&dev->power.wait_queue);
947
948 if (retval >= 0)
949 rpm_idle(dev, RPM_ASYNC);
950
951 out:
952 if (parent && !dev->power.irq_safe) {
953 spin_unlock_irq(&dev->power.lock);
954
955 pm_runtime_put(parent);
956
957 spin_lock_irq(&dev->power.lock);
958 }
959
960 trace_rpm_return_int(dev, _THIS_IP_, retval);
961
962 return retval;
963 }
964
965 /**
966 * pm_runtime_work - Universal runtime PM work function.
967 * @work: Work structure used for scheduling the execution of this function.
968 *
969 * Use @work to get the device object the work is to be done for, determine what
970 * is to be done and execute the appropriate runtime PM function.
971 */
972 static void pm_runtime_work(struct work_struct *work)
973 {
974 struct device *dev = container_of(work, struct device, power.work);
975 enum rpm_request req;
976
977 spin_lock_irq(&dev->power.lock);
978
979 if (!dev->power.request_pending)
980 goto out;
981
982 req = dev->power.request;
983 dev->power.request = RPM_REQ_NONE;
984 dev->power.request_pending = false;
985
986 switch (req) {
987 case RPM_REQ_NONE:
988 break;
989 case RPM_REQ_IDLE:
990 rpm_idle(dev, RPM_NOWAIT);
991 break;
992 case RPM_REQ_SUSPEND:
993 rpm_suspend(dev, RPM_NOWAIT);
994 break;
995 case RPM_REQ_AUTOSUSPEND:
996 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
997 break;
998 case RPM_REQ_RESUME:
999 rpm_resume(dev, RPM_NOWAIT);
1000 break;
1001 }
1002
1003 out:
1004 spin_unlock_irq(&dev->power.lock);
1005 }
1006
1007 /**
1008 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
1009 * @timer: hrtimer used by pm_schedule_suspend().
1010 *
1011 * Check if the time is right and queue a suspend request.
1012 */
1013 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
1014 {
1015 struct device *dev = container_of(timer, struct device, power.suspend_timer);
1016 unsigned long flags;
1017 u64 expires;
1018
1019 spin_lock_irqsave(&dev->power.lock, flags);
1020
1021 expires = dev->power.timer_expires;
1022 /*
1023 * If 'expires' is still in the future, we've been called too
1024 * early, so do nothing.
1025 */
1026 if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
1027 dev->power.timer_expires = 0;
1028 rpm_suspend(dev, dev->power.timer_autosuspends ?
1029 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
1030 }
1031
1032 spin_unlock_irqrestore(&dev->power.lock, flags);
1033
1034 return HRTIMER_NORESTART;
1035 }
1036
1037 /**
1038 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1039 * @dev: Device to suspend.
1040 * @delay: Time to wait before submitting a suspend request, in milliseconds.
1041 */
1042 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1043 {
1044 unsigned long flags;
1045 u64 expires;
1046 int retval;
1047
1048 spin_lock_irqsave(&dev->power.lock, flags);
1049
1050 if (!delay) {
1051 retval = rpm_suspend(dev, RPM_ASYNC);
1052 goto out;
1053 }
1054
1055 retval = rpm_check_suspend_allowed(dev);
1056 if (retval)
1057 goto out;
1058
1059 /* Other scheduled or pending requests need to be canceled. */
1060 pm_runtime_cancel_pending(dev);
1061
1062 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1063 dev->power.timer_expires = expires;
1064 dev->power.timer_autosuspends = 0;
1065 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1066
1067 out:
1068 spin_unlock_irqrestore(&dev->power.lock, flags);
1069
1070 return retval;
1071 }
1072 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
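/*
 * Usage sketch (illustrative): ask the PM core to submit an asynchronous
 * suspend request for a device roughly 100 ms from now; a zero delay
 * queues the request immediately, as handled above:
 *
 *	int err = pm_schedule_suspend(dev, 100);
 *
 *	if (err < 0)
 *		dev_dbg(dev, "suspend not scheduled: %d\n", err);
 */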
1073
1074 static int rpm_drop_usage_count(struct device *dev)
1075 {
1076 int ret;
1077
1078 ret = atomic_sub_return(1, &dev->power.usage_count);
1079 if (ret >= 0)
1080 return ret;
1081
1082 /*
1083 * Because rpm_resume() does not check the usage counter, it will resume
1084 * the device even if the usage counter is 0 or negative, so it is
1085 * sufficient to increment the usage counter here to reverse the change
1086 * made above.
1087 */
1088 atomic_inc(&dev->power.usage_count);
1089 dev_warn(dev, "Runtime PM usage count underflow!\n");
1090 return -EINVAL;
1091 }
1092
1093 /**
1094 * __pm_runtime_idle - Entry point for runtime idle operations.
1095 * @dev: Device to send idle notification for.
1096 * @rpmflags: Flag bits.
1097 *
1098 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1099 * return immediately if it is larger than zero (if it becomes negative, log a
1100 * warning, increment it, and return an error). Then carry out an idle
1101 * notification, either synchronous or asynchronous.
1102 *
1103 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1104 * or if pm_runtime_irq_safe() has been called.
1105 */
1106 int __pm_runtime_idle(struct device *dev, int rpmflags)
1107 {
1108 unsigned long flags;
1109 int retval;
1110
1111 if (rpmflags & RPM_GET_PUT) {
1112 retval = rpm_drop_usage_count(dev);
1113 if (retval < 0) {
1114 return retval;
1115 } else if (retval > 0) {
1116 trace_rpm_usage(dev, rpmflags);
1117 return 0;
1118 }
1119 }
1120
1121 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1122
1123 spin_lock_irqsave(&dev->power.lock, flags);
1124 retval = rpm_idle(dev, rpmflags);
1125 spin_unlock_irqrestore(&dev->power.lock, flags);
1126
1127 return retval;
1128 }
1129 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
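/*
 * Illustrative note: the helpers in include/linux/pm_runtime.h funnel into
 * this entry point, roughly as follows (a sketch, not a verbatim copy of
 * the header):
 *
 *	pm_runtime_idle(dev)      ->  __pm_runtime_idle(dev, 0);
 *	pm_request_idle(dev)      ->  __pm_runtime_idle(dev, RPM_ASYNC);
 *	pm_runtime_put(dev)       ->  __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 *	pm_runtime_put_sync(dev)  ->  __pm_runtime_idle(dev, RPM_GET_PUT);
 */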
1130
1131 /**
1132 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1133 * @dev: Device to suspend.
1134 * @rpmflags: Flag bits.
1135 *
1136 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1137 * return immediately if it is larger than zero (if it becomes negative, log a
1138 * warning, increment it, and return an error). Then carry out a suspend,
1139 * either synchronous or asynchronous.
1140 *
1141 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1142 * or if pm_runtime_irq_safe() has been called.
1143 */
1144 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1145 {
1146 unsigned long flags;
1147 int retval;
1148
1149 if (rpmflags & RPM_GET_PUT) {
1150 retval = rpm_drop_usage_count(dev);
1151 if (retval < 0) {
1152 return retval;
1153 } else if (retval > 0) {
1154 trace_rpm_usage(dev, rpmflags);
1155 return 0;
1156 }
1157 }
1158
1159 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1160
1161 spin_lock_irqsave(&dev->power.lock, flags);
1162 retval = rpm_suspend(dev, rpmflags);
1163 spin_unlock_irqrestore(&dev->power.lock, flags);
1164
1165 return retval;
1166 }
1167 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1168
1169 /**
1170 * __pm_runtime_resume - Entry point for runtime resume operations.
1171 * @dev: Device to resume.
1172 * @rpmflags: Flag bits.
1173 *
1174 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
1175 * carry out a resume, either synchronous or asynchronous.
1176 *
1177 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1178 * or if pm_runtime_irq_safe() has been called.
1179 */
1180 int __pm_runtime_resume(struct device *dev, int rpmflags)
1181 {
1182 unsigned long flags;
1183 int retval;
1184
1185 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1186 dev->power.runtime_status != RPM_ACTIVE);
1187
1188 if (rpmflags & RPM_GET_PUT)
1189 atomic_inc(&dev->power.usage_count);
1190
1191 spin_lock_irqsave(&dev->power.lock, flags);
1192 retval = rpm_resume(dev, rpmflags);
1193 spin_unlock_irqrestore(&dev->power.lock, flags);
1194
1195 return retval;
1196 }
1197 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
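/*
 * Usage sketch (hypothetical driver I/O path, illustrative only): the
 * usual pattern built on top of this entry point takes a usage-count
 * reference, resumes the device synchronously, does the work and then
 * drops the reference again:
 *
 *	int ret = pm_runtime_resume_and_get(dev);
 *
 *	if (ret < 0)
 *		return ret;
 *	... access the hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */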
1198
1199 /**
1200 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1201 * @dev: Device to handle.
1202 * @ign_usage_count: Whether or not to look at the current usage counter value.
1203 *
1204 * Return -EINVAL if runtime PM is disabled for @dev.
1205 *
1206 * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
1207 * is set, or (2) @dev is not ignoring children and its active child count is
1208 * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
1209 * the usage counter of @dev and return 1.
1210 *
1211 * Otherwise, return 0 without changing the usage counter.
1212 *
1213 * If @ign_usage_count is %true, this function can be used to prevent suspending
1214 * the device when its runtime PM status is %RPM_ACTIVE.
1215 *
1216 * If @ign_usage_count is %false, this function can be used to prevent
1217 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1218 * runtime PM usage counter is not zero.
1219 *
1220 * The caller is responsible for decrementing the runtime PM usage counter of
1221 * @dev after this function has returned a positive value for it.
1222 */
1223 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1224 {
1225 unsigned long flags;
1226 int retval;
1227
1228 spin_lock_irqsave(&dev->power.lock, flags);
1229 if (dev->power.disable_depth > 0) {
1230 retval = -EINVAL;
1231 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1232 retval = 0;
1233 } else if (ign_usage_count || (!dev->power.ignore_children &&
1234 atomic_read(&dev->power.child_count) > 0)) {
1235 retval = 1;
1236 atomic_inc(&dev->power.usage_count);
1237 } else {
1238 retval = atomic_inc_not_zero(&dev->power.usage_count);
1239 }
1240 trace_rpm_usage(dev, 0);
1241 spin_unlock_irqrestore(&dev->power.lock, flags);
1242
1243 return retval;
1244 }
1245
1246 /**
1247 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
1248 * in active state
1249 * @dev: Target device.
1250 *
1251 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1252 * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
1253 * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
1254 * device, in which case the usage counter is also left unmodified.
1255 */
1256 int pm_runtime_get_if_active(struct device *dev)
1257 {
1258 return pm_runtime_get_conditional(dev, true);
1259 }
1260 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1261
1262 /**
1263 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1264 * @dev: Target device.
1265 *
1266 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1267 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
1268 * ignoring children and its active child count is nonzero. 1 is returned in
1269 * this case.
1270 *
1271 * If @dev is in a different state or it is not in use (that is, its usage
1272 * counter is 0, or it is ignoring children, or its active child count is 0),
1273 * 0 is returned.
1274 *
1275 * -EINVAL is returned if runtime PM is disabled for the device, in which
1276 * case the usage counter of @dev is not updated either.
1277 */
1278 int pm_runtime_get_if_in_use(struct device *dev)
1279 {
1280 return pm_runtime_get_conditional(dev, false);
1281 }
1282 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
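/*
 * Usage sketch (illustrative): a non-blocking path that only touches the
 * hardware if the device is already active and in use, based on the
 * conditional helpers above:
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;			(suspended, idle or PM disabled)
 *	... poke registers ...
 *	pm_runtime_put(dev);
 */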
1283
1284 /**
1285 * __pm_runtime_set_status - Set runtime PM status of a device.
1286 * @dev: Device to handle.
1287 * @status: New runtime PM status of the device.
1288 *
1289 * If runtime PM of the device is disabled or its power.runtime_error field is
1290 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1291 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1292 * However, if the device has a parent and the parent is not active, and the
1293 * parent's power.ignore_children flag is unset, the device's status cannot be
1294 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1295 *
1296 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1297 * and the device parent's counter of unsuspended children is modified to
1298 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
1299 * notification request for the parent is submitted.
1300 *
1301 * If @dev has any suppliers (as reflected by device links to them), and @status
1302 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1303 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1304 * of the @status value) and the suppliers will be deactivated on exit. The
1305 * error returned by the failing supplier activation will be returned in that
1306 * case.
1307 */
1308 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1309 {
1310 struct device *parent = dev->parent;
1311 bool notify_parent = false;
1312 unsigned long flags;
1313 int error = 0;
1314
1315 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1316 return -EINVAL;
1317
1318 spin_lock_irqsave(&dev->power.lock, flags);
1319
1320 /*
1321 * Prevent PM-runtime from being enabled for the device or return an
1322 * error if it is enabled already and working.
1323 */
1324 if (dev->power.runtime_error || dev->power.disable_depth)
1325 dev->power.disable_depth++;
1326 else
1327 error = -EAGAIN;
1328
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1330
1331 if (error)
1332 return error;
1333
1334 /*
1335 * If the new status is RPM_ACTIVE, the suppliers can be activated
1336 * upfront regardless of the current status, because next time
1337 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1338 * involved will be dropped down to one anyway.
1339 */
1340 if (status == RPM_ACTIVE) {
1341 int idx = device_links_read_lock();
1342
1343 error = rpm_get_suppliers(dev);
1344 if (error)
1345 status = RPM_SUSPENDED;
1346
1347 device_links_read_unlock(idx);
1348 }
1349
1350 spin_lock_irqsave(&dev->power.lock, flags);
1351
1352 if (dev->power.runtime_status == status || !parent)
1353 goto out_set;
1354
1355 if (status == RPM_SUSPENDED) {
1356 atomic_add_unless(&parent->power.child_count, -1, 0);
1357 notify_parent = !parent->power.ignore_children;
1358 } else {
1359 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1360
1361 /*
1362 * It is invalid to put an active child under a parent that is
1363 * not active, has runtime PM enabled and has the
1364 * 'power.ignore_children' flag unset.
1365 */
1366 if (!parent->power.disable_depth &&
1367 !parent->power.ignore_children &&
1368 parent->power.runtime_status != RPM_ACTIVE) {
1369 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1370 dev_name(dev),
1371 dev_name(parent));
1372 error = -EBUSY;
1373 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1374 atomic_inc(&parent->power.child_count);
1375 }
1376
1377 spin_unlock(&parent->power.lock);
1378
1379 if (error) {
1380 status = RPM_SUSPENDED;
1381 goto out;
1382 }
1383 }
1384
1385 out_set:
1386 __update_runtime_status(dev, status);
1387 if (!error)
1388 dev->power.runtime_error = 0;
1389
1390 out:
1391 spin_unlock_irqrestore(&dev->power.lock, flags);
1392
1393 if (notify_parent)
1394 pm_request_idle(parent);
1395
1396 if (status == RPM_SUSPENDED) {
1397 int idx = device_links_read_lock();
1398
1399 rpm_put_suppliers(dev);
1400
1401 device_links_read_unlock(idx);
1402 }
1403
1404 pm_runtime_enable(dev);
1405
1406 return error;
1407 }
1408 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
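/*
 * Usage sketch (hypothetical probe path, illustrative only): a driver that
 * powers up its device by hand before enabling runtime PM typically tells
 * the core about the real state first:
 *
 *	err = pm_runtime_set_active(dev);	(wraps __pm_runtime_set_status())
 *	if (err)
 *		return err;
 *	pm_runtime_enable(dev);
 */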
1409
1410 /**
1411 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1412 * @dev: Device to handle.
1413 *
1414 * Flush all pending requests for the device from pm_wq and wait for all
1415 * runtime PM operations involving the device in progress to complete.
1416 *
1417 * Should be called under dev->power.lock with interrupts disabled.
1418 */
1419 static void __pm_runtime_barrier(struct device *dev)
1420 {
1421 pm_runtime_deactivate_timer(dev);
1422
1423 if (dev->power.request_pending) {
1424 dev->power.request = RPM_REQ_NONE;
1425 spin_unlock_irq(&dev->power.lock);
1426
1427 cancel_work_sync(&dev->power.work);
1428
1429 spin_lock_irq(&dev->power.lock);
1430 dev->power.request_pending = false;
1431 }
1432
1433 if (dev->power.runtime_status == RPM_SUSPENDING ||
1434 dev->power.runtime_status == RPM_RESUMING ||
1435 dev->power.idle_notification) {
1436 DEFINE_WAIT(wait);
1437
1438 /* Suspend, wake-up or idle notification in progress. */
1439 for (;;) {
1440 prepare_to_wait(&dev->power.wait_queue, &wait,
1441 TASK_UNINTERRUPTIBLE);
1442 if (dev->power.runtime_status != RPM_SUSPENDING
1443 && dev->power.runtime_status != RPM_RESUMING
1444 && !dev->power.idle_notification)
1445 break;
1446 spin_unlock_irq(&dev->power.lock);
1447
1448 schedule();
1449
1450 spin_lock_irq(&dev->power.lock);
1451 }
1452 finish_wait(&dev->power.wait_queue, &wait);
1453 }
1454 }
1455
1456 /**
1457 * pm_runtime_barrier - Flush pending requests and wait for completions.
1458 * @dev: Device to handle.
1459 *
1460 * Prevent the device from being suspended by incrementing its usage counter
1461 * and, if there's a pending resume request for the device, wake the device up.
1462 * Next, make sure that all pending requests for the device have been flushed
1463 * from pm_wq and wait for all runtime PM operations involving the device in
1464 * progress to complete.
1465 *
1466 * Return value:
1467 * 1, if there was a resume request pending and the device had to be woken up,
1468 * 0, otherwise
1469 */
1470 int pm_runtime_barrier(struct device *dev)
1471 {
1472 int retval = 0;
1473
1474 pm_runtime_get_noresume(dev);
1475 spin_lock_irq(&dev->power.lock);
1476
1477 if (dev->power.request_pending
1478 && dev->power.request == RPM_REQ_RESUME) {
1479 rpm_resume(dev, 0);
1480 retval = 1;
1481 }
1482
1483 __pm_runtime_barrier(dev);
1484
1485 spin_unlock_irq(&dev->power.lock);
1486 pm_runtime_put_noidle(dev);
1487
1488 return retval;
1489 }
1490 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1491
1492 bool pm_runtime_block_if_disabled(struct device *dev)
1493 {
1494 bool ret;
1495
1496 spin_lock_irq(&dev->power.lock);
1497
1498 ret = !pm_runtime_enabled(dev);
1499 if (ret && dev->power.last_status == RPM_INVALID)
1500 dev->power.last_status = RPM_BLOCKED;
1501
1502 spin_unlock_irq(&dev->power.lock);
1503
1504 return ret;
1505 }
1506
1507 void pm_runtime_unblock(struct device *dev)
1508 {
1509 spin_lock_irq(&dev->power.lock);
1510
1511 if (dev->power.last_status == RPM_BLOCKED)
1512 dev->power.last_status = RPM_INVALID;
1513
1514 spin_unlock_irq(&dev->power.lock);
1515 }
1516
1517 void __pm_runtime_disable(struct device *dev, bool check_resume)
1518 {
1519 spin_lock_irq(&dev->power.lock);
1520
1521 if (dev->power.disable_depth > 0) {
1522 dev->power.disable_depth++;
1523 goto out;
1524 }
1525
1526 /*
1527 * Wake up the device if there's a resume request pending, because that
1528 * means there probably is some I/O to process and disabling runtime PM
1529 * shouldn't prevent the device from processing the I/O.
1530 */
1531 if (check_resume && dev->power.request_pending &&
1532 dev->power.request == RPM_REQ_RESUME) {
1533 /*
1534 * Prevent suspends and idle notifications from being carried
1535 * out after we have woken up the device.
1536 */
1537 pm_runtime_get_noresume(dev);
1538
1539 rpm_resume(dev, 0);
1540
1541 pm_runtime_put_noidle(dev);
1542 }
1543
1544 /* Update time accounting before disabling PM-runtime. */
1545 update_pm_runtime_accounting(dev);
1546
1547 if (!dev->power.disable_depth++) {
1548 __pm_runtime_barrier(dev);
1549 dev->power.last_status = dev->power.runtime_status;
1550 }
1551
1552 out:
1553 spin_unlock_irq(&dev->power.lock);
1554 }
1555 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1556
1557 /**
1558 * pm_runtime_enable - Enable runtime PM of a device.
1559 * @dev: Device to handle.
1560 */
1561 void pm_runtime_enable(struct device *dev)
1562 {
1563 unsigned long flags;
1564
1565 spin_lock_irqsave(&dev->power.lock, flags);
1566
1567 if (!dev->power.disable_depth) {
1568 dev_warn(dev, "Unbalanced %s!\n", __func__);
1569 goto out;
1570 }
1571
1572 if (--dev->power.disable_depth > 0)
1573 goto out;
1574
1575 if (dev->power.last_status == RPM_BLOCKED) {
1576 dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1577 dump_stack();
1578 }
1579 dev->power.last_status = RPM_INVALID;
1580 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1581
1582 if (dev->power.runtime_status == RPM_SUSPENDED &&
1583 !dev->power.ignore_children &&
1584 atomic_read(&dev->power.child_count) > 0)
1585 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1586
1587 out:
1588 spin_unlock_irqrestore(&dev->power.lock, flags);
1589 }
1590 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1591
1592 static void pm_runtime_set_suspended_action(void *data)
1593 {
1594 pm_runtime_set_suspended(data);
1595 }
1596
1597 /**
1598 * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
1599 *
1600 * @dev: Device to handle.
1601 */
1602 int devm_pm_runtime_set_active_enabled(struct device *dev)
1603 {
1604 int err;
1605
1606 err = pm_runtime_set_active(dev);
1607 if (err)
1608 return err;
1609
1610 err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
1611 if (err)
1612 return err;
1613
1614 return devm_pm_runtime_enable(dev);
1615 }
1616 EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
1617
1618 static void pm_runtime_disable_action(void *data)
1619 {
1620 pm_runtime_dont_use_autosuspend(data);
1621 pm_runtime_disable(data);
1622 }
1623
1624 /**
1625 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1626 *
1627 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
1628 * you at driver exit time if needed.
1629 *
1630 * @dev: Device to handle.
1631 */
1632 int devm_pm_runtime_enable(struct device *dev)
1633 {
1634 pm_runtime_enable(dev);
1635
1636 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1637 }
1638 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
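/*
 * Usage sketch (hypothetical probe function, illustrative only): with the
 * devres-managed variant there is no need for a matching disable call in
 * the error paths or in the remove callback:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return devm_pm_runtime_enable(&pdev->dev);
 *	}
 */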
1639
1640 static void pm_runtime_put_noidle_action(void *data)
1641 {
1642 pm_runtime_put_noidle(data);
1643 }
1644
1645 /**
1646 * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
1647 *
1648 * @dev: Device to handle.
1649 */
1650 int devm_pm_runtime_get_noresume(struct device *dev)
1651 {
1652 pm_runtime_get_noresume(dev);
1653
1654 return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
1655 }
1656 EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
1657
1658 /**
1659 * pm_runtime_forbid - Block runtime PM of a device.
1660 * @dev: Device to handle.
1661 *
1662 * Increase the device's usage count and clear its power.runtime_auto flag,
1663 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1664 * for it.
1665 */
1666 void pm_runtime_forbid(struct device *dev)
1667 {
1668 spin_lock_irq(&dev->power.lock);
1669 if (!dev->power.runtime_auto)
1670 goto out;
1671
1672 dev->power.runtime_auto = false;
1673 atomic_inc(&dev->power.usage_count);
1674 rpm_resume(dev, 0);
1675
1676 out:
1677 spin_unlock_irq(&dev->power.lock);
1678 }
1679 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1680
1681 /**
1682 * pm_runtime_allow - Unblock runtime PM of a device.
1683 * @dev: Device to handle.
1684 *
1685 * Decrease the device's usage count and set its power.runtime_auto flag.
1686 */
1687 void pm_runtime_allow(struct device *dev)
1688 {
1689 int ret;
1690
1691 spin_lock_irq(&dev->power.lock);
1692 if (dev->power.runtime_auto)
1693 goto out;
1694
1695 dev->power.runtime_auto = true;
1696 ret = rpm_drop_usage_count(dev);
1697 if (ret == 0)
1698 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1699 else if (ret > 0)
1700 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1701
1702 out:
1703 spin_unlock_irq(&dev->power.lock);
1704 }
1705 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1706
1707 /**
1708 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1709 * @dev: Device to handle.
1710 *
1711 * Set the power.no_callbacks flag, which tells the PM core that this
1712 * device is power-managed through its parent and has no runtime PM
1713 * callbacks of its own. The runtime sysfs attributes will be removed.
1714 */
1715 void pm_runtime_no_callbacks(struct device *dev)
1716 {
1717 spin_lock_irq(&dev->power.lock);
1718 dev->power.no_callbacks = 1;
1719 spin_unlock_irq(&dev->power.lock);
1720 if (device_is_registered(dev))
1721 rpm_sysfs_remove(dev);
1722 }
1723 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1724
1725 /**
1726 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1727 * @dev: Device to handle
1728 *
1729 * Set the power.irq_safe flag, which tells the PM core that the
1730 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1731 * always be invoked with the spinlock held and interrupts disabled. It also
1732 * causes the parent's usage counter to be permanently incremented, preventing
1733 * the parent from runtime suspending -- otherwise an irq-safe child might have
1734 * to wait for a non-irq-safe parent.
1735 */
1736 void pm_runtime_irq_safe(struct device *dev)
1737 {
1738 if (dev->parent)
1739 pm_runtime_get_sync(dev->parent);
1740
1741 spin_lock_irq(&dev->power.lock);
1742 dev->power.irq_safe = 1;
1743 spin_unlock_irq(&dev->power.lock);
1744 }
1745 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1746
1747 /**
1748 * update_autosuspend - Handle a change to a device's autosuspend settings.
1749 * @dev: Device to handle.
1750 * @old_delay: The former autosuspend_delay value.
1751 * @old_use: The former use_autosuspend value.
1752 *
1753 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1754 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1755 *
1756 * This function must be called under dev->power.lock with interrupts disabled.
1757 */
1758 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1759 {
1760 int delay = dev->power.autosuspend_delay;
1761
1762 /* Should runtime suspend be prevented now? */
1763 if (dev->power.use_autosuspend && delay < 0) {
1764
1765 /* If it used to be allowed then prevent it. */
1766 if (!old_use || old_delay >= 0) {
1767 atomic_inc(&dev->power.usage_count);
1768 rpm_resume(dev, 0);
1769 } else {
1770 trace_rpm_usage(dev, 0);
1771 }
1772 }
1773
1774 /* Runtime suspend should be allowed now. */
1775 else {
1776
1777 /* If it used to be prevented then allow it. */
1778 if (old_use && old_delay < 0)
1779 atomic_dec(&dev->power.usage_count);
1780
1781 /* Maybe we can autosuspend now. */
1782 rpm_idle(dev, RPM_AUTO);
1783 }
1784 }
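
/*
 * Illustrative note (not part of the original file): this helper is the common
 * tail of the two setters below, so the behavior it implements can be
 * exercised with a sketch like:
 *
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_autosuspend_delay(dev, -1);	// pins the device active
 *	...
 *	pm_runtime_set_autosuspend_delay(dev, 500);	// autosuspend allowed again
 */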
1785
1786 /**
1787 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1788 * @dev: Device to handle.
1789 * @delay: Value of the new delay in milliseconds.
1790 *
1791 * Set the device's power.autosuspend_delay value. If it changes to negative
1792 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1793 * changes the other way, allow runtime suspends.
1794 */
1795 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1796 {
1797 int old_delay, old_use;
1798
1799 spin_lock_irq(&dev->power.lock);
1800 old_delay = dev->power.autosuspend_delay;
1801 old_use = dev->power.use_autosuspend;
1802 dev->power.autosuspend_delay = delay;
1803 update_autosuspend(dev, old_delay, old_use);
1804 spin_unlock_irq(&dev->power.lock);
1805 }
1806 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
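
/*
 * Usage sketch (illustrative, not part of the original file): a hypothetical
 * driver enabling autosuspend with a 2 s inactivity delay at probe time:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 * The same value is also exposed through the power/autosuspend_delay_ms sysfs
 * attribute.
 */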
1807
1808 /**
1809 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1810 * @dev: Device to handle.
1811 * @use: New value for use_autosuspend.
1812 *
1813 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1814 * suspends as needed.
1815 */
1816 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1817 {
1818 int old_delay, old_use;
1819
1820 spin_lock_irq(&dev->power.lock);
1821 old_delay = dev->power.autosuspend_delay;
1822 old_use = dev->power.use_autosuspend;
1823 dev->power.use_autosuspend = use;
1824 update_autosuspend(dev, old_delay, old_use);
1825 spin_unlock_irq(&dev->power.lock);
1826 }
1827 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
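
/*
 * Usage sketch (illustrative, not part of the original file): drivers normally
 * reach this through the pm_runtime_use_autosuspend() and
 * pm_runtime_dont_use_autosuspend() wrappers, and then drop their references
 * on the autosuspend path instead of suspending immediately:
 *
 *	pm_runtime_mark_last_busy(dev);		// restart the inactivity timer
 *	pm_runtime_put_autosuspend(dev);	// suspend only after the delay expires
 */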
1828
1829 /**
1830 * pm_runtime_init - Initialize runtime PM fields in given device object.
1831 * @dev: Device object to initialize.
1832 */
1833 void pm_runtime_init(struct device *dev)
1834 {
1835 dev->power.runtime_status = RPM_SUSPENDED;
1836 dev->power.last_status = RPM_INVALID;
1837 dev->power.idle_notification = false;
1838
1839 dev->power.disable_depth = 1;
1840 atomic_set(&dev->power.usage_count, 0);
1841
1842 dev->power.runtime_error = 0;
1843
1844 atomic_set(&dev->power.child_count, 0);
1845 pm_suspend_ignore_children(dev, false);
1846 dev->power.runtime_auto = true;
1847
1848 dev->power.request_pending = false;
1849 dev->power.request = RPM_REQ_NONE;
1850 dev->power.deferred_resume = false;
1851 dev->power.needs_force_resume = false;
1852 INIT_WORK(&dev->power.work, pm_runtime_work);
1853
1854 dev->power.timer_expires = 0;
1855 hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
1856 HRTIMER_MODE_ABS);
1857
1858 init_waitqueue_head(&dev->power.wait_queue);
1859 }
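
/*
 * Illustrative note (not part of the original file): the driver core calls
 * pm_runtime_init() for every device, so runtime PM starts out disabled
 * (disable_depth == 1) with the status set to RPM_SUSPENDED.  A hypothetical
 * driver whose hardware is already powered up at probe time therefore
 * typically does:
 *
 *	pm_runtime_set_active(dev);	// make the status match the hardware
 *	pm_runtime_enable(dev);		// drop the initial disable_depth
 *
 * before issuing any get/put calls.
 */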
1860
1861 /**
1862 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1863 * @dev: Device object to re-initialize.
1864 */
1865 void pm_runtime_reinit(struct device *dev)
1866 {
1867 if (!pm_runtime_enabled(dev)) {
1868 if (dev->power.runtime_status == RPM_ACTIVE)
1869 pm_runtime_set_suspended(dev);
1870 if (dev->power.irq_safe) {
1871 spin_lock_irq(&dev->power.lock);
1872 dev->power.irq_safe = 0;
1873 spin_unlock_irq(&dev->power.lock);
1874 if (dev->parent)
1875 pm_runtime_put(dev->parent);
1876 }
1877 }
1878 /*
1879 * Clear power.needs_force_resume in case it has been set by
1880 * pm_runtime_force_suspend() invoked from a driver remove callback.
1881 */
1882 dev->power.needs_force_resume = false;
1883 }
1884
1885 /**
1886 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1887 * @dev: Device object being removed from device hierarchy.
1888 */
1889 void pm_runtime_remove(struct device *dev)
1890 {
1891 __pm_runtime_disable(dev, false);
1892 pm_runtime_reinit(dev);
1893 }
1894
1895 /**
1896 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1897 * @dev: Consumer device.
1898 */
1899 void pm_runtime_get_suppliers(struct device *dev)
1900 {
1901 struct device_link *link;
1902 int idx;
1903
1904 idx = device_links_read_lock();
1905
1906 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1907 device_links_read_lock_held())
1908 if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
1909 link->supplier_preactivated = true;
1910 pm_runtime_get_sync(link->supplier);
1911 }
1912
1913 device_links_read_unlock(idx);
1914 }
1915
1916 /**
1917 * pm_runtime_put_suppliers - Drop references to supplier devices.
1918 * @dev: Consumer device.
1919 */
1920 void pm_runtime_put_suppliers(struct device *dev)
1921 {
1922 struct device_link *link;
1923 int idx;
1924
1925 idx = device_links_read_lock();
1926
1927 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1928 device_links_read_lock_held())
1929 if (link->supplier_preactivated) {
1930 link->supplier_preactivated = false;
1931 pm_runtime_put(link->supplier);
1932 }
1933
1934 device_links_read_unlock(idx);
1935 }
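
/*
 * Usage sketch (illustrative, not part of the original file): the two helpers
 * above act on device links created with DL_FLAG_PM_RUNTIME, for instance:
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *
 * With such a link in place, the probe path can use pm_runtime_get_suppliers()
 * to resume and pin the supplier while the consumer probes, and
 * pm_runtime_put_suppliers() to release it again afterwards.
 */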
1936
1937 void pm_runtime_new_link(struct device *dev)
1938 {
1939 spin_lock_irq(&dev->power.lock);
1940 dev->power.links_count++;
1941 spin_unlock_irq(&dev->power.lock);
1942 }
1943
1944 static void pm_runtime_drop_link_count(struct device *dev)
1945 {
1946 spin_lock_irq(&dev->power.lock);
1947 WARN_ON(dev->power.links_count == 0);
1948 dev->power.links_count--;
1949 spin_unlock_irq(&dev->power.lock);
1950 }
1951
1952 /**
1953 * pm_runtime_drop_link - Prepare for device link removal.
1954 * @link: Device link going away.
1955 *
1956 * Drop the link count of the consumer end of @link and decrement the supplier
1957 * device's runtime PM usage counter as many times as needed to drop all of the
1958 * runtime PM references to it held by the consumer.
1959 */
1960 void pm_runtime_drop_link(struct device_link *link)
1961 {
1962 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
1963 return;
1964
1965 pm_runtime_drop_link_count(link->consumer);
1966 pm_runtime_release_supplier(link);
1967 pm_request_idle(link->supplier);
1968 }
1969
1970 static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
1971 {
1972 /*
1973 * Setting power.strict_midlayer means that the middle layer
1974 * code does not want its runtime PM callbacks to be invoked via
1975 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
1976 * return a direct pointer to the driver callback in that case.
1977 */
1978 if (dev_pm_strict_midlayer_is_set(dev))
1979 return __rpm_get_driver_callback(dev, cb_offset);
1980
1981 return __rpm_get_callback(dev, cb_offset);
1982 }
1983
1984 #define GET_CALLBACK(dev, callback) \
1985 get_callback(dev, offsetof(struct dev_pm_ops, callback))
1986
1987 /**
1988 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1989 * @dev: Device to suspend.
1990 *
1991 * Disable runtime PM so that we can safely check the device's runtime PM status
1992 * and, if it is active, invoke its ->runtime_suspend callback to suspend it and
1993 * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
1994 * usage and children counters don't indicate that the device was in use before
1995 * the system-wide transition under way, decrement its parent's children counter
1996 * (if there is a parent). Keep runtime PM disabled to preserve the state
1997 * unless we encounter errors.
1998 *
1999 * Typically, this function is invoked from a system suspend callback to make
2000 * sure the device is put into a low-power state. It should only be used during
2001 * system-wide PM transitions to sleep states, and it assumes that the analogous
2002 * pm_runtime_force_resume() will be used to resume the device afterwards.
2003 */
2004 int pm_runtime_force_suspend(struct device *dev)
2005 {
2006 int (*callback)(struct device *);
2007 int ret;
2008
2009 pm_runtime_disable(dev);
2010 if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
2011 return 0;
2012
2013 callback = GET_CALLBACK(dev, runtime_suspend);
2014
2015 dev_pm_enable_wake_irq_check(dev, true);
2016 ret = callback ? callback(dev) : 0;
2017 if (ret)
2018 goto err;
2019
2020 dev_pm_enable_wake_irq_complete(dev);
2021
2022 /*
2023 * If the device can stay in suspend after the system-wide transition
2024 * to the working state that will follow, drop the children counter of
2025 * its parent and the usage counters of its suppliers. Otherwise, set
2026 * power.needs_force_resume to let pm_runtime_force_resume() know that
2027 * the device needs to be taken care of, and to prevent this function
2028 * from handling the device again if it is passed to it subsequently.
2030 */
2031 if (pm_runtime_need_not_resume(dev))
2032 pm_runtime_set_suspended(dev);
2033 else
2034 dev->power.needs_force_resume = true;
2035
2036 return 0;
2037
2038 err:
2039 dev_pm_disable_wake_irq_check(dev, true);
2040 pm_runtime_enable(dev);
2041 return ret;
2042 }
2043 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
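
/*
 * Usage sketch (illustrative, not part of the original file): drivers that
 * want their runtime PM callbacks reused for system-wide suspend and resume
 * typically point their system sleep ops at this function and its counterpart
 * pm_runtime_force_resume(), for example:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *	};
 *
 * or, equivalently, use the DEFINE_RUNTIME_DEV_PM_OPS() helper macro.
 */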
2044
2045 #ifdef CONFIG_PM_SLEEP
2046
2047 /**
2048 * pm_runtime_force_resume - Force a device into resume state if needed.
2049 * @dev: Device to resume.
2050 *
2051 * This function expects that either pm_runtime_force_suspend() has put the
2052 * device into a low-power state prior to calling it, or the device had been
2053 * runtime-suspended before the preceding system-wide suspend transition and it
2054 * was left in suspend during that transition.
2055 *
2056 * The actions carried out by pm_runtime_force_suspend(), or by a runtime
2057 * suspend in general, are reversed and the device is brought back into full
2058 * power if it is expected to be used on system resume, which is the case when
2059 * its needs_force_resume flag is set or when its smart_suspend flag is set and
2060 * its runtime PM status is "active".
2061 *
2062 * In other cases, the resume is deferred to be managed via runtime PM.
2063 *
2064 * Typically, this function may be invoked from a system resume callback.
2065 */
2066 int pm_runtime_force_resume(struct device *dev)
2067 {
2068 int (*callback)(struct device *);
2069 int ret = 0;
2070
2071 if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
2072 pm_runtime_status_suspended(dev)))
2073 goto out;
2074
2075 callback = GET_CALLBACK(dev, runtime_resume);
2076
2077 dev_pm_disable_wake_irq_check(dev, false);
2078 ret = callback ? callback(dev) : 0;
2079 if (ret) {
2080 pm_runtime_set_suspended(dev);
2081 dev_pm_enable_wake_irq_check(dev, false);
2082 goto out;
2083 }
2084
2085 pm_runtime_mark_last_busy(dev);
2086
2087 out:
2088 /*
2089 * The smart_suspend flag can be cleared here because it is not going
2090 * to be necessary until the next system-wide suspend transition that
2091 * will update it again.
2092 */
2093 dev->power.smart_suspend = false;
2094 /*
2095 * Also clear needs_force_resume so that this function skips devices it
2096 * has already handled once.
2097 */
2098 dev->power.needs_force_resume = false;
2099
2100 pm_runtime_enable(dev);
2101 return ret;
2102 }
2103 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
2104
2105 bool pm_runtime_need_not_resume(struct device *dev)
2106 {
2107 return atomic_read(&dev->power.usage_count) <= 1 &&
2108 (atomic_read(&dev->power.child_count) == 0 ||
2109 dev->power.ignore_children);
2110 }
2111
2112 #endif /* CONFIG_PM_SLEEP */
2113