Lines matching "suspend-to-idle"
1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
30 if (dev->driver && dev->driver->pm) in __rpm_get_driver_callback()
31 return get_callback_ptr(dev->driver->pm, cb_offset); in __rpm_get_driver_callback()
41 if (dev->pm_domain) in __rpm_get_callback()
42 ops = &dev->pm_domain->ops; in __rpm_get_callback()
43 else if (dev->type && dev->type->pm) in __rpm_get_callback()
44 ops = dev->type->pm; in __rpm_get_callback()
45 else if (dev->class && dev->class->pm) in __rpm_get_callback()
46 ops = dev->class->pm; in __rpm_get_callback()
47 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
48 ops = dev->bus->pm; in __rpm_get_callback()
68 * update_pm_runtime_accounting - Update the time accounting of power states
69 * @dev: Device to update the accounting for
71 * In order to be able to have time accounting of the various power states
72 * (as used by programs such as PowerTOP to show the effectiveness of runtime
73 * PM), we need to track the time spent in each state.
75 * update_pm_runtime_accounting() must be called each time before the runtime_status field is updated, to account the time in the old state correctly.
82 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
85 last = dev->power.accounting_timestamp; in update_pm_runtime_accounting()
88 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
98 delta = now - last; in update_pm_runtime_accounting()
100 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
101 dev->power.suspended_time += delta; in update_pm_runtime_accounting()
103 dev->power.active_time += delta; in update_pm_runtime_accounting()
110 dev->power.runtime_status = status; in __update_runtime_status()
118 spin_lock_irqsave(&dev->power.lock, flags); in rpm_get_accounted_time()
121 time = suspended ? dev->power.suspended_time : dev->power.active_time; in rpm_get_accounted_time()
123 spin_unlock_irqrestore(&dev->power.lock, flags); in rpm_get_accounted_time()
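The suspended/active selector above feeds two small exported accessors. A minimal sketch consistent with the fragment (mainline defines these next to rpm_get_accounted_time(); reconstructed here, not verbatim):

u64 pm_runtime_active_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, false);
}

u64 pm_runtime_suspended_time(struct device *dev)
{
	return rpm_get_accounted_time(dev, true);
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);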
140 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
141 * @dev: Device to handle.
145 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
146 hrtimer_try_to_cancel(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
147 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
152 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
153 * @dev: Device to handle.
162 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
166 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
167 * @dev: Device to handle.
169 * Compute the autosuspend-delay expiration time based on the device's power.last_busy time. If the delay has already expired or is disabled (negative) or the power.use_autosuspend flag isn't set, return 0.
172 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
174 * This function may be called either with or without dev->power.lock held.
182 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
185 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
189 expires = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
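The tail of the computation is elided above. A minimal sketch of the remainder, assuming the millisecond delay is added to last_busy in nanoseconds (reconstructed, not verbatim):

	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
	if (expires > ktime_get_mono_fast_ns())
		return expires;	/* Expires in the future */

	return 0;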
200 return dev->power.memalloc_noio; in dev_memalloc_noio()
204 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
205 * @dev: Device to handle.
208 * Set the flag for all devices in the path from the device to the root device in the device tree if @enable is true, otherwise clear the flag for devices in the path whose siblings don't set it.
214 * The function should only be called by block device or network device drivers, to solve the deadlock problem during runtime resume/suspend:
217 * if memory allocation with GFP_KERNEL is called inside the runtime resume/suspend callback of any one of its ancestors (or the block device itself), the deadlock may be triggered inside the memory allocation.
239 /* hold power lock since bitfield is not SMP-safe. */ in pm_runtime_set_memalloc_noio()
240 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
241 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
242 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
243 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
246 * No need to enable the ancestors any more if the device itself has already been enabled. in pm_runtime_set_memalloc_noio()
252 dev = dev->parent; in pm_runtime_set_memalloc_noio()
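Driver-side usage is a single call during setup; the loop above then walks the ancestor path. A hedged sketch (foo_probe() is hypothetical):

#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Mark the whole ancestor path so runtime PM callbacks perform
	 * memory allocations with GFP_NOIO and cannot deadlock on I/O.
	 */
	pm_runtime_set_memalloc_noio(&pdev->dev, true);
	return 0;
}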
268 * rpm_check_suspend_allowed - Test whether a device may be suspended.
269 * @dev: Device to test.
275 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
276 retval = -EINVAL; in rpm_check_suspend_allowed()
277 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
278 retval = -EACCES; in rpm_check_suspend_allowed()
279 else if (atomic_read(&dev->power.usage_count)) in rpm_check_suspend_allowed()
280 retval = -EAGAIN; in rpm_check_suspend_allowed()
281 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
282 retval = -EBUSY; in rpm_check_suspend_allowed()
285 else if ((dev->power.deferred_resume && in rpm_check_suspend_allowed()
286 dev->power.runtime_status == RPM_SUSPENDING) || in rpm_check_suspend_allowed()
287 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
288 retval = -EAGAIN; in rpm_check_suspend_allowed()
290 retval = -EPERM; in rpm_check_suspend_allowed()
291 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
301 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_get_suppliers()
308 retval = pm_runtime_get_sync(link->supplier); in rpm_get_suppliers()
310 if (retval < 0 && retval != -EACCES) { in rpm_get_suppliers()
311 pm_runtime_put_noidle(link->supplier); in rpm_get_suppliers()
314 refcount_inc(&link->rpm_active); in rpm_get_suppliers()
320 * pm_runtime_release_supplier - Drop references to device link's supplier.
323 * Drop all runtime PM references associated with @link to its supplier device.
327 struct device *supplier = link->supplier; in pm_runtime_release_supplier()
335 while (refcount_dec_not_one(&link->rpm_active) && in pm_runtime_release_supplier()
336 atomic_read(&supplier->power.usage_count) > 0) in pm_runtime_release_supplier()
344 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in __rpm_put_suppliers()
348 pm_request_idle(link->supplier); in __rpm_put_suppliers()
362 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_suspend_suppliers()
364 pm_request_idle(link->supplier); in rpm_suspend_suppliers()
370 * __rpm_callback - Run a given runtime PM callback for a given device.
371 * @cb: Runtime PM callback to run.
372 * @dev: Device to run the callback for.
375 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
378 bool use_links = dev->power.links_count > 0; in __rpm_callback()
380 if (dev->power.irq_safe) { in __rpm_callback()
381 spin_unlock(&dev->power.lock); in __rpm_callback()
383 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
389 * The device's runtime PM status cannot change until this routine returns, so it is safe to read the status outside of the lock. in __rpm_callback()
392 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
408 if (dev->power.irq_safe) { in __rpm_callback()
409 spin_lock(&dev->power.lock); in __rpm_callback()
419 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) || in __rpm_callback()
420 (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
429 spin_lock_irq(&dev->power.lock); in __rpm_callback()
436 * rpm_callback - Run a given runtime PM callback for a given device.
437 * @cb: Runtime PM callback to run.
438 * @dev: Device to run the callback for.
444 if (dev->power.memalloc_noio) { in rpm_callback()
464 * Since -EACCES means that runtime PM is disabled for the given device, in rpm_callback()
466 * it should not be returned by the callbacks; if it is returned nevertheless, assume it to be a transient error and convert it to in rpm_callback()
467 * -EAGAIN. in rpm_callback()
469 if (retval == -EACCES) in rpm_callback()
470 retval = -EAGAIN; in rpm_callback()
472 if (retval != -EAGAIN && retval != -EBUSY) in rpm_callback()
473 dev->power.runtime_error = retval; in rpm_callback()
479 * rpm_idle - Notify device bus type if the device can be suspended.
480 * @dev: Device to notify the bus type about.
483 * Check if the device's runtime PM status allows it to be suspended. If
484 * another idle notification has been started earlier, return immediately. If
485 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
486 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback doesn't return an error code, suspend the device synchronously.
489 * This function must be called under dev->power.lock with interrupts disabled.
504 /* Idle notifications are allowed only in the RPM_ACTIVE state. */ in rpm_idle()
505 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
506 retval = -EAGAIN; in rpm_idle()
509 * Any pending request other than an idle notification takes precedence over us, except that the timer may be running. in rpm_idle()
512 else if (dev->power.request_pending && in rpm_idle()
513 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
514 retval = -EAGAIN; in rpm_idle()
517 else if (dev->power.idle_notification) in rpm_idle()
518 retval = -EINPROGRESS; in rpm_idle()
523 /* Pending requests need to be canceled. */ in rpm_idle()
524 dev->power.request = RPM_REQ_NONE; in rpm_idle()
529 if (!callback || dev->power.no_callbacks) in rpm_idle()
532 /* Carry out an asynchronous or a synchronous idle notification. */ in rpm_idle()
534 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
535 if (!dev->power.request_pending) { in rpm_idle()
536 dev->power.request_pending = true; in rpm_idle()
537 queue_work(pm_wq, &dev->power.work); in rpm_idle()
543 dev->power.idle_notification = true; in rpm_idle()
545 if (dev->power.irq_safe) in rpm_idle()
546 spin_unlock(&dev->power.lock); in rpm_idle()
548 spin_unlock_irq(&dev->power.lock); in rpm_idle()
552 if (dev->power.irq_safe) in rpm_idle()
553 spin_lock(&dev->power.lock); in rpm_idle()
555 spin_lock_irq(&dev->power.lock); in rpm_idle()
557 dev->power.idle_notification = false; in rpm_idle()
558 wake_up_all(&dev->power.wait_queue); in rpm_idle()
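A driver-side ->runtime_idle() that defers to autosuspend instead of letting the core suspend immediately; rpm_idle() treats a nonzero return as "do not suspend now". Sketch only, foo_runtime_idle() is hypothetical:

static int foo_runtime_idle(struct device *dev)
{
	/* Re-arm the autosuspend timer rather than suspending now. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return -EBUSY;	/* nonzero: the core will not suspend on our behalf */
}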
566 * rpm_suspend - Carry out runtime suspend of given device.
567 * @dev: Device to suspend.
570 * Check if the device's runtime PM status allows it to be suspended.
571 * Cancel a pending idle notification, autosuspend or suspend. If
572 * another suspend has been started earlier, either return immediately
573 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
574 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
575 * otherwise run the ->runtime_suspend() callback directly. If
576 * ->runtime_suspend() succeeds and a deferred resume was requested while
577 * the callback was running, carry it out; otherwise send an idle
578 * notification for its parent (if the suspend succeeded and both
579 * ignore_children of parent->power and irq_safe of dev->power are not set).
580 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
581 * flag is set and the next autosuspend-delay expiration time is in the
584 * This function must be called under dev->power.lock with interrupts disabled.
587 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
601 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) in rpm_suspend()
602 retval = -EAGAIN; in rpm_suspend()
608 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
612 /* Pending requests need to be canceled. */ in rpm_suspend()
613 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
617 * Optimization: if the timer is already running and set to expire at or before the autosuspend delay, avoid the overhead of resetting it; just let it expire and pm_suspend_timer_fn() will take care of the rest. in rpm_suspend()
622 if (!(dev->power.timer_expires && in rpm_suspend()
623 dev->power.timer_expires <= expires)) { in rpm_suspend()
625 * We add a slack of 25% to gather wakeups without sacrificing the granularity. in rpm_suspend()
628 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * in rpm_suspend()
631 dev->power.timer_expires = expires; in rpm_suspend()
632 hrtimer_start_range_ns(&dev->power.suspend_timer, in rpm_suspend()
637 dev->power.timer_autosuspends = 1; in rpm_suspend()
642 /* Other scheduled or pending requests need to be canceled. */ in rpm_suspend()
645 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
649 retval = -EINPROGRESS; in rpm_suspend()
653 if (dev->power.irq_safe) { in rpm_suspend()
654 spin_unlock(&dev->power.lock); in rpm_suspend()
658 spin_lock(&dev->power.lock); in rpm_suspend()
662 /* Wait for the other suspend running in parallel with us. */ in rpm_suspend()
664 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
666 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
669 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
673 spin_lock_irq(&dev->power.lock); in rpm_suspend()
675 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
679 if (dev->power.no_callbacks) in rpm_suspend()
682 /* Carry out an asynchronous or a synchronous suspend. */ in rpm_suspend()
684 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
686 if (!dev->power.request_pending) { in rpm_suspend()
687 dev->power.request_pending = true; in rpm_suspend()
688 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
708 if (dev->parent) { in rpm_suspend()
709 parent = dev->parent; in rpm_suspend()
710 atomic_add_unless(&parent->power.child_count, -1, 0); in rpm_suspend()
712 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
714 if (dev->power.deferred_resume) { in rpm_suspend()
715 dev->power.deferred_resume = false; in rpm_suspend()
717 retval = -EAGAIN; in rpm_suspend()
721 if (dev->power.irq_safe) in rpm_suspend()
724 /* Maybe the parent is now able to suspend. */ in rpm_suspend()
725 if (parent && !parent->power.ignore_children) { in rpm_suspend()
726 spin_unlock(&dev->power.lock); in rpm_suspend()
728 spin_lock(&parent->power.lock); in rpm_suspend()
730 spin_unlock(&parent->power.lock); in rpm_suspend()
732 spin_lock(&dev->power.lock); in rpm_suspend()
734 /* Maybe the suppliers are now able to suspend. */ in rpm_suspend()
735 if (dev->power.links_count > 0) { in rpm_suspend()
736 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
740 spin_lock_irq(&dev->power.lock); in rpm_suspend()
751 dev->power.deferred_resume = false; in rpm_suspend()
752 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
760 if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) && in rpm_suspend()
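The usual driver-side counterpart of the autosuspend path above, recording activity and dropping the usage count asynchronously. Sketch, foo_io_done() is hypothetical:

static void foo_io_done(struct device *dev)
{
	/* Let the autosuspend delay (plus the 25% slack) elapse first. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}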
770 * rpm_resume - Carry out runtime resume of given device.
771 * @dev: Device to resume.
774 * Check if the device's runtime PM status allows it to be resumed. Cancel
776 * any scheduled or pending requests. If another resume has been started earlier, either return immediately or wait for it to finish, depending on the
777 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
778 * parallel with this function, either tell the other process to resume after
779 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
781 * flag is set then queue a resume request; otherwise run the ->runtime_resume() callback directly. Queue an idle notification for the device if the resume succeeded.
784 * This function must be called under dev->power.lock with interrupts disabled.
787 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
796 if (dev->power.runtime_error) { in rpm_resume()
797 retval = -EINVAL; in rpm_resume()
798 } else if (dev->power.disable_depth > 0) { in rpm_resume()
799 if (dev->power.runtime_status == RPM_ACTIVE && in rpm_resume()
800 dev->power.last_status == RPM_ACTIVE) in rpm_resume()
805 retval = -EACCES; in rpm_resume()
811 * Other scheduled or pending requests need to be canceled. Small optimization: if an autosuspend timer is running, leave it running in rpm_resume()
813 * rather than cancelling it now only to restart it again in the near future. in rpm_resume()
816 dev->power.request = RPM_REQ_NONE; in rpm_resume()
817 if (!dev->power.timer_autosuspends) in rpm_resume()
820 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
825 if (dev->power.runtime_status == RPM_RESUMING || in rpm_resume()
826 dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
830 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
831 dev->power.deferred_resume = true; in rpm_resume()
833 retval = -EINPROGRESS; in rpm_resume()
835 retval = -EINPROGRESS; in rpm_resume()
840 if (dev->power.irq_safe) { in rpm_resume()
841 spin_unlock(&dev->power.lock); in rpm_resume()
845 spin_lock(&dev->power.lock); in rpm_resume()
851 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
853 if (dev->power.runtime_status != RPM_RESUMING && in rpm_resume()
854 dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
857 spin_unlock_irq(&dev->power.lock); in rpm_resume()
861 spin_lock_irq(&dev->power.lock); in rpm_resume()
863 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
872 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
873 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
874 if (dev->parent->power.disable_depth > 0 || in rpm_resume()
875 dev->parent->power.ignore_children || in rpm_resume()
876 dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
877 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
878 spin_unlock(&dev->parent->power.lock); in rpm_resume()
882 spin_unlock(&dev->parent->power.lock); in rpm_resume()
887 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
888 if (!dev->power.request_pending) { in rpm_resume()
889 dev->power.request_pending = true; in rpm_resume()
890 queue_work(pm_wq, &dev->power.work); in rpm_resume()
896 if (!parent && dev->parent) { in rpm_resume()
899 * Increment the parent's usage counter and resume it if necessary. Not needed if dev is irq-safe; then the parent is permanently resumed. in rpm_resume()
902 parent = dev->parent; in rpm_resume()
903 if (dev->power.irq_safe) in rpm_resume()
906 spin_unlock(&dev->power.lock); in rpm_resume()
910 spin_lock(&parent->power.lock); in rpm_resume()
913 * Resume the parent if it has runtime PM enabled and is not set to ignore its children. in rpm_resume()
915 if (!parent->power.disable_depth && in rpm_resume()
916 !parent->power.ignore_children) { in rpm_resume()
918 if (parent->power.runtime_status != RPM_ACTIVE) in rpm_resume()
919 retval = -EBUSY; in rpm_resume()
921 spin_unlock(&parent->power.lock); in rpm_resume()
923 spin_lock(&dev->power.lock); in rpm_resume()
931 if (dev->power.no_callbacks) in rpm_resume()
949 atomic_inc(&parent->power.child_count); in rpm_resume()
951 wake_up_all(&dev->power.wait_queue); in rpm_resume()
957 if (parent && !dev->power.irq_safe) { in rpm_resume()
958 spin_unlock_irq(&dev->power.lock); in rpm_resume()
962 spin_lock_irq(&dev->power.lock); in rpm_resume()
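Consumer-side sketch of the synchronous path that funnels into rpm_resume(); foo_start_transfer() is hypothetical:

static int foo_start_transfer(struct device *dev)
{
	int ret;

	/* Bumps usage_count and resumes the device via rpm_resume(). */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... perform I/O while the device is RPM_ACTIVE ... */

	pm_runtime_put(dev);
	return 0;
}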
971 * pm_runtime_work - Universal runtime PM work function.
974 * Use @work to get the device object the work is to be done for, determine what
975 * is to be done and execute the appropriate runtime PM function.
982 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
984 if (!dev->power.request_pending) in pm_runtime_work()
987 req = dev->power.request; in pm_runtime_work()
988 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
989 dev->power.request_pending = false; in pm_runtime_work()
1009 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
1013 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
1016 * Check if the time is right and queue a suspend request.
1024 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
1026 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
1032 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
1033 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
1037 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
1043 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1044 * @dev: Device to suspend.
1045 * @delay: Time to wait before submitting a suspend request, in milliseconds.
1053 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
1064 /* Other scheduled or pending requests need to be canceled. */ in pm_schedule_suspend()
1068 dev->power.timer_expires = expires; in pm_schedule_suspend()
1069 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
1070 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); in pm_schedule_suspend()
1073 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
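Usage sketch: request a suspend attempt half a second out; the hrtimer armed above fires pm_suspend_timer_fn(), which queues the request (500 is an arbitrary example delay in milliseconds):

	if (pm_schedule_suspend(dev, 500))
		dev_dbg(dev, "could not schedule suspend\n");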
1083 ret = atomic_sub_return(1, &dev->power.usage_count); in rpm_drop_usage_count()
1090 * Because rpm_resume() does not check the usage counter, it is sufficient to increment the usage counter here to reverse the change made above. in rpm_drop_usage_count()
1093 atomic_inc(&dev->power.usage_count); in rpm_drop_usage_count()
1095 return -EINVAL; in rpm_drop_usage_count()
1099 * __pm_runtime_idle - Entry point for runtime idle operations.
1100 * @dev: Device to send idle notification for.
1105 * If the RPM_GET_PUT flag is set, decrement the device's usage counter and return immediately if it is larger than zero (if it becomes negative, log a warning, increment it, and return an error). Then carry out an idle notification, either synchronous or asynchronous.
1126 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
1128 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
1130 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
1137 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1138 * @dev: Device to suspend.
1143 * If the RPM_GET_PUT flag is set, decrement the device's usage counter and return immediately if it is larger than zero (if it becomes negative, log a warning, increment it, and return an error). Then carry out a suspend, either synchronous or asynchronous.
1164 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1166 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1168 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1175 * __pm_runtime_resume - Entry point for runtime resume operations.
1176 * @dev: Device to resume.
1190 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1191 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1194 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1196 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1198 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
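These three entry points back the inline wrappers in include/linux/pm_runtime.h; a paraphrased sketch of the mapping (see the header for the authoritative set):

static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}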
1205 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1206 * @dev: Device to handle.
1207 * @ign_usage_count: Whether or not to look at the current usage counter value.
1209 * Return -EINVAL if runtime PM is disabled for @dev.
1218 * If @ign_usage_count is %true, this function can be used to prevent suspending the device when its runtime PM status is %RPM_ACTIVE.
1221 * If @ign_usage_count is %false, this function can be used to prevent suspending the device when both its runtime PM status is %RPM_ACTIVE and its usage counter is non-zero.
1233 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_conditional()
1234 if (dev->power.disable_depth > 0) { in pm_runtime_get_conditional()
1235 retval = -EINVAL; in pm_runtime_get_conditional()
1236 } else if (dev->power.runtime_status != RPM_ACTIVE) { in pm_runtime_get_conditional()
1238 } else if (ign_usage_count || (!dev->power.ignore_children && in pm_runtime_get_conditional()
1239 atomic_read(&dev->power.child_count) > 0)) { in pm_runtime_get_conditional()
1241 atomic_inc(&dev->power.usage_count); in pm_runtime_get_conditional()
1243 retval = atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_conditional()
1246 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_conditional()
1252 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is in active state.
1258 * Increment the runtime PM usage counter of @dev if its runtime PM status is RPM_ACTIVE, in which case 1 is returned. If the device is in a different state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the device, in which case the usage counter will remain unmodified.
1268 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1280 * -EINVAL is returned if runtime PM is disabled for the device, in which case the usage counter will remain unmodified.
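Typical use of the conditional get: take a reference only if the device is already active and in use, e.g. from a path that must not wait for a resume. Sketch, foo_poll() is hypothetical:

static void foo_poll(struct device *dev)
{
	/* > 0 means usage_count was bumped and the device is RPM_ACTIVE. */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	/* ... access the hardware safely ... */

	pm_runtime_put(dev);
}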
1290 * __pm_runtime_set_status - Set runtime PM status of a device.
1291 * @dev: Device to handle.
1295 * If runtime PM of the device is disabled or its power.runtime_error field is different from zero, the status may be changed either to RPM_ACTIVE, or to RPM_SUSPENDED, as long as that reflects the actual state of the device.
1299 * However, if the device has a parent that is not active, with runtime PM enabled and the power.ignore_children flag unset, the device's status cannot be set to RPM_ACTIVE, so -EBUSY is returned in that case.
1302 * If successful, this function clears the device's power.runtime_error field, and the device parent's counter of unsuspended children is modified to
1303 * reflect the new status. If the new status is RPM_SUSPENDED, an idle notification request for the parent is submitted.
1306 * If @dev has any suppliers (as reflected by device links to them), and @status is RPM_ACTIVE, they will be activated upfront, and if the activation of one
1308 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead of setting it to RPM_ACTIVE).
1315 struct device *parent = dev->parent; in __pm_runtime_set_status()
1321 return -EINVAL; in __pm_runtime_set_status()
1323 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1326 * Prevent PM-runtime from being enabled for the device, or return an error if it is enabled already and working. in __pm_runtime_set_status()
1329 if (dev->power.runtime_error || dev->power.disable_depth) in __pm_runtime_set_status()
1330 dev->power.disable_depth++; in __pm_runtime_set_status()
1332 error = -EAGAIN; in __pm_runtime_set_status()
1334 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1343 * If the new status is RPM_ACTIVE, the suppliers can be activated upfront, because next time rpm_put_suppliers() runs, the rpm_active refcounts of the device links involved will be dropped down to one anyway. in __pm_runtime_set_status()
1355 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1357 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1361 atomic_add_unless(&parent->power.child_count, -1, 0); in __pm_runtime_set_status()
1362 notify_parent = !parent->power.ignore_children; in __pm_runtime_set_status()
1364 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); in __pm_runtime_set_status()
1367 * It is invalid to put an active child under a parent that is not active, has runtime PM enabled, and has the power.ignore_children flag unset. in __pm_runtime_set_status()
1371 if (!parent->power.disable_depth && in __pm_runtime_set_status()
1372 !parent->power.ignore_children && in __pm_runtime_set_status()
1373 parent->power.runtime_status != RPM_ACTIVE) { in __pm_runtime_set_status()
1374 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", in __pm_runtime_set_status()
1377 error = -EBUSY; in __pm_runtime_set_status()
1378 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1379 atomic_inc(&parent->power.child_count); in __pm_runtime_set_status()
1382 spin_unlock(&parent->power.lock); in __pm_runtime_set_status()
1393 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1396 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
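Probe-time sketch for hardware that comes up already powered; pm_runtime_set_active() is the header wrapper for __pm_runtime_set_status(dev, RPM_ACTIVE) (foo_probe() is hypothetical):

static int foo_probe(struct device *dev)
{
	int ret;

	ret = pm_runtime_set_active(dev);	/* status := RPM_ACTIVE */
	if (ret)
		return ret;

	pm_runtime_enable(dev);			/* drop disable_depth */
	return 0;
}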
1416 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1417 * @dev: Device to handle.
1420 * Flush all pending requests for the device from pm_wq and wait for all runtime PM operations involving the device in progress to complete.
1422 * Should be called under dev->power.lock with interrupts disabled.
1428 if (dev->power.request_pending) { in __pm_runtime_barrier()
1429 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1430 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1432 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1434 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1435 dev->power.request_pending = false; in __pm_runtime_barrier()
1438 if (dev->power.runtime_status == RPM_SUSPENDING || in __pm_runtime_barrier()
1439 dev->power.runtime_status == RPM_RESUMING || in __pm_runtime_barrier()
1440 dev->power.idle_notification) { in __pm_runtime_barrier()
1443 /* Suspend, wake-up or idle notification in progress. */ in __pm_runtime_barrier()
1445 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1447 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1448 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1449 && !dev->power.idle_notification) in __pm_runtime_barrier()
1451 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1455 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1457 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1462 * pm_runtime_barrier - Flush pending requests and wait for completions.
1463 * @dev: Device to handle.
1469 * Prevent the device from being suspended by incrementing its usage counter, flush all pending requests from pm_wq, and wait for all runtime PM operations involving the device in progress to complete.
1472 * Return value: 1 if there was a resume request pending and the device had to be woken up, or 0 otherwise.
1480 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1482 if (dev->power.request_pending in pm_runtime_barrier()
1483 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1490 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1501 spin_lock_irq(&dev->power.lock); in pm_runtime_block_if_disabled()
1504 if (ret && dev->power.last_status == RPM_INVALID) in pm_runtime_block_if_disabled()
1505 dev->power.last_status = RPM_BLOCKED; in pm_runtime_block_if_disabled()
1507 spin_unlock_irq(&dev->power.lock); in pm_runtime_block_if_disabled()
1514 spin_lock_irq(&dev->power.lock); in pm_runtime_unblock()
1516 if (dev->power.last_status == RPM_BLOCKED) in pm_runtime_unblock()
1517 dev->power.last_status = RPM_INVALID; in pm_runtime_unblock()
1519 spin_unlock_irq(&dev->power.lock); in pm_runtime_unblock()
1524 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1526 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1527 dev->power.disable_depth++; in __pm_runtime_disable()
1533 * Wake up the device if there's a resume request pending, because that means there probably is some I/O to process and disabling runtime PM shouldn't prevent the device from processing it. in __pm_runtime_disable()
1536 if (check_resume && dev->power.request_pending && in __pm_runtime_disable()
1537 dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1539 * Prevent suspends and idle notifications from being carried out after we have woken up the device. in __pm_runtime_disable()
1549 /* Update time accounting before disabling PM-runtime. */ in __pm_runtime_disable()
1552 if (!dev->power.disable_depth++) { in __pm_runtime_disable()
1554 dev->power.last_status = dev->power.runtime_status; in __pm_runtime_disable()
1558 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1563 * pm_runtime_enable - Enable runtime PM of a device.
1564 * @dev: Device to handle.
1570 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1572 if (!dev->power.disable_depth) { in pm_runtime_enable()
1577 if (--dev->power.disable_depth > 0) in pm_runtime_enable()
1580 if (dev->power.last_status == RPM_BLOCKED) { in pm_runtime_enable()
1581 dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n"); in pm_runtime_enable()
1584 dev->power.last_status = RPM_INVALID; in pm_runtime_enable()
1585 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); in pm_runtime_enable()
1587 if (dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1588 !dev->power.ignore_children && in pm_runtime_enable()
1589 atomic_read(&dev->power.child_count) > 0) in pm_runtime_enable()
1593 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
1603 * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
1605 * @dev: Device to handle.
1630 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1635 * @dev: Device to handle.
1651 * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
1653 * @dev: Device to handle.
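With the devres variants, the disable/cleanup calls disappear from the driver's error and remove paths. Sketch (foo_probe() is hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* pm_runtime_disable() runs automatically on driver detach. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	return 0;
}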
1664 * pm_runtime_forbid - Block runtime PM of a device.
1665 * @dev: Device to handle.
1673 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1674 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1677 dev->power.runtime_auto = false; in pm_runtime_forbid()
1678 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1682 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1687 * pm_runtime_allow - Unblock runtime PM of a device.
1688 * @dev: Device to handle.
1696 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1697 if (dev->power.runtime_auto) in pm_runtime_allow()
1700 dev->power.runtime_auto = true; in pm_runtime_allow()
1708 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1713 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1714 * @dev: Device to handle.
1717 * Set the power.no_callbacks flag, which tells the PM core that this device is power-managed through its parent and has no runtime PM callbacks of its own. in pm_runtime_no_callbacks()
1722 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1723 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1724 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1731 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1732 * @dev: Device to handle
1735 * Set the power.irq_safe flag, which tells the PM core that the ->runtime_suspend() and ->runtime_resume() callbacks for this device should always be invoked with the spinlock held and interrupts disabled. It also
1737 * causes the parent's usage counter to be permanently incremented, preventing
1738 * the parent from runtime suspending -- otherwise an irq-safe child might have
1739 * to wait for a non-irq-safe parent.
1743 if (dev->parent) in pm_runtime_irq_safe()
1744 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1746 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1747 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1748 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
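Opting in is a single probe-time call; note the permanent reference it takes on the parent, as described above:

	pm_runtime_irq_safe(dev);	/* callbacks now run with the lock held */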
1753 * update_autosuspend - Handle a change to a device's autosuspend settings.
1754 * @dev: Device to handle.
1758 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1759 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1761 * This function must be called under dev->power.lock with interrupts disabled.
1765 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1767 /* Should runtime suspend be prevented now? */ in update_autosuspend()
1768 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1770 /* If it used to be allowed then prevent it. */ in update_autosuspend()
1772 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1779 /* Runtime suspend should be allowed now. */ in update_autosuspend()
1782 /* If it used to be prevented then allow it. */ in update_autosuspend()
1784 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1792 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1793 * @dev: Device to handle.
1796 * Set the device's power.autosuspend_delay value. If it changes to negative and the power.use_autosuspend flag is set, prevent runtime suspends; if it changes the other way, allow them.
1804 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1805 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1806 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1807 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1809 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1814 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1815 * @dev: Device to handle.
1825 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1826 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1827 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1828 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1830 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
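The usual probe-time pairing of the two setters above (2000 ms is an arbitrary example delay):

	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);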
1835 * pm_runtime_init - Initialize runtime PM fields in given device object.
1836 * @dev: Device object to initialize.
1840 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1841 dev->power.last_status = RPM_INVALID; in pm_runtime_init()
1842 dev->power.idle_notification = false; in pm_runtime_init()
1844 dev->power.disable_depth = 1; in pm_runtime_init()
1845 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1847 dev->power.runtime_error = 0; in pm_runtime_init()
1849 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1851 dev->power.runtime_auto = true; in pm_runtime_init()
1853 dev->power.request_pending = false; in pm_runtime_init()
1854 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1855 dev->power.deferred_resume = false; in pm_runtime_init()
1856 dev->power.needs_force_resume = false; in pm_runtime_init()
1857 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1859 dev->power.timer_expires = 0; in pm_runtime_init()
1860 hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC, in pm_runtime_init()
1863 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1867 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1868 * @dev: Device object to re-initialize.
1873 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1875 if (dev->power.irq_safe) { in pm_runtime_reinit()
1876 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1877 dev->power.irq_safe = 0; in pm_runtime_reinit()
1878 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1879 if (dev->parent) in pm_runtime_reinit()
1880 pm_runtime_put(dev->parent); in pm_runtime_reinit()
1887 dev->power.needs_force_resume = false; in pm_runtime_reinit()
1891 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1901 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1913 link->supplier_preactivated = true; in pm_runtime_get_suppliers()
1914 pm_runtime_get_sync(link->supplier); in pm_runtime_get_suppliers()
1921 * pm_runtime_put_suppliers - Drop references to supplier devices.
1931 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_put_suppliers()
1933 if (link->supplier_preactivated) { in pm_runtime_put_suppliers()
1934 link->supplier_preactivated = false; in pm_runtime_put_suppliers()
1935 pm_runtime_put(link->supplier); in pm_runtime_put_suppliers()
1943 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1944 dev->power.links_count++; in pm_runtime_new_link()
1945 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1950 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1951 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link_count()
1952 dev->power.links_count--; in pm_runtime_drop_link_count()
1953 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1957 * pm_runtime_drop_link - Prepare for device link removal.
1961 * Drop the link count of the consumer device of @link and decrement the supplier device's runtime PM usage counter as many times as needed to drop all of the
1962 * PM runtime references to it from the consumer.
1969 pm_runtime_drop_link_count(link->consumer); in pm_runtime_drop_link()
1971 pm_request_idle(link->supplier); in pm_runtime_drop_link()
1978 * code does not want its runtime PM callbacks to be invoked via in get_callback()
1980 * return a direct pointer to the driver callback in that case. in get_callback()
1992 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1993 * @dev: Device to suspend.
1996 * Disable runtime PM so that it cannot be started again by anyone else, and if it is active, invoke its ->runtime_suspend callback to suspend it and
1997 * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's runtime PM status will remain RPM_SUSPENDED after
1999 * the system-wide transition under way, decrement its parent's children counter
2000 * (if there is a parent). Keep runtime PM disabled to preserve the state
2003 * Typically this function may be invoked from a system suspend callback to make
2005 * system-wide PM transitions to sleep states. It assumes that the analogous
2006 * pm_runtime_force_resume() will be used to resume the device.
2014 if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume) in pm_runtime_force_suspend()
2027 * If the device can stay in suspend after the system-wide transition in pm_runtime_force_suspend()
2028 * to the working state that will follow, drop the children counter of its parent (if there is one); otherwise, set in pm_runtime_force_suspend()
2030 * power.needs_force_resume to let pm_runtime_force_resume() know that in pm_runtime_force_suspend()
2031 * the device needs to be taken care of and to prevent this function in pm_runtime_force_suspend()
2032 * from handling the device again in case the device is passed to it once more later on. in pm_runtime_force_suspend()
2038 dev->power.needs_force_resume = true; in pm_runtime_force_suspend()
2052 * pm_runtime_force_resume - Force a device into resume state if needed.
2053 * @dev: Device to resume.
2056 * A prior invocation of pm_runtime_force_suspend() is expected to have put the device into a low-power state prior to calling it, or the device had been
2057 * runtime-suspended before the preceding system-wide suspend transition and it
2058 * was left in suspend during that transition.
2061 * In that case, the actions carried out by pm_runtime_force_suspend(), or by runtime suspend in general, are reversed and the device is brought back into full
2062 * power if it is expected to be used on system resume, which is the case when its power.needs_force_resume flag is set or when it was smart-suspended with an active runtime PM status.
2066 * In other cases, the resume is deferred to be managed via runtime PM.
2075 if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) || in pm_runtime_force_resume()
2094 * The smart_suspend flag can be cleared here because it is not going to be necessary until the next system-wide suspend transition that will update it again. in pm_runtime_force_resume()
2097 dev->power.smart_suspend = false; in pm_runtime_force_resume()
2099 * Also clear needs_force_resume to make this function skip devices that have already been handled during this transition. in pm_runtime_force_resume()
2102 dev->power.needs_force_resume = false; in pm_runtime_force_resume()
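These two functions are what the convenience macros in pm_runtime.h wire into system sleep. A sketch with stub runtime callbacks (foo names hypothetical):

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

/*
 * System sleep reuses the runtime path: .suspend ends up in
 * pm_runtime_force_suspend(), .resume in pm_runtime_force_resume().
 */
static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				 foo_runtime_resume, NULL);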
2111 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
2112 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
2113 dev->power.ignore_children); in pm_runtime_need_not_resume()