1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/runtime.c - Helper functions for device runtime PM
4 *
5 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
7 */
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <linux/rculist.h>
15 #include <trace/events/rpm.h>
16
17 #include "../base.h"
18 #include "power.h"
19
20 typedef int (*pm_callback_t)(struct device *);
21
22 static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
23 {
24 return *(pm_callback_t *)(start + offset);
25 }
26
27 static pm_callback_t __rpm_get_driver_callback(struct device *dev,
28 size_t cb_offset)
29 {
30 if (dev->driver && dev->driver->pm)
31 return get_callback_ptr(dev->driver->pm, cb_offset);
32
33 return NULL;
34 }
35
36 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
37 {
38 const struct dev_pm_ops *ops;
39 pm_callback_t cb = NULL;
40
41 if (dev->pm_domain)
42 ops = &dev->pm_domain->ops;
43 else if (dev->type && dev->type->pm)
44 ops = dev->type->pm;
45 else if (dev->class && dev->class->pm)
46 ops = dev->class->pm;
47 else if (dev->bus && dev->bus->pm)
48 ops = dev->bus->pm;
49 else
50 ops = NULL;
51
52 if (ops)
53 cb = get_callback_ptr(ops, cb_offset);
54
55 if (!cb)
56 cb = __rpm_get_driver_callback(dev, cb_offset);
57
58 return cb;
59 }
60
61 #define RPM_GET_CALLBACK(dev, callback) \
62 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
63
64 static int rpm_resume(struct device *dev, int rpmflags);
65 static int rpm_suspend(struct device *dev, int rpmflags);
66
67 /**
68 * update_pm_runtime_accounting - Update the time accounting of power states
69 * @dev: Device to update the accounting for
70 *
71 * In order to be able to have time accounting of the various power states
72 * (as used by programs such as PowerTOP to show the effectiveness of runtime
73 * PM), we need to track the time spent in each state.
74 * update_pm_runtime_accounting must be called each time before the
75 * runtime_status field is updated, to account the time in the old state
76 * correctly.
77 */
78 static void update_pm_runtime_accounting(struct device *dev)
79 {
80 u64 now, last, delta;
81
82 if (dev->power.disable_depth > 0)
83 return;
84
85 last = dev->power.accounting_timestamp;
86
87 now = ktime_get_mono_fast_ns();
88 dev->power.accounting_timestamp = now;
89
90 /*
91 * Because ktime_get_mono_fast_ns() is not monotonic during
92 * timekeeping updates, ensure that 'now' is after the last saved
93 * timestamp.
94 */
95 if (now < last)
96 return;
97
98 delta = now - last;
99
100 if (dev->power.runtime_status == RPM_SUSPENDED)
101 dev->power.suspended_time += delta;
102 else
103 dev->power.active_time += delta;
104 }
105
106 static void __update_runtime_status(struct device *dev, enum rpm_status status)
107 {
108 update_pm_runtime_accounting(dev);
109 trace_rpm_status(dev, status);
110 dev->power.runtime_status = status;
111 }
112
113 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
114 {
115 u64 time;
116 unsigned long flags;
117
118 spin_lock_irqsave(&dev->power.lock, flags);
119
120 update_pm_runtime_accounting(dev);
121 time = suspended ? dev->power.suspended_time : dev->power.active_time;
122
123 spin_unlock_irqrestore(&dev->power.lock, flags);
124
125 return time;
126 }
127
128 u64 pm_runtime_active_time(struct device *dev)
129 {
130 return rpm_get_accounted_time(dev, false);
131 }
132
133 u64 pm_runtime_suspended_time(struct device *dev)
134 {
135 return rpm_get_accounted_time(dev, true);
136 }
137 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
138
139 /**
140 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
141 * @dev: Device to handle.
142 */
143 static void pm_runtime_deactivate_timer(struct device *dev)
144 {
145 if (dev->power.timer_expires > 0) {
146 hrtimer_try_to_cancel(&dev->power.suspend_timer);
147 dev->power.timer_expires = 0;
148 }
149 }
150
151 /**
152 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
153 * @dev: Device to handle.
154 */
155 static void pm_runtime_cancel_pending(struct device *dev)
156 {
157 pm_runtime_deactivate_timer(dev);
158 /*
159 * In case there's a request pending, make sure its work function will
160 * return without doing anything.
161 */
162 dev->power.request = RPM_REQ_NONE;
163 }
164
165 /*
166 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
167 * @dev: Device to handle.
168 *
169 * Compute the autosuspend-delay expiration time based on the device's
170 * power.last_busy time. If the delay has already expired or is disabled
171 * (negative) or the power.use_autosuspend flag isn't set, return 0.
172 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
173 *
174 * This function may be called either with or without dev->power.lock held.
175 * Either way it can be racy, since power.last_busy may be updated at any time.
176 */
177 u64 pm_runtime_autosuspend_expiration(struct device *dev)
178 {
179 int autosuspend_delay;
180 u64 expires;
181
182 if (!dev->power.use_autosuspend)
183 return 0;
184
185 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
186 if (autosuspend_delay < 0)
187 return 0;
188
189 expires = READ_ONCE(dev->power.last_busy);
190 expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
191 if (expires > ktime_get_mono_fast_ns())
192 return expires; /* Expires in the future */
193
194 return 0;
195 }
196 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
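/*
 * Illustrative sketch (not part of this file): a driver typically feeds this
 * expiration logic by updating last_busy and then dropping its reference with
 * the autosuspend variant of put. foo_complete_io() is a hypothetical helper.
 *
 *	static void foo_complete_io(struct foo_device *foo)
 *	{
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *	}
 */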
197
198 static int dev_memalloc_noio(struct device *dev, void *data)
199 {
200 return dev->power.memalloc_noio;
201 }
202
203 /*
204 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
205 * @dev: Device to handle.
206 * @enable: True for setting the flag and False for clearing the flag.
207 *
208 * Set the flag for all devices in the path from the device to the
209 * root device in the device tree if @enable is true, otherwise clear
210 * the flag for devices in the path whose siblings don't set the flag.
211 *
212 * The function should only be called by block device or network
213 * device drivers, to solve a deadlock problem during runtime
214 * resume/suspend:
215 *
216 * If a memory allocation with GFP_KERNEL is made inside the runtime
217 * resume/suspend callback of any of the block device's ancestors (or
218 * of the block device itself), a deadlock may be triggered inside the
219 * memory allocation, since it might not complete until the block
220 * device becomes active and the involved page I/O finishes. This
221 * situation was first pointed out by Alan Stern. Network devices
222 * can be involved in similar situations via iSCSI.
223 *
224 * dev_hotplug_mutex is held in this function to handle hotplug races,
225 * because pm_runtime_set_memalloc_noio() may be called from async
226 * probe().
227 *
228 * The function should be called between device_add() and device_del()
229 * on the affected (block or network) device.
230 */
231 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
232 {
233 static DEFINE_MUTEX(dev_hotplug_mutex);
234
235 mutex_lock(&dev_hotplug_mutex);
236 for (;;) {
237 bool enabled;
238
239 /* hold power lock since bitfield is not SMP-safe. */
240 spin_lock_irq(&dev->power.lock);
241 enabled = dev->power.memalloc_noio;
242 dev->power.memalloc_noio = enable;
243 spin_unlock_irq(&dev->power.lock);
244
245 /*
246 * No need to enable the ancestors any more if the device
247 * has already been enabled.
248 */
249 if (enabled && enable)
250 break;
251
252 dev = dev->parent;
253
254 /*
255 * Clear the parent device's flag only if none of the
256 * children have the flag set, because an ancestor's
257 * flag may have been set by any one of its descendants.
258 */
259 if (!dev || (!enable &&
260 device_for_each_child(dev, NULL, dev_memalloc_noio)))
261 break;
262 }
263 mutex_unlock(&dev_hotplug_mutex);
264 }
265 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
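/*
 * Illustrative sketch (not part of this file): a block or network device
 * driver would bracket the flag between device registration and removal.
 * foo_add_disk() and foo_remove_disk() are hypothetical helpers.
 *
 *	static int foo_add_disk(struct foo_device *foo)
 *	{
 *		int ret = device_add(&foo->dev);
 *
 *		if (ret)
 *			return ret;
 *		pm_runtime_set_memalloc_noio(&foo->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_remove_disk(struct foo_device *foo)
 *	{
 *		pm_runtime_set_memalloc_noio(&foo->dev, false);
 *		device_del(&foo->dev);
 *	}
 */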
266
267 /**
268 * rpm_check_suspend_allowed - Test whether a device may be suspended.
269 * @dev: Device to test.
270 */
271 static int rpm_check_suspend_allowed(struct device *dev)
272 {
273 int retval = 0;
274
275 if (dev->power.runtime_error)
276 retval = -EINVAL;
277 else if (dev->power.disable_depth > 0)
278 retval = -EACCES;
279 else if (atomic_read(&dev->power.usage_count))
280 retval = -EAGAIN;
281 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
282 retval = -EBUSY;
283
284 /* Pending resume requests take precedence over suspends. */
285 else if ((dev->power.deferred_resume &&
286 dev->power.runtime_status == RPM_SUSPENDING) ||
287 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
288 retval = -EAGAIN;
289 else if (__dev_pm_qos_resume_latency(dev) == 0)
290 retval = -EPERM;
291 else if (dev->power.runtime_status == RPM_SUSPENDED)
292 retval = 1;
293
294 return retval;
295 }
296
297 static int rpm_get_suppliers(struct device *dev)
298 {
299 struct device_link *link;
300
301 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
302 device_links_read_lock_held()) {
303 int retval;
304
305 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
306 continue;
307
308 retval = pm_runtime_get_sync(link->supplier);
309 /* Ignore suppliers with disabled runtime PM. */
310 if (retval < 0 && retval != -EACCES) {
311 pm_runtime_put_noidle(link->supplier);
312 return retval;
313 }
314 refcount_inc(&link->rpm_active);
315 }
316 return 0;
317 }
318
319 /**
320 * pm_runtime_release_supplier - Drop references to device link's supplier.
321 * @link: Target device link.
322 *
323 * Drop all runtime PM references associated with @link to its supplier device.
324 */
325 void pm_runtime_release_supplier(struct device_link *link)
326 {
327 struct device *supplier = link->supplier;
328
329 /*
330 * The additional power.usage_count check is a safety net in case
331 * the rpm_active refcount becomes saturated, in which case
332 * refcount_dec_not_one() would return true forever, but it is not
333 * strictly necessary.
334 */
335 while (refcount_dec_not_one(&link->rpm_active) &&
336 atomic_read(&supplier->power.usage_count) > 0)
337 pm_runtime_put_noidle(supplier);
338 }
339
340 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
341 {
342 struct device_link *link;
343
344 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
345 device_links_read_lock_held()) {
346 pm_runtime_release_supplier(link);
347 if (try_to_suspend)
348 pm_request_idle(link->supplier);
349 }
350 }
351
352 static void rpm_put_suppliers(struct device *dev)
353 {
354 __rpm_put_suppliers(dev, true);
355 }
356
357 static void rpm_suspend_suppliers(struct device *dev)
358 {
359 struct device_link *link;
360 int idx = device_links_read_lock();
361
362 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
363 device_links_read_lock_held())
364 pm_request_idle(link->supplier);
365
366 device_links_read_unlock(idx);
367 }
368
369 /**
370 * __rpm_callback - Run a given runtime PM callback for a given device.
371 * @cb: Runtime PM callback to run.
372 * @dev: Device to run the callback for.
373 */
374 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
375 __releases(&dev->power.lock) __acquires(&dev->power.lock)
376 {
377 int retval = 0, idx;
378 bool use_links = dev->power.links_count > 0;
379
380 if (dev->power.irq_safe) {
381 spin_unlock(&dev->power.lock);
382 } else {
383 spin_unlock_irq(&dev->power.lock);
384
385 /*
386 * Resume suppliers if necessary.
387 *
388 * The device's runtime PM status cannot change until this
389 * routine returns, so it is safe to read the status outside of
390 * the lock.
391 */
392 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
393 idx = device_links_read_lock();
394
395 retval = rpm_get_suppliers(dev);
396 if (retval) {
397 rpm_put_suppliers(dev);
398 goto fail;
399 }
400
401 device_links_read_unlock(idx);
402 }
403 }
404
405 if (cb)
406 retval = cb(dev);
407
408 if (dev->power.irq_safe) {
409 spin_lock(&dev->power.lock);
410 } else {
411 /*
412 * If the device is suspending and the callback has returned
413 * success, drop the usage counters of the suppliers that have
414 * been reference counted on its resume.
415 *
416 * Do that if resume fails too.
417 */
418 if (use_links &&
419 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
420 (dev->power.runtime_status == RPM_RESUMING && retval))) {
421 idx = device_links_read_lock();
422
423 __rpm_put_suppliers(dev, false);
424
425 fail:
426 device_links_read_unlock(idx);
427 }
428
429 spin_lock_irq(&dev->power.lock);
430 }
431
432 return retval;
433 }
434
435 /**
436 * rpm_callback - Run a given runtime PM callback for a given device.
437 * @cb: Runtime PM callback to run.
438 * @dev: Device to run the callback for.
439 */
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
441 {
442 int retval;
443
444 if (dev->power.memalloc_noio) {
445 unsigned int noio_flag;
446
447 /*
448 * A deadlock might occur if a memory allocation with
449 * GFP_KERNEL happens inside the runtime_suspend or
450 * runtime_resume callback of a block device's
451 * ancestor or of the block device itself. A network
452 * device may effectively be part of an iSCSI block
453 * device, so network devices and their ancestors should
454 * be marked as memalloc_noio too.
455 */
456 noio_flag = memalloc_noio_save();
457 retval = __rpm_callback(cb, dev);
458 memalloc_noio_restore(noio_flag);
459 } else {
460 retval = __rpm_callback(cb, dev);
461 }
462
463 /*
464 * Since -EACCES means that runtime PM is disabled for the given device,
465 * it should not be returned by runtime PM callbacks. If it is returned
466 * nevertheless, assume it to be a transient error and convert it to
467 * -EAGAIN.
468 */
469 if (retval == -EACCES)
470 retval = -EAGAIN;
471
472 if (retval != -EAGAIN && retval != -EBUSY)
473 dev->power.runtime_error = retval;
474
475 return retval;
476 }
477
478 /**
479 * rpm_idle - Notify device bus type if the device can be suspended.
480 * @dev: Device to notify the bus type about.
481 * @rpmflags: Flag bits.
482 *
483 * Check if the device's runtime PM status allows it to be suspended. If
484 * another idle notification has been started earlier, return immediately. If
485 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
486 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
487 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
488 *
489 * This function must be called under dev->power.lock with interrupts disabled.
490 */
491 static int rpm_idle(struct device *dev, int rpmflags)
492 {
493 int (*callback)(struct device *);
494 int retval;
495
496 trace_rpm_idle(dev, rpmflags);
497 retval = rpm_check_suspend_allowed(dev);
498 if (retval < 0)
499 ; /* Conditions are wrong. */
500
501 else if ((rpmflags & RPM_GET_PUT) && retval == 1)
502 ; /* put() is allowed in RPM_SUSPENDED */
503
504 /* Idle notifications are allowed only in the RPM_ACTIVE state. */
505 else if (dev->power.runtime_status != RPM_ACTIVE)
506 retval = -EAGAIN;
507
508 /*
509 * Any pending request other than an idle notification takes
510 * precedence over us, except that the timer may be running.
511 */
512 else if (dev->power.request_pending &&
513 dev->power.request > RPM_REQ_IDLE)
514 retval = -EAGAIN;
515
516 /* Act as though RPM_NOWAIT is always set. */
517 else if (dev->power.idle_notification)
518 retval = -EINPROGRESS;
519
520 if (retval)
521 goto out;
522
523 /* Pending requests need to be canceled. */
524 dev->power.request = RPM_REQ_NONE;
525
526 callback = RPM_GET_CALLBACK(dev, runtime_idle);
527
528 /* If no callback assume success. */
529 if (!callback || dev->power.no_callbacks)
530 goto out;
531
532 /* Carry out an asynchronous or a synchronous idle notification. */
533 if (rpmflags & RPM_ASYNC) {
534 dev->power.request = RPM_REQ_IDLE;
535 if (!dev->power.request_pending) {
536 dev->power.request_pending = true;
537 queue_work(pm_wq, &dev->power.work);
538 }
539 trace_rpm_return_int(dev, _THIS_IP_, 0);
540 return 0;
541 }
542
543 dev->power.idle_notification = true;
544
545 if (dev->power.irq_safe)
546 spin_unlock(&dev->power.lock);
547 else
548 spin_unlock_irq(&dev->power.lock);
549
550 retval = callback(dev);
551
552 if (dev->power.irq_safe)
553 spin_lock(&dev->power.lock);
554 else
555 spin_lock_irq(&dev->power.lock);
556
557 dev->power.idle_notification = false;
558 wake_up_all(&dev->power.wait_queue);
559
560 out:
561 trace_rpm_return_int(dev, _THIS_IP_, retval);
562 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
563 }
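/*
 * Illustrative sketch (not part of this file): drivers reach rpm_idle()
 * through the public wrappers, e.g. a synchronous check or an asynchronous
 * request once a device has nothing left to do.
 *
 *	pm_runtime_idle(dev);		(synchronous idle notification)
 *	pm_request_idle(dev);		(asynchronous, queued on pm_wq)
 */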
564
565 /**
566 * rpm_suspend - Carry out runtime suspend of given device.
567 * @dev: Device to suspend.
568 * @rpmflags: Flag bits.
569 *
570 * Check if the device's runtime PM status allows it to be suspended.
571 * Cancel a pending idle notification, autosuspend or suspend. If
572 * another suspend has been started earlier, either return immediately
573 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
574 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
575 * otherwise run the ->runtime_suspend() callback directly. If
576 * ->runtime_suspend() succeeds and a deferred resume was requested while
577 * the callback was running, carry it out; otherwise send an idle
578 * notification for the device's parent (provided the suspend succeeded and
579 * neither parent->power.ignore_children nor dev->power.irq_safe is set).
580 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
581 * flag is set and the next autosuspend-delay expiration time is in the
582 * future, schedule another autosuspend attempt.
583 *
584 * This function must be called under dev->power.lock with interrupts disabled.
585 */
586 static int rpm_suspend(struct device *dev, int rpmflags)
587 __releases(&dev->power.lock) __acquires(&dev->power.lock)
588 {
589 int (*callback)(struct device *);
590 struct device *parent = NULL;
591 int retval;
592
593 trace_rpm_suspend(dev, rpmflags);
594
595 repeat:
596 retval = rpm_check_suspend_allowed(dev);
597 if (retval < 0)
598 goto out; /* Conditions are wrong. */
599
600 /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
601 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
602 retval = -EAGAIN;
603
604 if (retval)
605 goto out;
606
607 /* If the autosuspend_delay time hasn't expired yet, reschedule. */
608 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
609 u64 expires = pm_runtime_autosuspend_expiration(dev);
610
611 if (expires != 0) {
612 /* Pending requests need to be canceled. */
613 dev->power.request = RPM_REQ_NONE;
614
615 /*
616 * Optimization: If the timer is already running and is
617 * set to expire at or before the autosuspend delay,
618 * avoid the overhead of resetting it. Just let it
619 * expire; pm_suspend_timer_fn() will take care of the
620 * rest.
621 */
622 if (!(dev->power.timer_expires &&
623 dev->power.timer_expires <= expires)) {
624 /*
625 * We add a slack of 25% to gather wakeups
626 * without sacrificing the granularity.
627 */
628 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
629 (NSEC_PER_MSEC >> 2);
630
631 dev->power.timer_expires = expires;
632 hrtimer_start_range_ns(&dev->power.suspend_timer,
633 ns_to_ktime(expires),
634 slack,
635 HRTIMER_MODE_ABS);
636 }
637 dev->power.timer_autosuspends = 1;
638 goto out;
639 }
640 }
641
642 /* Other scheduled or pending requests need to be canceled. */
643 pm_runtime_cancel_pending(dev);
644
645 if (dev->power.runtime_status == RPM_SUSPENDING) {
646 DEFINE_WAIT(wait);
647
648 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
649 retval = -EINPROGRESS;
650 goto out;
651 }
652
653 if (dev->power.irq_safe) {
654 spin_unlock(&dev->power.lock);
655
656 cpu_relax();
657
658 spin_lock(&dev->power.lock);
659 goto repeat;
660 }
661
662 /* Wait for the other suspend running in parallel with us. */
663 for (;;) {
664 prepare_to_wait(&dev->power.wait_queue, &wait,
665 TASK_UNINTERRUPTIBLE);
666 if (dev->power.runtime_status != RPM_SUSPENDING)
667 break;
668
669 spin_unlock_irq(&dev->power.lock);
670
671 schedule();
672
673 spin_lock_irq(&dev->power.lock);
674 }
675 finish_wait(&dev->power.wait_queue, &wait);
676 goto repeat;
677 }
678
679 if (dev->power.no_callbacks)
680 goto no_callback; /* Assume success. */
681
682 /* Carry out an asynchronous or a synchronous suspend. */
683 if (rpmflags & RPM_ASYNC) {
684 dev->power.request = (rpmflags & RPM_AUTO) ?
685 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
686 if (!dev->power.request_pending) {
687 dev->power.request_pending = true;
688 queue_work(pm_wq, &dev->power.work);
689 }
690 goto out;
691 }
692
693 __update_runtime_status(dev, RPM_SUSPENDING);
694
695 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
696
697 dev_pm_enable_wake_irq_check(dev, true);
698 retval = rpm_callback(callback, dev);
699 if (retval)
700 goto fail;
701
702 dev_pm_enable_wake_irq_complete(dev);
703
704 no_callback:
705 __update_runtime_status(dev, RPM_SUSPENDED);
706 pm_runtime_deactivate_timer(dev);
707
708 if (dev->parent) {
709 parent = dev->parent;
710 atomic_add_unless(&parent->power.child_count, -1, 0);
711 }
712 wake_up_all(&dev->power.wait_queue);
713
714 if (dev->power.deferred_resume) {
715 dev->power.deferred_resume = false;
716 rpm_resume(dev, 0);
717 retval = -EAGAIN;
718 goto out;
719 }
720
721 if (dev->power.irq_safe)
722 goto out;
723
724 /* Maybe the parent is now able to suspend. */
725 if (parent && !parent->power.ignore_children) {
726 spin_unlock(&dev->power.lock);
727
728 spin_lock(&parent->power.lock);
729 rpm_idle(parent, RPM_ASYNC);
730 spin_unlock(&parent->power.lock);
731
732 spin_lock(&dev->power.lock);
733 }
734 /* Maybe the suppliers are now able to suspend. */
735 if (dev->power.links_count > 0) {
736 spin_unlock_irq(&dev->power.lock);
737
738 rpm_suspend_suppliers(dev);
739
740 spin_lock_irq(&dev->power.lock);
741 }
742
743 out:
744 trace_rpm_return_int(dev, _THIS_IP_, retval);
745
746 return retval;
747
748 fail:
749 dev_pm_disable_wake_irq_check(dev, true);
750 __update_runtime_status(dev, RPM_ACTIVE);
751 dev->power.deferred_resume = false;
752 wake_up_all(&dev->power.wait_queue);
753
754 /*
755 * On transient errors, if the callback routine failed an autosuspend,
756 * and if the last_busy time has been updated so that there is a new
757 * autosuspend expiration time, automatically reschedule another
758 * autosuspend.
759 */
760 if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
761 pm_runtime_autosuspend_expiration(dev) != 0)
762 goto repeat;
763
764 pm_runtime_cancel_pending(dev);
765
766 goto out;
767 }
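/*
 * Illustrative sketch (not part of this file): driver-facing helpers from
 * <linux/pm_runtime.h> reach rpm_suspend() through __pm_runtime_suspend();
 * roughly, and hedged rather than exhaustive:
 *
 *	pm_runtime_suspend(dev);		(synchronous suspend attempt)
 *	pm_runtime_autosuspend(dev);		(synchronous, honours the autosuspend delay)
 *	pm_runtime_put_autosuspend(dev);	(drops a reference, asynchronous, RPM_AUTO)
 */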
768
769 /**
770 * rpm_resume - Carry out runtime resume of given device.
771 * @dev: Device to resume.
772 * @rpmflags: Flag bits.
773 *
774 * Check if the device's runtime PM status allows it to be resumed. Cancel
775 * any scheduled or pending requests. If another resume has been started
776 * earlier, either return immediately or wait for it to finish, depending on the
777 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
778 * parallel with this function, either tell the other process to resume after
779 * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
780 * flag is set then queue a resume request; otherwise run the
781 * ->runtime_resume() callback directly. Queue an idle notification for the
782 * device if the resume succeeded.
783 *
784 * This function must be called under dev->power.lock with interrupts disabled.
785 */
786 static int rpm_resume(struct device *dev, int rpmflags)
787 __releases(&dev->power.lock) __acquires(&dev->power.lock)
788 {
789 int (*callback)(struct device *);
790 struct device *parent = NULL;
791 int retval = 0;
792
793 trace_rpm_resume(dev, rpmflags);
794
795 repeat:
796 if (dev->power.runtime_error) {
797 retval = -EINVAL;
798 } else if (dev->power.disable_depth > 0) {
799 if (dev->power.runtime_status == RPM_ACTIVE &&
800 dev->power.last_status == RPM_ACTIVE)
801 retval = 1;
802 else if (rpmflags & RPM_TRANSPARENT)
803 goto out;
804 else
805 retval = -EACCES;
806 }
807 if (retval)
808 goto out;
809
810 /*
811 * Other scheduled or pending requests need to be canceled. Small
812 * optimization: If an autosuspend timer is running, leave it running
813 * rather than cancelling it now only to restart it again in the near
814 * future.
815 */
816 dev->power.request = RPM_REQ_NONE;
817 if (!dev->power.timer_autosuspends)
818 pm_runtime_deactivate_timer(dev);
819
820 if (dev->power.runtime_status == RPM_ACTIVE) {
821 retval = 1;
822 goto out;
823 }
824
825 if (dev->power.runtime_status == RPM_RESUMING ||
826 dev->power.runtime_status == RPM_SUSPENDING) {
827 DEFINE_WAIT(wait);
828
829 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
830 if (dev->power.runtime_status == RPM_SUSPENDING) {
831 dev->power.deferred_resume = true;
832 if (rpmflags & RPM_NOWAIT)
833 retval = -EINPROGRESS;
834 } else {
835 retval = -EINPROGRESS;
836 }
837 goto out;
838 }
839
840 if (dev->power.irq_safe) {
841 spin_unlock(&dev->power.lock);
842
843 cpu_relax();
844
845 spin_lock(&dev->power.lock);
846 goto repeat;
847 }
848
849 /* Wait for the operation carried out in parallel with us. */
850 for (;;) {
851 prepare_to_wait(&dev->power.wait_queue, &wait,
852 TASK_UNINTERRUPTIBLE);
853 if (dev->power.runtime_status != RPM_RESUMING &&
854 dev->power.runtime_status != RPM_SUSPENDING)
855 break;
856
857 spin_unlock_irq(&dev->power.lock);
858
859 schedule();
860
861 spin_lock_irq(&dev->power.lock);
862 }
863 finish_wait(&dev->power.wait_queue, &wait);
864 goto repeat;
865 }
866
867 /*
868 * See if we can skip waking up the parent. This is safe only if
869 * power.no_callbacks is set, because otherwise we don't know whether
870 * the resume will actually succeed.
871 */
872 if (dev->power.no_callbacks && !parent && dev->parent) {
873 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
874 if (dev->parent->power.disable_depth > 0 ||
875 dev->parent->power.ignore_children ||
876 dev->parent->power.runtime_status == RPM_ACTIVE) {
877 atomic_inc(&dev->parent->power.child_count);
878 spin_unlock(&dev->parent->power.lock);
879 retval = 1;
880 goto no_callback; /* Assume success. */
881 }
882 spin_unlock(&dev->parent->power.lock);
883 }
884
885 /* Carry out an asynchronous or a synchronous resume. */
886 if (rpmflags & RPM_ASYNC) {
887 dev->power.request = RPM_REQ_RESUME;
888 if (!dev->power.request_pending) {
889 dev->power.request_pending = true;
890 queue_work(pm_wq, &dev->power.work);
891 }
892 retval = 0;
893 goto out;
894 }
895
896 if (!parent && dev->parent) {
897 /*
898 * Increment the parent's usage counter and resume it if
899 * necessary. Not needed if dev is irq-safe; then the
900 * parent is permanently resumed.
901 */
902 parent = dev->parent;
903 if (dev->power.irq_safe)
904 goto skip_parent;
905
906 spin_unlock(&dev->power.lock);
907
908 pm_runtime_get_noresume(parent);
909
910 spin_lock(&parent->power.lock);
911 /*
912 * Resume the parent if it has runtime PM enabled and has not
913 * been set to ignore its children.
914 */
915 if (!parent->power.disable_depth &&
916 !parent->power.ignore_children) {
917 rpm_resume(parent, 0);
918 if (parent->power.runtime_status != RPM_ACTIVE)
919 retval = -EBUSY;
920 }
921 spin_unlock(&parent->power.lock);
922
923 spin_lock(&dev->power.lock);
924 if (retval)
925 goto out;
926
927 goto repeat;
928 }
929 skip_parent:
930
931 if (dev->power.no_callbacks)
932 goto no_callback; /* Assume success. */
933
934 __update_runtime_status(dev, RPM_RESUMING);
935
936 callback = RPM_GET_CALLBACK(dev, runtime_resume);
937
938 dev_pm_disable_wake_irq_check(dev, false);
939 retval = rpm_callback(callback, dev);
940 if (retval) {
941 __update_runtime_status(dev, RPM_SUSPENDED);
942 pm_runtime_cancel_pending(dev);
943 dev_pm_enable_wake_irq_check(dev, false);
944 } else {
945 no_callback:
946 __update_runtime_status(dev, RPM_ACTIVE);
947 pm_runtime_mark_last_busy(dev);
948 if (parent)
949 atomic_inc(&parent->power.child_count);
950 }
951 wake_up_all(&dev->power.wait_queue);
952
953 if (retval >= 0)
954 rpm_idle(dev, RPM_ASYNC);
955
956 out:
957 if (parent && !dev->power.irq_safe) {
958 spin_unlock_irq(&dev->power.lock);
959
960 pm_runtime_put(parent);
961
962 spin_lock_irq(&dev->power.lock);
963 }
964
965 trace_rpm_return_int(dev, _THIS_IP_, retval);
966
967 return retval;
968 }
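/*
 * Illustrative sketch (not part of this file): the common driver pattern that
 * ends up in rpm_resume() is get-before-I/O, put-after-I/O. foo_start_xfer()
 * is a hypothetical helper.
 *
 *	static int foo_start_xfer(struct device *dev)
 *	{
 *		int ret = pm_runtime_resume_and_get(dev);
 *
 *		if (ret < 0)
 *			return ret;
 *		... program the hardware ...
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 */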
969
970 /**
971 * pm_runtime_work - Universal runtime PM work function.
972 * @work: Work structure used for scheduling the execution of this function.
973 *
974 * Use @work to get the device object the work is to be done for, determine what
975 * is to be done and execute the appropriate runtime PM function.
976 */
977 static void pm_runtime_work(struct work_struct *work)
978 {
979 struct device *dev = container_of(work, struct device, power.work);
980 enum rpm_request req;
981
982 spin_lock_irq(&dev->power.lock);
983
984 if (!dev->power.request_pending)
985 goto out;
986
987 req = dev->power.request;
988 dev->power.request = RPM_REQ_NONE;
989 dev->power.request_pending = false;
990
991 switch (req) {
992 case RPM_REQ_NONE:
993 break;
994 case RPM_REQ_IDLE:
995 rpm_idle(dev, RPM_NOWAIT);
996 break;
997 case RPM_REQ_SUSPEND:
998 rpm_suspend(dev, RPM_NOWAIT);
999 break;
1000 case RPM_REQ_AUTOSUSPEND:
1001 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
1002 break;
1003 case RPM_REQ_RESUME:
1004 rpm_resume(dev, RPM_NOWAIT);
1005 break;
1006 }
1007
1008 out:
1009 spin_unlock_irq(&dev->power.lock);
1010 }
1011
1012 /**
1013 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
1014 * @timer: hrtimer used by pm_schedule_suspend().
1015 *
1016 * Check if the time is right and queue a suspend request.
1017 */
1018 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
1019 {
1020 struct device *dev = container_of(timer, struct device, power.suspend_timer);
1021 unsigned long flags;
1022 u64 expires;
1023
1024 spin_lock_irqsave(&dev->power.lock, flags);
1025
1026 expires = dev->power.timer_expires;
1027 /*
1028 * If 'expires' is after the current time, we've been called
1029 * too early.
1030 */
1031 if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
1032 dev->power.timer_expires = 0;
1033 rpm_suspend(dev, dev->power.timer_autosuspends ?
1034 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
1035 }
1036
1037 spin_unlock_irqrestore(&dev->power.lock, flags);
1038
1039 return HRTIMER_NORESTART;
1040 }
1041
1042 /**
1043 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
1044 * @dev: Device to suspend.
1045 * @delay: Time to wait before submitting a suspend request, in milliseconds.
1046 */
1047 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1048 {
1049 unsigned long flags;
1050 u64 expires;
1051 int retval;
1052
1053 spin_lock_irqsave(&dev->power.lock, flags);
1054
1055 if (!delay) {
1056 retval = rpm_suspend(dev, RPM_ASYNC);
1057 goto out;
1058 }
1059
1060 retval = rpm_check_suspend_allowed(dev);
1061 if (retval)
1062 goto out;
1063
1064 /* Other scheduled or pending requests need to be canceled. */
1065 pm_runtime_cancel_pending(dev);
1066
1067 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
1068 dev->power.timer_expires = expires;
1069 dev->power.timer_autosuspends = 0;
1070 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1071
1072 out:
1073 spin_unlock_irqrestore(&dev->power.lock, flags);
1074
1075 return retval;
1076 }
1077 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
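/*
 * Illustrative sketch (not part of this file): a driver that knows its device
 * will be unused for a while can queue a delayed suspend request instead of
 * suspending immediately; the 500 ms value is arbitrary.
 *
 *	pm_schedule_suspend(dev, 500);
 */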
1078
1079 static int rpm_drop_usage_count(struct device *dev)
1080 {
1081 int ret;
1082
1083 ret = atomic_sub_return(1, &dev->power.usage_count);
1084 if (ret >= 0)
1085 return ret;
1086
1087 /*
1088 * Because rpm_resume() does not check the usage counter, it will resume
1089 * the device even if the usage counter is 0 or negative, so it is
1090 * sufficient to increment the usage counter here to reverse the change
1091 * made above.
1092 */
1093 atomic_inc(&dev->power.usage_count);
1094 dev_warn(dev, "Runtime PM usage count underflow!\n");
1095 return -EINVAL;
1096 }
1097
1098 /**
1099 * __pm_runtime_idle - Entry point for runtime idle operations.
1100 * @dev: Device to send idle notification for.
1101 * @rpmflags: Flag bits.
1102 *
1103 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1104 * return immediately if it is larger than zero (if it becomes negative, log a
1105 * warning, increment it, and return an error). Then carry out an idle
1106 * notification, either synchronous or asynchronous.
1107 *
1108 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1109 * or if pm_runtime_irq_safe() has been called.
1110 */
1111 int __pm_runtime_idle(struct device *dev, int rpmflags)
1112 {
1113 unsigned long flags;
1114 int retval;
1115
1116 if (rpmflags & RPM_GET_PUT) {
1117 retval = rpm_drop_usage_count(dev);
1118 if (retval < 0) {
1119 return retval;
1120 } else if (retval > 0) {
1121 trace_rpm_usage(dev, rpmflags);
1122 return 0;
1123 }
1124 }
1125
1126 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1127
1128 spin_lock_irqsave(&dev->power.lock, flags);
1129 retval = rpm_idle(dev, rpmflags);
1130 spin_unlock_irqrestore(&dev->power.lock, flags);
1131
1132 return retval;
1133 }
1134 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1135
1136 /**
1137 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1138 * @dev: Device to suspend.
1139 * @rpmflags: Flag bits.
1140 *
1141 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1142 * return immediately if it is larger than zero (if it becomes negative, log a
1143 * warning, increment it, and return an error). Then carry out a suspend,
1144 * either synchronous or asynchronous.
1145 *
1146 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1147 * or if pm_runtime_irq_safe() has been called.
1148 */
1149 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1150 {
1151 unsigned long flags;
1152 int retval;
1153
1154 if (rpmflags & RPM_GET_PUT) {
1155 retval = rpm_drop_usage_count(dev);
1156 if (retval < 0) {
1157 return retval;
1158 } else if (retval > 0) {
1159 trace_rpm_usage(dev, rpmflags);
1160 return 0;
1161 }
1162 }
1163
1164 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1165
1166 spin_lock_irqsave(&dev->power.lock, flags);
1167 retval = rpm_suspend(dev, rpmflags);
1168 spin_unlock_irqrestore(&dev->power.lock, flags);
1169
1170 return retval;
1171 }
1172 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1173
1174 /**
1175 * __pm_runtime_resume - Entry point for runtime resume operations.
1176 * @dev: Device to resume.
1177 * @rpmflags: Flag bits.
1178 *
1179 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
1180 * carry out a resume, either synchronous or asynchronous.
1181 *
1182 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1183 * or if pm_runtime_irq_safe() has been called.
1184 */
1185 int __pm_runtime_resume(struct device *dev, int rpmflags)
1186 {
1187 unsigned long flags;
1188 int retval;
1189
1190 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1191 dev->power.runtime_status != RPM_ACTIVE);
1192
1193 if (rpmflags & RPM_GET_PUT)
1194 atomic_inc(&dev->power.usage_count);
1195
1196 spin_lock_irqsave(&dev->power.lock, flags);
1197 retval = rpm_resume(dev, rpmflags);
1198 spin_unlock_irqrestore(&dev->power.lock, flags);
1199
1200 return retval;
1201 }
1202 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
1203
1204 /**
1205 * pm_runtime_get_conditional - Conditionally bump up device usage counter.
1206 * @dev: Device to handle.
1207 * @ign_usage_count: Whether or not to look at the current usage counter value.
1208 *
1209 * Return -EINVAL if runtime PM is disabled for @dev.
1210 *
1211 * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
1212 * is set, or (2) @dev is not ignoring children and its active child count is
1213 * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
1214 * the usage counter of @dev and return 1.
1215 *
1216 * Otherwise, return 0 without changing the usage counter.
1217 *
1218 * If @ign_usage_count is %true, this function can be used to prevent suspending
1219 * the device when its runtime PM status is %RPM_ACTIVE.
1220 *
1221 * If @ign_usage_count is %false, this function can be used to prevent
1222 * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
1223 * runtime PM usage counter is not zero.
1224 *
1225 * The caller is responsible for decrementing the runtime PM usage counter of
1226 * @dev after this function has returned a positive value for it.
1227 */
1228 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1229 {
1230 unsigned long flags;
1231 int retval;
1232
1233 spin_lock_irqsave(&dev->power.lock, flags);
1234 if (dev->power.disable_depth > 0) {
1235 retval = -EINVAL;
1236 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1237 retval = 0;
1238 } else if (ign_usage_count || (!dev->power.ignore_children &&
1239 atomic_read(&dev->power.child_count) > 0)) {
1240 retval = 1;
1241 atomic_inc(&dev->power.usage_count);
1242 } else {
1243 retval = atomic_inc_not_zero(&dev->power.usage_count);
1244 }
1245 trace_rpm_usage(dev, 0);
1246 spin_unlock_irqrestore(&dev->power.lock, flags);
1247
1248 return retval;
1249 }
1250
1251 /**
1252 * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is
1253 * in active state
1254 * @dev: Target device.
1255 *
1256 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1257 * %RPM_ACTIVE, in which case it returns 1. If the device is in a different
1258 * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the
1259 * device, in which case the usage_count will also remain unmodified.
1260 */
1261 int pm_runtime_get_if_active(struct device *dev)
1262 {
1263 return pm_runtime_get_conditional(dev, true);
1264 }
1265 EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
1266
1267 /**
1268 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
1269 * @dev: Target device.
1270 *
1271 * Increment the runtime PM usage counter of @dev if its runtime PM status is
1272 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0 or it is not
1273 * ignoring children and its active child count is nonzero. 1 is returned in
1274 * this case.
1275 *
1276 * If @dev is in a different state or it is not in use (that is, its usage
1277 * counter is 0, or it is ignoring children, or its active child count is 0),
1278 * 0 is returned.
1279 *
1280 * -EINVAL is returned if runtime PM is disabled for the device, in which case
1281 * also the usage counter of @dev is not updated.
1282 */
1283 int pm_runtime_get_if_in_use(struct device *dev)
1284 {
1285 return pm_runtime_get_conditional(dev, false);
1286 }
1287 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
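/*
 * Illustrative sketch (not part of this file): a typical use is peeking at a
 * device from a context that must not wake it up, e.g. a status poll, and
 * only touching the hardware if the device is already active and in use.
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		... read registers ...
 *		pm_runtime_put(dev);
 *	}
 */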
1288
1289 /**
1290 * __pm_runtime_set_status - Set runtime PM status of a device.
1291 * @dev: Device to handle.
1292 * @status: New runtime PM status of the device.
1293 *
1294 * If runtime PM of the device is disabled or its power.runtime_error field is
1295 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1296 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1297 * However, if the device has a parent and the parent is not active, and the
1298 * parent's power.ignore_children flag is unset, the device's status cannot be
1299 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1300 *
1301 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1302 * and the device parent's counter of unsuspended children is modified to
1303 * reflect the new status. If the new status is RPM_SUSPENDED, an idle
1304 * notification request for the parent is submitted.
1305 *
1306 * If @dev has any suppliers (as reflected by device links to them), and @status
1307 * is RPM_ACTIVE, they will be activated upfront and if the activation of one
1308 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1309 * of the @status value) and the suppliers will be deactivated on exit. The
1310 * error returned by the failing supplier activation will be returned in that
1311 * case.
1312 */
1313 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1314 {
1315 struct device *parent = dev->parent;
1316 bool notify_parent = false;
1317 unsigned long flags;
1318 int error = 0;
1319
1320 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1321 return -EINVAL;
1322
1323 spin_lock_irqsave(&dev->power.lock, flags);
1324
1325 /*
1326 * Prevent PM-runtime from being enabled for the device or return an
1327 * error if it is enabled already and working.
1328 */
1329 if (dev->power.runtime_error || dev->power.disable_depth)
1330 dev->power.disable_depth++;
1331 else
1332 error = -EAGAIN;
1333
1334 spin_unlock_irqrestore(&dev->power.lock, flags);
1335
1336 if (error)
1337 return error;
1338
1339 /*
1340 * If the new status is RPM_ACTIVE, the suppliers can be activated
1341 * upfront regardless of the current status, because next time
1342 * rpm_put_suppliers() runs, the rpm_active refcounts of the links
1343 * involved will be dropped down to one anyway.
1344 */
1345 if (status == RPM_ACTIVE) {
1346 int idx = device_links_read_lock();
1347
1348 error = rpm_get_suppliers(dev);
1349 if (error)
1350 status = RPM_SUSPENDED;
1351
1352 device_links_read_unlock(idx);
1353 }
1354
1355 spin_lock_irqsave(&dev->power.lock, flags);
1356
1357 if (dev->power.runtime_status == status || !parent)
1358 goto out_set;
1359
1360 if (status == RPM_SUSPENDED) {
1361 atomic_add_unless(&parent->power.child_count, -1, 0);
1362 notify_parent = !parent->power.ignore_children;
1363 } else {
1364 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1365
1366 /*
1367 * It is invalid to put an active child under a parent that is
1368 * not active, has runtime PM enabled and the
1369 * 'power.ignore_children' flag unset.
1370 */
1371 if (!parent->power.disable_depth &&
1372 !parent->power.ignore_children &&
1373 parent->power.runtime_status != RPM_ACTIVE) {
1374 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1375 dev_name(dev),
1376 dev_name(parent));
1377 error = -EBUSY;
1378 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1379 atomic_inc(&parent->power.child_count);
1380 }
1381
1382 spin_unlock(&parent->power.lock);
1383
1384 if (error) {
1385 status = RPM_SUSPENDED;
1386 goto out;
1387 }
1388 }
1389
1390 out_set:
1391 __update_runtime_status(dev, status);
1392 if (!error)
1393 dev->power.runtime_error = 0;
1394
1395 out:
1396 spin_unlock_irqrestore(&dev->power.lock, flags);
1397
1398 if (notify_parent)
1399 pm_request_idle(parent);
1400
1401 if (status == RPM_SUSPENDED) {
1402 int idx = device_links_read_lock();
1403
1404 rpm_put_suppliers(dev);
1405
1406 device_links_read_unlock(idx);
1407 }
1408
1409 pm_runtime_enable(dev);
1410
1411 return error;
1412 }
1413 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
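/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * through pm_runtime_set_active()/pm_runtime_set_suspended(), typically at
 * probe time, before enabling runtime PM, when the hardware is already
 * powered up.
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */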
1414
1415 /**
1416 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1417 * @dev: Device to handle.
1418 *
1419 * Flush all pending requests for the device from pm_wq and wait for all
1420 * runtime PM operations involving the device in progress to complete.
1421 *
1422 * Should be called under dev->power.lock with interrupts disabled.
1423 */
1424 static void __pm_runtime_barrier(struct device *dev)
1425 {
1426 pm_runtime_deactivate_timer(dev);
1427
1428 if (dev->power.request_pending) {
1429 dev->power.request = RPM_REQ_NONE;
1430 spin_unlock_irq(&dev->power.lock);
1431
1432 cancel_work_sync(&dev->power.work);
1433
1434 spin_lock_irq(&dev->power.lock);
1435 dev->power.request_pending = false;
1436 }
1437
1438 if (dev->power.runtime_status == RPM_SUSPENDING ||
1439 dev->power.runtime_status == RPM_RESUMING ||
1440 dev->power.idle_notification) {
1441 DEFINE_WAIT(wait);
1442
1443 /* Suspend, wake-up or idle notification in progress. */
1444 for (;;) {
1445 prepare_to_wait(&dev->power.wait_queue, &wait,
1446 TASK_UNINTERRUPTIBLE);
1447 if (dev->power.runtime_status != RPM_SUSPENDING
1448 && dev->power.runtime_status != RPM_RESUMING
1449 && !dev->power.idle_notification)
1450 break;
1451 spin_unlock_irq(&dev->power.lock);
1452
1453 schedule();
1454
1455 spin_lock_irq(&dev->power.lock);
1456 }
1457 finish_wait(&dev->power.wait_queue, &wait);
1458 }
1459 }
1460
1461 /**
1462 * pm_runtime_barrier - Flush pending requests and wait for completions.
1463 * @dev: Device to handle.
1464 *
1465 * Prevent the device from being suspended by incrementing its usage counter
1466 * and, if there's a pending resume request for the device, wake the device up.
1467 * Next, make sure that all pending requests for the device have been flushed
1468 * from pm_wq and wait for all runtime PM operations involving the device in
1469 * progress to complete.
1470 */
1471 void pm_runtime_barrier(struct device *dev)
1472 {
1473 pm_runtime_get_noresume(dev);
1474 spin_lock_irq(&dev->power.lock);
1475
1476 if (dev->power.request_pending
1477 && dev->power.request == RPM_REQ_RESUME)
1478 rpm_resume(dev, 0);
1479
1480 __pm_runtime_barrier(dev);
1481
1482 spin_unlock_irq(&dev->power.lock);
1483 pm_runtime_put_noidle(dev);
1484 }
1485 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1486
1487 bool pm_runtime_block_if_disabled(struct device *dev)
1488 {
1489 bool ret;
1490
1491 spin_lock_irq(&dev->power.lock);
1492
1493 ret = !pm_runtime_enabled(dev);
1494 if (ret && dev->power.last_status == RPM_INVALID)
1495 dev->power.last_status = RPM_BLOCKED;
1496
1497 spin_unlock_irq(&dev->power.lock);
1498
1499 return ret;
1500 }
1501
1502 void pm_runtime_unblock(struct device *dev)
1503 {
1504 spin_lock_irq(&dev->power.lock);
1505
1506 if (dev->power.last_status == RPM_BLOCKED)
1507 dev->power.last_status = RPM_INVALID;
1508
1509 spin_unlock_irq(&dev->power.lock);
1510 }
1511
1512 void __pm_runtime_disable(struct device *dev, bool check_resume)
1513 {
1514 spin_lock_irq(&dev->power.lock);
1515
1516 if (dev->power.disable_depth > 0) {
1517 dev->power.disable_depth++;
1518 goto out;
1519 }
1520
1521 /*
1522 * Wake up the device if there's a resume request pending, because that
1523 * means there probably is some I/O to process and disabling runtime PM
1524 * shouldn't prevent the device from processing the I/O.
1525 */
1526 if (check_resume && dev->power.request_pending &&
1527 dev->power.request == RPM_REQ_RESUME) {
1528 /*
1529 * Prevent suspends and idle notifications from being carried
1530 * out after we have woken up the device.
1531 */
1532 pm_runtime_get_noresume(dev);
1533
1534 rpm_resume(dev, 0);
1535
1536 pm_runtime_put_noidle(dev);
1537 }
1538
1539 /* Update time accounting before disabling PM-runtime. */
1540 update_pm_runtime_accounting(dev);
1541
1542 if (!dev->power.disable_depth++) {
1543 __pm_runtime_barrier(dev);
1544 dev->power.last_status = dev->power.runtime_status;
1545 }
1546
1547 out:
1548 spin_unlock_irq(&dev->power.lock);
1549 }
1550 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1551
1552 /**
1553 * pm_runtime_enable - Enable runtime PM of a device.
1554 * @dev: Device to handle.
1555 */
1556 void pm_runtime_enable(struct device *dev)
1557 {
1558 unsigned long flags;
1559
1560 spin_lock_irqsave(&dev->power.lock, flags);
1561
1562 if (!dev->power.disable_depth) {
1563 dev_warn(dev, "Unbalanced %s!\n", __func__);
1564 goto out;
1565 }
1566
1567 if (--dev->power.disable_depth > 0)
1568 goto out;
1569
1570 if (dev->power.last_status == RPM_BLOCKED) {
1571 dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1572 dump_stack();
1573 }
1574 dev->power.last_status = RPM_INVALID;
1575 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1576
1577 if (dev->power.runtime_status == RPM_SUSPENDED &&
1578 !dev->power.ignore_children &&
1579 atomic_read(&dev->power.child_count) > 0)
1580 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1581
1582 out:
1583 spin_unlock_irqrestore(&dev->power.lock, flags);
1584 }
1585 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1586
1587 static void pm_runtime_set_suspended_action(void *data)
1588 {
1589 pm_runtime_set_suspended(data);
1590 }
1591
1592 /**
1593 * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
1594 *
1595 * @dev: Device to handle.
1596 */
1597 int devm_pm_runtime_set_active_enabled(struct device *dev)
1598 {
1599 int err;
1600
1601 err = pm_runtime_set_active(dev);
1602 if (err)
1603 return err;
1604
1605 err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
1606 if (err)
1607 return err;
1608
1609 return devm_pm_runtime_enable(dev);
1610 }
1611 EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
1612
1613 static void pm_runtime_disable_action(void *data)
1614 {
1615 pm_runtime_dont_use_autosuspend(data);
1616 pm_runtime_disable(data);
1617 }
1618
1619 /**
1620 * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
1621 *
1622 * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
1623 * you at driver exit time if needed.
1624 *
1625 * @dev: Device to handle.
1626 */
1627 int devm_pm_runtime_enable(struct device *dev)
1628 {
1629 pm_runtime_enable(dev);
1630
1631 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1632 }
1633 EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
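/*
 * Illustrative sketch (not part of this file): with the devres variant the
 * probe error paths and driver removal need no explicit pm_runtime_disable().
 * foo_probe() is a hypothetical probe routine.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret = devm_pm_runtime_enable(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		... rest of probe ...
 *		return 0;
 *	}
 */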
1634
1635 static void pm_runtime_put_noidle_action(void *data)
1636 {
1637 pm_runtime_put_noidle(data);
1638 }
1639
1640 /**
1641 * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
1642 *
1643 * @dev: Device to handle.
1644 */
1645 int devm_pm_runtime_get_noresume(struct device *dev)
1646 {
1647 pm_runtime_get_noresume(dev);
1648
1649 return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
1650 }
1651 EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
1652
1653 /**
1654 * pm_runtime_forbid - Block runtime PM of a device.
1655 * @dev: Device to handle.
1656 *
1657 * Resume @dev if already suspended and block runtime suspend of @dev in such
1658 * a way that it can be unblocked via the /sys/devices/.../power/control
1659 * interface, or otherwise by calling pm_runtime_allow().
1660 *
1661 * Calling this function many times in a row has the same effect as calling it
1662 * once.
1663 */
1664 void pm_runtime_forbid(struct device *dev)
1665 {
1666 spin_lock_irq(&dev->power.lock);
1667 if (!dev->power.runtime_auto)
1668 goto out;
1669
1670 dev->power.runtime_auto = false;
1671 atomic_inc(&dev->power.usage_count);
1672 rpm_resume(dev, 0);
1673
1674 out:
1675 spin_unlock_irq(&dev->power.lock);
1676 }
1677 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1678
1679 /**
1680 * pm_runtime_allow - Unblock runtime PM of a device.
1681 * @dev: Device to handle.
1682 *
1683 * Unblock runtime suspend of @dev after it has been blocked by
1684 * pm_runtime_forbid() (for instance, if it has been blocked via the
1685 * /sys/devices/.../power/control interface), check if @dev can be
1686 * suspended and suspend it in that case.
1687 *
1688 * Calling this function many times in a row has the same effect as calling it
1689 * once.
1690 */
1691 void pm_runtime_allow(struct device *dev)
1692 {
1693 int ret;
1694
1695 spin_lock_irq(&dev->power.lock);
1696 if (dev->power.runtime_auto)
1697 goto out;
1698
1699 dev->power.runtime_auto = true;
1700 ret = rpm_drop_usage_count(dev);
1701 if (ret == 0)
1702 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1703 else if (ret > 0)
1704 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1705
1706 out:
1707 spin_unlock_irq(&dev->power.lock);
1708 }
1709 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1710
1711 /**
1712 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1713 * @dev: Device to handle.
1714 *
1715 * Set the power.no_callbacks flag, which tells the PM core that this
1716 * device is power-managed through its parent and has no runtime PM
1717 * callbacks of its own. The runtime sysfs attributes will be removed.
1718 */
1719 void pm_runtime_no_callbacks(struct device *dev)
1720 {
1721 spin_lock_irq(&dev->power.lock);
1722 dev->power.no_callbacks = 1;
1723 spin_unlock_irq(&dev->power.lock);
1724 if (device_is_registered(dev))
1725 rpm_sysfs_remove(dev);
1726 }
1727 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1728
1729 /**
1730 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1731 * @dev: Device to handle
1732 *
1733 * Set the power.irq_safe flag, which tells the PM core that the
1734 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1735 * always be invoked with the spinlock held and interrupts disabled. It also
1736 * causes the parent's usage counter to be permanently incremented, preventing
1737 * the parent from runtime suspending -- otherwise an irq-safe child might have
1738 * to wait for a non-irq-safe parent.
1739 */
1740 void pm_runtime_irq_safe(struct device *dev)
1741 {
1742 if (dev->parent)
1743 pm_runtime_get_sync(dev->parent);
1744
1745 spin_lock_irq(&dev->power.lock);
1746 dev->power.irq_safe = 1;
1747 spin_unlock_irq(&dev->power.lock);
1748 }
1749 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
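/*
 * Illustrative sketch (not part of this file): a driver whose runtime PM
 * callbacks never sleep can declare that once, typically at probe time, and
 * may then invoke runtime PM helpers from atomic context.
 *
 *	pm_runtime_irq_safe(dev);
 */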
1750
1751 /**
1752 * update_autosuspend - Handle a change to a device's autosuspend settings.
1753 * @dev: Device to handle.
1754 * @old_delay: The former autosuspend_delay value.
1755 * @old_use: The former use_autosuspend value.
1756 *
1757 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1758 * set; otherwise allow it. Send an idle notification if suspends are allowed.
1759 *
1760 * This function must be called under dev->power.lock with interrupts disabled.
1761 */
1762 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1763 {
1764 int delay = dev->power.autosuspend_delay;
1765
1766 /* Should runtime suspend be prevented now? */
1767 if (dev->power.use_autosuspend && delay < 0) {
1768
1769 /* If it used to be allowed then prevent it. */
1770 if (!old_use || old_delay >= 0) {
1771 atomic_inc(&dev->power.usage_count);
1772 rpm_resume(dev, 0);
1773 } else {
1774 trace_rpm_usage(dev, 0);
1775 }
1776 }
1777
1778 /* Runtime suspend should be allowed now. */
1779 else {
1780
1781 /* If it used to be prevented then allow it. */
1782 if (old_use && old_delay < 0)
1783 atomic_dec(&dev->power.usage_count);
1784
1785 /* Maybe we can autosuspend now. */
1786 rpm_idle(dev, RPM_AUTO);
1787 }
1788 }
1789
1790 /**
1791 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1792 * @dev: Device to handle.
1793 * @delay: Value of the new delay in milliseconds.
1794 *
1795 * Set the device's power.autosuspend_delay value. If it changes to negative
1796 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1797 * changes the other way, allow runtime suspends.
1798 */
pm_runtime_set_autosuspend_delay(struct device * dev,int delay)1799 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1800 {
1801 int old_delay, old_use;
1802
1803 spin_lock_irq(&dev->power.lock);
1804 old_delay = dev->power.autosuspend_delay;
1805 old_use = dev->power.use_autosuspend;
1806 dev->power.autosuspend_delay = delay;
1807 update_autosuspend(dev, old_delay, old_use);
1808 spin_unlock_irq(&dev->power.lock);
1809 }
1810 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1811
1812 /**
1813 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1814 * @dev: Device to handle.
1815 * @use: New value for use_autosuspend.
1816 *
1817 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1818 * suspends as needed.
1819 */
__pm_runtime_use_autosuspend(struct device * dev,bool use)1820 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1821 {
1822 int old_delay, old_use;
1823
1824 spin_lock_irq(&dev->power.lock);
1825 old_delay = dev->power.autosuspend_delay;
1826 old_use = dev->power.use_autosuspend;
1827 dev->power.use_autosuspend = use;
1828 update_autosuspend(dev, old_delay, old_use);
1829 spin_unlock_irq(&dev->power.lock);
1830 }
1831 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
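
/*
 * Illustrative sketch (assumption): the usual autosuspend idiom built on the
 * helpers above.  A driver picks a delay, enables autosuspend via the
 * pm_runtime_use_autosuspend() wrapper, and on each "put" asks for a delayed
 * suspend instead of an immediate one.  The function names are hypothetical:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 *	static void example_io_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 *
 * A negative delay set while use_autosuspend is enabled blocks runtime
 * suspend altogether, as update_autosuspend() above implements.
 */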

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.last_status = RPM_INVALID;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.needs_force_resume = false;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS);

	init_waitqueue_head(&dev->power.wait_queue);
}
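
/*
 * Illustrative sketch (assumption): pm_runtime_init() leaves every new device
 * with runtime PM disabled (disable_depth == 1) and in RPM_SUSPENDED, so a bus
 * or driver whose hardware is actually powered up at probe time should correct
 * the status before enabling runtime PM.  The function name is hypothetical:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */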

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
	/*
	 * Clear power.needs_force_resume in case it has been set by
	 * pm_runtime_force_suspend() invoked from a driver remove callback.
	 */
	dev->power.needs_force_resume = false;
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	dev_for_each_link_to_supplier(link, dev)
		if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
			link->supplier_preactivated = true;
			pm_runtime_get_sync(link->supplier);
		}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
				device_links_read_lock_held())
		if (link->supplier_preactivated) {
			link->supplier_preactivated = false;
			pm_runtime_put(link->supplier);
		}

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

static void pm_runtime_drop_link_count(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_runtime_drop_link - Prepare for device link removal.
 * @link: Device link going away.
 *
 * Drop the link count of the consumer end of @link and decrement the supplier
 * device's runtime PM usage counter as many times as needed to drop all of the
 * runtime PM references to it held by the consumer.
 */
void pm_runtime_drop_link(struct device_link *link)
{
	if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
		return;

	pm_runtime_drop_link_count(link->consumer);
	pm_runtime_release_supplier(link);
	pm_request_idle(link->supplier);
}
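
/*
 * Illustrative sketch (assumption): the link bookkeeping above pairs with
 * device links created with DL_FLAG_PM_RUNTIME, which make the consumer's
 * runtime PM pull in the supplier.  A consumer driver would typically set up
 * such a link along these lines:
 *
 *	struct device_link *link;
 *
 *	link = device_link_add(consumer, supplier,
 *			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
 *	if (!link)
 *		return -ENODEV;
 *
 * When the link is torn down again, pm_runtime_drop_link() drops whatever
 * runtime PM references the consumer still holds on the supplier.
 */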

static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
	/*
	 * Setting power.strict_midlayer means that the middle layer
	 * code does not want its runtime PM callbacks to be invoked via
	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
	 * return a direct pointer to the driver callback in that case.
	 */
	if (dev_pm_strict_midlayer_is_set(dev))
		return __rpm_get_driver_callback(dev, cb_offset);

	return __rpm_get_callback(dev, cb_offset);
}

#define GET_CALLBACK(dev, callback) \
	get_callback(dev, offsetof(struct dev_pm_ops, callback))

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so the device's runtime PM status can be checked safely
 * and, if it is active, invoke its ->runtime_suspend callback to suspend it
 * and change its runtime PM status field to RPM_SUSPENDED. Also, if the
 * device's usage and children counters don't indicate that the device was in
 * use before the system-wide transition under way, decrement its parent's
 * children counter (if there is a parent). Keep runtime PM disabled to
 * preserve the state unless an error is encountered.
 *
 * Typically, this function is invoked from a system suspend callback to make
 * sure the device is put into a low-power state. It should only be used during
 * system-wide PM transitions to sleep states, and it assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
		return 0;

	callback = GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	dev_pm_enable_wake_irq_complete(dev);

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent and the usage counters of its suppliers. Otherwise, set
	 * power.needs_force_resume to let pm_runtime_force_resume() know that
	 * the device needs to be taken care of and to prevent this function
	 * from handling the device again in case the device is passed to it
	 * once more subsequently.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		dev->power.needs_force_resume = true;

	return 0;

err:
	dev_pm_disable_wake_irq_check(dev, true);
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

#ifdef CONFIG_PM_SLEEP

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * This function expects that either pm_runtime_force_suspend() has put the
 * device into a low-power state prior to calling it, or the device had been
 * runtime-suspended before the preceding system-wide suspend transition and it
 * was left in suspend during that transition.
 *
 * The actions carried out by pm_runtime_force_suspend(), or by a runtime
 * suspend in general, are reversed and the device is brought back to full
 * power if it is expected to be used on system resume, which is the case when
 * its needs_force_resume flag is set or when its smart_suspend flag is set and
 * its runtime PM status is "active".
 *
 * In other cases, the resume is deferred to be managed via runtime PM.
 *
 * Typically, this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
	    pm_runtime_status_suspended(dev)))
		goto out;

	callback = GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev, false);
	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		dev_pm_enable_wake_irq_check(dev, false);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);

out:
	/*
	 * The smart_suspend flag can be cleared here because it is not going
	 * to be necessary until the next system-wide suspend transition that
	 * will update it again.
	 */
	dev->power.smart_suspend = false;
	/*
	 * Also clear needs_force_resume to make this function skip devices that
	 * have been seen by it once.
	 */
	dev->power.needs_force_resume = false;

	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
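
/*
 * Illustrative sketch (assumption): drivers that want their system-wide
 * suspend/resume to simply reuse their runtime PM callbacks can point the
 * system sleep operations at the two helpers above, e.g.:
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(example_runtime_suspend,
 *				   example_runtime_resume, NULL)
 *	};
 *
 * where example_runtime_suspend()/example_runtime_resume() stand for the
 * driver's own runtime PM callbacks.  DEFINE_RUNTIME_DEV_PM_OPS() expands to
 * an equivalent structure.
 */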

bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

#endif /* CONFIG_PM_SLEEP */