1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44 * The entries in the dpm_list list are in a depth first order, simply
45 * because children are guaranteed to be discovered after parents, and
46 * are inserted at the back of the list on discovery.
47 *
48 * Since device_pm_add() may be called with a device lock held,
49 * we must never try to acquire a device lock while holding
50 * dpm_list_mutex.
51 */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61
62 static DEFINE_MUTEX(async_wip_mtx);
63 static int async_error;
64
65 /**
66 * pm_hibernate_is_recovering - Check whether hibernation is recovering from an error.
67 *
68 * Used to query whether dev_pm_ops.thaw() is being invoked for the normal
69 * hibernation case or while recovering from an error.
70 *
71 * Return: true in the error-recovery case, false in the normal case.
72 */
73 bool pm_hibernate_is_recovering(void)
74 {
75 return pm_transition.event == PM_EVENT_RECOVER;
76 }
77 EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
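/*
 * Illustrative sketch (not part of this file): a driver's ->thaw() callback
 * could use the helper above to distinguish a normal thaw from error
 * recovery, e.g.:
 *
 *	static int foo_thaw(struct device *dev)
 *	{
 *		if (pm_hibernate_is_recovering())
 *			return foo_full_reinit(dev);
 *
 *		return foo_light_reinit(dev);
 *	}
 *
 * foo_thaw(), foo_full_reinit() and foo_light_reinit() are hypothetical names
 * used only for the example.
 */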
78
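/* Map a PM_EVENT_* code to the human-readable name used in messages. */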
79 static const char *pm_verb(int event)
80 {
81 switch (event) {
82 case PM_EVENT_SUSPEND:
83 return "suspend";
84 case PM_EVENT_RESUME:
85 return "resume";
86 case PM_EVENT_FREEZE:
87 return "freeze";
88 case PM_EVENT_QUIESCE:
89 return "quiesce";
90 case PM_EVENT_HIBERNATE:
91 return "hibernate";
92 case PM_EVENT_THAW:
93 return "thaw";
94 case PM_EVENT_RESTORE:
95 return "restore";
96 case PM_EVENT_RECOVER:
97 return "recover";
98 default:
99 return "(unknown PM event)";
100 }
101 }
102
103 /**
104 * device_pm_sleep_init - Initialize system suspend-related device fields.
105 * @dev: Device object being initialized.
106 */
107 void device_pm_sleep_init(struct device *dev)
108 {
109 dev->power.is_prepared = false;
110 dev->power.is_suspended = false;
111 dev->power.is_noirq_suspended = false;
112 dev->power.is_late_suspended = false;
113 init_completion(&dev->power.completion);
114 complete_all(&dev->power.completion);
115 dev->power.wakeup = NULL;
116 INIT_LIST_HEAD(&dev->power.entry);
117 }
118
119 /**
120 * device_pm_lock - Lock the list of active devices used by the PM core.
121 */
122 void device_pm_lock(void)
123 {
124 mutex_lock(&dpm_list_mtx);
125 }
126
127 /**
128 * device_pm_unlock - Unlock the list of active devices used by the PM core.
129 */
130 void device_pm_unlock(void)
131 {
132 mutex_unlock(&dpm_list_mtx);
133 }
134
135 /**
136 * device_pm_add - Add a device to the PM core's list of active devices.
137 * @dev: Device to add to the list.
138 */
139 void device_pm_add(struct device *dev)
140 {
141 /* Skip PM setup/initialization. */
142 if (device_pm_not_required(dev))
143 return;
144
145 pr_debug("Adding info for %s:%s\n",
146 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147 device_pm_check_callbacks(dev);
148 mutex_lock(&dpm_list_mtx);
149 if (dev->parent && dev->parent->power.is_prepared)
150 dev_warn(dev, "parent %s should not be sleeping\n",
151 dev_name(dev->parent));
152 list_add_tail(&dev->power.entry, &dpm_list);
153 dev->power.in_dpm_list = true;
154 mutex_unlock(&dpm_list_mtx);
155 }
156
157 /**
158 * device_pm_remove - Remove a device from the PM core's list of active devices.
159 * @dev: Device to be removed from the list.
160 */
161 void device_pm_remove(struct device *dev)
162 {
163 if (device_pm_not_required(dev))
164 return;
165
166 pr_debug("Removing info for %s:%s\n",
167 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
168 complete_all(&dev->power.completion);
169 mutex_lock(&dpm_list_mtx);
170 list_del_init(&dev->power.entry);
171 dev->power.in_dpm_list = false;
172 mutex_unlock(&dpm_list_mtx);
173 device_wakeup_disable(dev);
174 pm_runtime_remove(dev);
175 device_pm_check_callbacks(dev);
176 }
177
178 /**
179 * device_pm_move_before - Move device in the PM core's list of active devices.
180 * @deva: Device to move in dpm_list.
181 * @devb: Device @deva should come before.
182 */
183 void device_pm_move_before(struct device *deva, struct device *devb)
184 {
185 pr_debug("Moving %s:%s before %s:%s\n",
186 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188 /* Delete deva from dpm_list and reinsert before devb. */
189 list_move_tail(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193 * device_pm_move_after - Move device in the PM core's list of active devices.
194 * @deva: Device to move in dpm_list.
195 * @devb: Device @deva should come after.
196 */
197 void device_pm_move_after(struct device *deva, struct device *devb)
198 {
199 pr_debug("Moving %s:%s after %s:%s\n",
200 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
201 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
202 /* Delete deva from dpm_list and reinsert after devb. */
203 list_move(&deva->power.entry, &devb->power.entry);
204 }
205
206 /**
207 * device_pm_move_last - Move device to end of the PM core's list of devices.
208 * @dev: Device to move in dpm_list.
209 */
210 void device_pm_move_last(struct device *dev)
211 {
212 pr_debug("Moving %s:%s to end of list\n",
213 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
214 list_move_tail(&dev->power.entry, &dpm_list);
215 }
216
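/*
 * Optional timing instrumentation: when pm_print_times_enabled is set
 * (exposed as /sys/power/pm_print_times with PM debugging enabled), log each
 * callback invocation and how long it took.
 */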
217 static ktime_t initcall_debug_start(struct device *dev, void *cb)
218 {
219 if (!pm_print_times_enabled)
220 return 0;
221
222 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
223 task_pid_nr(current),
224 dev->parent ? dev_name(dev->parent) : "none");
225 return ktime_get();
226 }
227
228 static void initcall_debug_report(struct device *dev, ktime_t calltime,
229 void *cb, int error)
230 {
231 ktime_t rettime;
232
233 if (!pm_print_times_enabled)
234 return;
235
236 rettime = ktime_get();
237 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
238 (unsigned long long)ktime_us_delta(rettime, calltime));
239 }
240
241 /**
242 * dpm_wait - Wait for a PM operation to complete.
243 * @dev: Device to wait for.
244 * @async: If unset, wait only if the device's power.async_suspend flag is set.
245 */
246 static void dpm_wait(struct device *dev, bool async)
247 {
248 if (!dev)
249 return;
250
251 if (async || (pm_async_enabled && dev->power.async_suspend))
252 wait_for_completion(&dev->power.completion);
253 }
254
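/* device_for_each_child() helper: wait for each child's PM completion. */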
255 static int dpm_wait_fn(struct device *dev, void *async_ptr)
256 {
257 dpm_wait(dev, *((bool *)async_ptr));
258 return 0;
259 }
260
261 static void dpm_wait_for_children(struct device *dev, bool async)
262 {
263 device_for_each_child(dev, &async, dpm_wait_fn);
264 }
265
266 static void dpm_wait_for_suppliers(struct device *dev, bool async)
267 {
268 struct device_link *link;
269 int idx;
270
271 idx = device_links_read_lock();
272
273 /*
274 * If the supplier goes away right after we've checked the link to it,
275 * we'll wait for its completion to change the state, but that's fine,
276 * because the only things that will block as a result are the SRCU
277 * callbacks freeing the link objects for the links in the list we're
278 * walking.
279 */
280 dev_for_each_link_to_supplier(link, dev)
281 if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
282 !device_link_flag_is_sync_state_only(link->flags))
283 dpm_wait(link->supplier, async);
284
285 device_links_read_unlock(idx);
286 }
287
288 static bool dpm_wait_for_superior(struct device *dev, bool async)
289 {
290 struct device *parent;
291
292 /*
293 * If the device is resumed asynchronously and the parent's callback
294 * deletes both the device and the parent itself, the parent object may
295 * be freed while this function is running, so avoid that by reference
296 * counting the parent once more unless the device has been deleted
297 * already (in which case return right away).
298 */
299 mutex_lock(&dpm_list_mtx);
300
301 if (!device_pm_initialized(dev)) {
302 mutex_unlock(&dpm_list_mtx);
303 return false;
304 }
305
306 parent = get_device(dev->parent);
307
308 mutex_unlock(&dpm_list_mtx);
309
310 dpm_wait(parent, async);
311 put_device(parent);
312
313 dpm_wait_for_suppliers(dev, async);
314
315 /*
316 * If the parent's callback has deleted the device, attempting to resume
317 * it would be invalid, so avoid doing that then.
318 */
319 return device_pm_initialized(dev);
320 }
321
322 static void dpm_wait_for_consumers(struct device *dev, bool async)
323 {
324 struct device_link *link;
325 int idx;
326
327 idx = device_links_read_lock();
328
329 /*
330 * The status of a device link can only be changed from "dormant" by a
331 * probe, but that cannot happen during system suspend/resume. In
332 * theory it can change to "dormant" at that time, but then it is
333 * reasonable to wait for the target device anyway (e.g. if it goes
334 * away, it's better to wait for it to go away completely and then
335 * continue instead of trying to continue in parallel with its
336 * unregistration).
337 */
338 dev_for_each_link_to_consumer(link, dev)
339 if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
340 !device_link_flag_is_sync_state_only(link->flags))
341 dpm_wait(link->consumer, async);
342
343 device_links_read_unlock(idx);
344 }
345
346 static void dpm_wait_for_subordinate(struct device *dev, bool async)
347 {
348 dpm_wait_for_children(dev, async);
349 dpm_wait_for_consumers(dev, async);
350 }
351
352 /**
353 * pm_op - Return the PM operation appropriate for given PM event.
354 * @ops: PM operations to choose from.
355 * @state: PM transition of the system being carried out.
356 */
357 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
358 {
359 switch (state.event) {
360 #ifdef CONFIG_SUSPEND
361 case PM_EVENT_SUSPEND:
362 return ops->suspend;
363 case PM_EVENT_RESUME:
364 return ops->resume;
365 #endif /* CONFIG_SUSPEND */
366 #ifdef CONFIG_HIBERNATE_CALLBACKS
367 case PM_EVENT_FREEZE:
368 case PM_EVENT_QUIESCE:
369 return ops->freeze;
370 case PM_EVENT_HIBERNATE:
371 return ops->poweroff;
372 case PM_EVENT_THAW:
373 case PM_EVENT_RECOVER:
374 return ops->thaw;
375 case PM_EVENT_RESTORE:
376 return ops->restore;
377 #endif /* CONFIG_HIBERNATE_CALLBACKS */
378 }
379
380 return NULL;
381 }
382
383 /**
384 * pm_late_early_op - Return the PM operation appropriate for given PM event.
385 * @ops: PM operations to choose from.
386 * @state: PM transition of the system being carried out.
387 *
388 * Runtime PM is disabled for @dev while this function is being executed.
389 */
390 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
391 pm_message_t state)
392 {
393 switch (state.event) {
394 #ifdef CONFIG_SUSPEND
395 case PM_EVENT_SUSPEND:
396 return ops->suspend_late;
397 case PM_EVENT_RESUME:
398 return ops->resume_early;
399 #endif /* CONFIG_SUSPEND */
400 #ifdef CONFIG_HIBERNATE_CALLBACKS
401 case PM_EVENT_FREEZE:
402 case PM_EVENT_QUIESCE:
403 return ops->freeze_late;
404 case PM_EVENT_HIBERNATE:
405 return ops->poweroff_late;
406 case PM_EVENT_THAW:
407 case PM_EVENT_RECOVER:
408 return ops->thaw_early;
409 case PM_EVENT_RESTORE:
410 return ops->restore_early;
411 #endif /* CONFIG_HIBERNATE_CALLBACKS */
412 }
413
414 return NULL;
415 }
416
417 /**
418 * pm_noirq_op - Return the PM operation appropriate for given PM event.
419 * @ops: PM operations to choose from.
420 * @state: PM transition of the system being carried out.
421 *
422 * The driver of @dev will not receive interrupts while this function is being
423 * executed.
424 */
425 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
426 {
427 switch (state.event) {
428 #ifdef CONFIG_SUSPEND
429 case PM_EVENT_SUSPEND:
430 return ops->suspend_noirq;
431 case PM_EVENT_RESUME:
432 return ops->resume_noirq;
433 #endif /* CONFIG_SUSPEND */
434 #ifdef CONFIG_HIBERNATE_CALLBACKS
435 case PM_EVENT_FREEZE:
436 case PM_EVENT_QUIESCE:
437 return ops->freeze_noirq;
438 case PM_EVENT_HIBERNATE:
439 return ops->poweroff_noirq;
440 case PM_EVENT_THAW:
441 case PM_EVENT_RECOVER:
442 return ops->thaw_noirq;
443 case PM_EVENT_RESTORE:
444 return ops->restore_noirq;
445 #endif /* CONFIG_HIBERNATE_CALLBACKS */
446 }
447
448 return NULL;
449 }
450
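/* Debug and error message helpers shared by all suspend/resume phases. */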
451 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
452 {
453 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
454 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
455 ", may wakeup" : "", dev->power.driver_flags);
456 }
457
458 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
459 int error)
460 {
461 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
462 error);
463 }
464
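/*
 * Log how long the given suspend/resume phase took.  Uses pm_pr_dbg(), so
 * the message only shows up when PM debug messages are enabled.
 */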
465 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
466 const char *info)
467 {
468 ktime_t calltime;
469 u64 usecs64;
470 int usecs;
471
472 calltime = ktime_get();
473 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
474 do_div(usecs64, NSEC_PER_USEC);
475 usecs = usecs64;
476 if (usecs == 0)
477 usecs = 1;
478
479 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
480 info ?: "", info ? " " : "", pm_verb(state.event),
481 error ? "aborted" : "complete",
482 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
483 }
484
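/*
 * Invoke a single PM callback with tracing, optional timing and error
 * reporting wrapped around it.  A NULL callback is treated as success.
 */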
485 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
486 pm_message_t state, const char *info)
487 {
488 ktime_t calltime;
489 int error;
490
491 if (!cb)
492 return 0;
493
494 calltime = initcall_debug_start(dev, cb);
495
496 pm_dev_dbg(dev, state, info);
497 trace_device_pm_callback_start(dev, info, state.event);
498 error = cb(dev);
499 trace_device_pm_callback_end(dev, error);
500 suspend_report_result(dev, cb, error);
501
502 initcall_debug_report(dev, calltime, cb, error);
503
504 return error;
505 }
506
507 #ifdef CONFIG_DPM_WATCHDOG
508 struct dpm_watchdog {
509 struct device *dev;
510 struct task_struct *tsk;
511 struct timer_list timer;
512 bool fatal;
513 };
514
515 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
516 struct dpm_watchdog wd
517
518 /**
519 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
520 * @t: The timer that PM watchdog depends on.
521 *
522 * Called when a driver has timed out suspending or resuming.
523 * There's not much we can do here to recover so panic() to
524 * capture a crash-dump in pstore.
525 */
526 static void dpm_watchdog_handler(struct timer_list *t)
527 {
528 struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
529 struct timer_list *timer = &wd->timer;
530 unsigned int time_left;
531
532 if (wd->fatal) {
533 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
534 show_stack(wd->tsk, NULL, KERN_EMERG);
535 panic("%s %s: unrecoverable failure\n",
536 dev_driver_string(wd->dev), dev_name(wd->dev));
537 }
538
539 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
540 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
541 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
542 show_stack(wd->tsk, NULL, KERN_WARNING);
543
544 wd->fatal = true;
545 mod_timer(timer, jiffies + HZ * time_left);
546 }
547
548 /**
549 * dpm_watchdog_set - Enable pm watchdog for given device.
550 * @wd: Watchdog. Must be allocated on the stack.
551 * @dev: Device to handle.
552 */
553 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
554 {
555 struct timer_list *timer = &wd->timer;
556
557 wd->dev = dev;
558 wd->tsk = current;
559 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
560
561 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
562 /* use same timeout value for both suspend and resume */
563 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
564 add_timer(timer);
565 }
566
567 /**
568 * dpm_watchdog_clear - Disable suspend/resume watchdog.
569 * @wd: Watchdog to disable.
570 */
571 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
572 {
573 struct timer_list *timer = &wd->timer;
574
575 timer_delete_sync(timer);
576 timer_destroy_on_stack(timer);
577 }
578 #else
579 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
580 #define dpm_watchdog_set(x, y)
581 #define dpm_watchdog_clear(x)
582 #endif
583
584 /*------------------------- Resume routines -------------------------*/
585
586 /**
587 * dev_pm_skip_resume - System-wide device resume optimization check.
588 * @dev: Target device.
589 *
590 * Return:
591 * - %false if the transition under way is RESTORE.
592 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
593 * - The logical negation of %power.must_resume otherwise (that is, when the
594 * transition under way is RESUME).
595 */
596 bool dev_pm_skip_resume(struct device *dev)
597 {
598 if (pm_transition.event == PM_EVENT_RESTORE)
599 return false;
600
601 if (pm_transition.event == PM_EVENT_THAW)
602 return dev_pm_skip_suspend(dev);
603
604 return !dev->power.must_resume;
605 }
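/*
 * Illustrative (not from this file): a driver that can leave its device in
 * runtime suspend across a system transition typically opts in at probe
 * time, for instance:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * The PM core then consults dev_pm_skip_suspend()/dev_pm_skip_resume() to
 * decide whether the driver callbacks can be skipped.
 */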
606
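/*
 * Helpers for scheduling suspend/resume callbacks asynchronously.  A device
 * only participates if it has been marked with device_enable_async_suspend()
 * and the global pm_async knob (/sys/power/pm_async) is enabled, and PM
 * tracing is off (PM tracing relies on devices being handled in order).
 */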
607 static bool is_async(struct device *dev)
608 {
609 return dev->power.async_suspend && pm_async_enabled
610 && !pm_trace_is_enabled();
611 }
612
613 static bool __dpm_async(struct device *dev, async_func_t func)
614 {
615 if (dev->power.work_in_progress)
616 return true;
617
618 if (!is_async(dev))
619 return false;
620
621 dev->power.work_in_progress = true;
622
623 get_device(dev);
624
625 if (async_schedule_dev_nocall(func, dev))
626 return true;
627
628 put_device(dev);
629
630 return false;
631 }
632
633 static bool dpm_async_fn(struct device *dev, async_func_t func)
634 {
635 guard(mutex)(&async_wip_mtx);
636
637 return __dpm_async(dev, func);
638 }
639
640 static int dpm_async_with_cleanup(struct device *dev, void *fn)
641 {
642 guard(mutex)(&async_wip_mtx);
643
644 if (!__dpm_async(dev, fn))
645 dev->power.work_in_progress = false;
646
647 return 0;
648 }
649
650 static void dpm_async_resume_children(struct device *dev, async_func_t func)
651 {
652 /*
653 * Prevent racing with dpm_clear_async_state() during initial list
654 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
655 * dpm_resume().
656 */
657 guard(mutex)(&dpm_list_mtx);
658
659 /*
660 * Start processing "async" children of the device unless it's been
661 * started already for them.
662 */
663 device_for_each_child(dev, func, dpm_async_with_cleanup);
664 }
665
666 static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
667 {
668 struct device_link *link;
669 int idx;
670
671 dpm_async_resume_children(dev, func);
672
673 idx = device_links_read_lock();
674
675 /* Start processing the device's "async" consumers. */
676 dev_for_each_link_to_consumer(link, dev)
677 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
678 dpm_async_with_cleanup(link->consumer, func);
679
680 device_links_read_unlock(idx);
681 }
682
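/* Reset a device's async bookkeeping before the initial list walks below. */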
683 static void dpm_clear_async_state(struct device *dev)
684 {
685 reinit_completion(&dev->power.completion);
686 dev->power.work_in_progress = false;
687 }
688
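/* "Root" devices have no parent and no suppliers, so resume can start there. */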
689 static bool dpm_root_device(struct device *dev)
690 {
691 lockdep_assert_held(&dpm_list_mtx);
692
693 /*
694 * Since this function is required to run under dpm_list_mtx, the
695 * list_empty() below will only return true if the device's list of
696 * consumers is actually empty before calling it.
697 */
698 return !dev->parent && list_empty(&dev->links.suppliers);
699 }
700
701 static void async_resume_noirq(void *data, async_cookie_t cookie);
702
703 /**
704 * device_resume_noirq - Execute a "noirq resume" callback for given device.
705 * @dev: Device to handle.
706 * @state: PM transition of the system being carried out.
707 * @async: If true, the device is being resumed asynchronously.
708 *
709 * The driver of @dev will not receive interrupts while this function is being
710 * executed.
711 */
712 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
713 {
714 pm_callback_t callback = NULL;
715 const char *info = NULL;
716 bool skip_resume;
717 int error = 0;
718
719 TRACE_DEVICE(dev);
720 TRACE_RESUME(0);
721
722 if (dev->power.syscore || dev->power.direct_complete)
723 goto Out;
724
725 if (!dev->power.is_noirq_suspended) {
726 /*
727 * This means that system suspend has been aborted in the noirq
728 * phase before invoking the noirq suspend callback for the
729 * device, so if device_suspend_late() has left it in suspend,
730 * device_resume_early() should leave it in suspend too, in
731 * case its early resume depends on the noirq resume that
732 * has not run.
733 */
734 if (dev_pm_skip_suspend(dev))
735 dev->power.must_resume = false;
736
737 goto Out;
738 }
739
740 if (!dpm_wait_for_superior(dev, async))
741 goto Out;
742
743 skip_resume = dev_pm_skip_resume(dev);
744 /*
745 * If the driver callback is skipped below or by the middle layer
746 * callback and device_resume_early() also skips the driver callback for
747 * this device later, it needs to appear as "suspended" to PM-runtime,
748 * so change its status accordingly.
749 *
750 * Otherwise, the device is going to be resumed, so set its PM-runtime
751 * status to "active" unless its power.smart_suspend flag is clear, in
752 * which case it is not necessary to update its PM-runtime status.
753 */
754 if (skip_resume)
755 pm_runtime_set_suspended(dev);
756 else if (dev_pm_smart_suspend(dev))
757 pm_runtime_set_active(dev);
758
759 if (dev->pm_domain) {
760 info = "noirq power domain ";
761 callback = pm_noirq_op(&dev->pm_domain->ops, state);
762 } else if (dev->type && dev->type->pm) {
763 info = "noirq type ";
764 callback = pm_noirq_op(dev->type->pm, state);
765 } else if (dev->class && dev->class->pm) {
766 info = "noirq class ";
767 callback = pm_noirq_op(dev->class->pm, state);
768 } else if (dev->bus && dev->bus->pm) {
769 info = "noirq bus ";
770 callback = pm_noirq_op(dev->bus->pm, state);
771 }
772 if (callback)
773 goto Run;
774
775 if (skip_resume)
776 goto Skip;
777
778 if (dev->driver && dev->driver->pm) {
779 info = "noirq driver ";
780 callback = pm_noirq_op(dev->driver->pm, state);
781 }
782
783 Run:
784 error = dpm_run_callback(callback, dev, state, info);
785
786 Skip:
787 dev->power.is_noirq_suspended = false;
788
789 Out:
790 complete_all(&dev->power.completion);
791 TRACE_RESUME(error);
792
793 if (error) {
794 WRITE_ONCE(async_error, error);
795 dpm_save_failed_dev(dev_name(dev));
796 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
797 }
798
799 dpm_async_resume_subordinate(dev, async_resume_noirq);
800 }
801
802 static void async_resume_noirq(void *data, async_cookie_t cookie)
803 {
804 struct device *dev = data;
805
806 device_resume_noirq(dev, pm_transition, true);
807 put_device(dev);
808 }
809
810 static void dpm_noirq_resume_devices(pm_message_t state)
811 {
812 struct device *dev;
813 ktime_t starttime = ktime_get();
814
815 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
816
817 async_error = 0;
818 pm_transition = state;
819
820 mutex_lock(&dpm_list_mtx);
821
822 /*
823 * Start processing "async" root devices upfront so they don't wait for
824 * the "sync" devices they don't depend on.
825 */
826 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
827 dpm_clear_async_state(dev);
828 if (dpm_root_device(dev))
829 dpm_async_with_cleanup(dev, async_resume_noirq);
830 }
831
832 while (!list_empty(&dpm_noirq_list)) {
833 dev = to_device(dpm_noirq_list.next);
834 list_move_tail(&dev->power.entry, &dpm_late_early_list);
835
836 if (!dpm_async_fn(dev, async_resume_noirq)) {
837 get_device(dev);
838
839 mutex_unlock(&dpm_list_mtx);
840
841 device_resume_noirq(dev, state, false);
842
843 put_device(dev);
844
845 mutex_lock(&dpm_list_mtx);
846 }
847 }
848 mutex_unlock(&dpm_list_mtx);
849 async_synchronize_full();
850 dpm_show_time(starttime, state, 0, "noirq");
851 if (READ_ONCE(async_error))
852 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
853
854 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
855 }
856
857 /**
858 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
859 * @state: PM transition of the system being carried out.
860 *
861 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
862 * allow device drivers' interrupt handlers to be called.
863 */
864 void dpm_resume_noirq(pm_message_t state)
865 {
866 dpm_noirq_resume_devices(state);
867
868 resume_device_irqs();
869 device_wakeup_disarm_wake_irqs();
870 }
871
872 static void async_resume_early(void *data, async_cookie_t cookie);
873
874 /**
875 * device_resume_early - Execute an "early resume" callback for given device.
876 * @dev: Device to handle.
877 * @state: PM transition of the system being carried out.
878 * @async: If true, the device is being resumed asynchronously.
879 *
880 * Runtime PM is disabled for @dev while this function is being executed.
881 */
882 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
883 {
884 pm_callback_t callback = NULL;
885 const char *info = NULL;
886 int error = 0;
887
888 TRACE_DEVICE(dev);
889 TRACE_RESUME(0);
890
891 if (dev->power.direct_complete)
892 goto Out;
893
894 if (!dev->power.is_late_suspended)
895 goto Out;
896
897 if (dev->power.syscore)
898 goto Skip;
899
900 if (!dpm_wait_for_superior(dev, async))
901 goto Out;
902
903 if (dev->pm_domain) {
904 info = "early power domain ";
905 callback = pm_late_early_op(&dev->pm_domain->ops, state);
906 } else if (dev->type && dev->type->pm) {
907 info = "early type ";
908 callback = pm_late_early_op(dev->type->pm, state);
909 } else if (dev->class && dev->class->pm) {
910 info = "early class ";
911 callback = pm_late_early_op(dev->class->pm, state);
912 } else if (dev->bus && dev->bus->pm) {
913 info = "early bus ";
914 callback = pm_late_early_op(dev->bus->pm, state);
915 }
916 if (callback)
917 goto Run;
918
919 if (dev_pm_skip_resume(dev))
920 goto Skip;
921
922 if (dev->driver && dev->driver->pm) {
923 info = "early driver ";
924 callback = pm_late_early_op(dev->driver->pm, state);
925 }
926
927 Run:
928 error = dpm_run_callback(callback, dev, state, info);
929
930 Skip:
931 dev->power.is_late_suspended = false;
932 pm_runtime_enable(dev);
933
934 Out:
935 TRACE_RESUME(error);
936
937 complete_all(&dev->power.completion);
938
939 if (error) {
940 WRITE_ONCE(async_error, error);
941 dpm_save_failed_dev(dev_name(dev));
942 pm_dev_err(dev, state, async ? " async early" : " early", error);
943 }
944
945 dpm_async_resume_subordinate(dev, async_resume_early);
946 }
947
948 static void async_resume_early(void *data, async_cookie_t cookie)
949 {
950 struct device *dev = data;
951
952 device_resume_early(dev, pm_transition, true);
953 put_device(dev);
954 }
955
956 /**
957 * dpm_resume_early - Execute "early resume" callbacks for all devices.
958 * @state: PM transition of the system being carried out.
959 */
960 void dpm_resume_early(pm_message_t state)
961 {
962 struct device *dev;
963 ktime_t starttime = ktime_get();
964
965 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
966
967 async_error = 0;
968 pm_transition = state;
969
970 mutex_lock(&dpm_list_mtx);
971
972 /*
973 * Start processing "async" root devices upfront so they don't wait for
974 * the "sync" devices they don't depend on.
975 */
976 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
977 dpm_clear_async_state(dev);
978 if (dpm_root_device(dev))
979 dpm_async_with_cleanup(dev, async_resume_early);
980 }
981
982 while (!list_empty(&dpm_late_early_list)) {
983 dev = to_device(dpm_late_early_list.next);
984 list_move_tail(&dev->power.entry, &dpm_suspended_list);
985
986 if (!dpm_async_fn(dev, async_resume_early)) {
987 get_device(dev);
988
989 mutex_unlock(&dpm_list_mtx);
990
991 device_resume_early(dev, state, false);
992
993 put_device(dev);
994
995 mutex_lock(&dpm_list_mtx);
996 }
997 }
998 mutex_unlock(&dpm_list_mtx);
999 async_synchronize_full();
1000 dpm_show_time(starttime, state, 0, "early");
1001 if (READ_ONCE(async_error))
1002 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
1003
1004 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
1005 }
1006
1007 /**
1008 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
1009 * @state: PM transition of the system being carried out.
1010 */
1011 void dpm_resume_start(pm_message_t state)
1012 {
1013 dpm_resume_noirq(state);
1014 dpm_resume_early(state);
1015 }
1016 EXPORT_SYMBOL_GPL(dpm_resume_start);
1017
1018 static void async_resume(void *data, async_cookie_t cookie);
1019
1020 /**
1021 * device_resume - Execute "resume" callbacks for given device.
1022 * @dev: Device to handle.
1023 * @state: PM transition of the system being carried out.
1024 * @async: If true, the device is being resumed asynchronously.
1025 */
1026 static void device_resume(struct device *dev, pm_message_t state, bool async)
1027 {
1028 pm_callback_t callback = NULL;
1029 const char *info = NULL;
1030 int error = 0;
1031 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1032
1033 TRACE_DEVICE(dev);
1034 TRACE_RESUME(0);
1035
1036 if (dev->power.syscore)
1037 goto Complete;
1038
1039 if (!dev->power.is_suspended)
1040 goto Complete;
1041
1042 dev->power.is_suspended = false;
1043
1044 if (dev->power.direct_complete) {
1045 /*
1046 * Allow new children to be added under the device after this
1047 * point if it has no PM callbacks.
1048 */
1049 if (dev->power.no_pm_callbacks)
1050 dev->power.is_prepared = false;
1051
1052 /* Match the pm_runtime_disable() in device_suspend(). */
1053 pm_runtime_enable(dev);
1054 goto Complete;
1055 }
1056
1057 if (!dpm_wait_for_superior(dev, async))
1058 goto Complete;
1059
1060 dpm_watchdog_set(&wd, dev);
1061 device_lock(dev);
1062
1063 /*
1064 * This is a fib. But we'll allow new children to be added below
1065 * a resumed device, even if the device hasn't been completed yet.
1066 */
1067 dev->power.is_prepared = false;
1068
1069 if (dev->pm_domain) {
1070 info = "power domain ";
1071 callback = pm_op(&dev->pm_domain->ops, state);
1072 goto Driver;
1073 }
1074
1075 if (dev->type && dev->type->pm) {
1076 info = "type ";
1077 callback = pm_op(dev->type->pm, state);
1078 goto Driver;
1079 }
1080
1081 if (dev->class && dev->class->pm) {
1082 info = "class ";
1083 callback = pm_op(dev->class->pm, state);
1084 goto Driver;
1085 }
1086
1087 if (dev->bus) {
1088 if (dev->bus->pm) {
1089 info = "bus ";
1090 callback = pm_op(dev->bus->pm, state);
1091 } else if (dev->bus->resume) {
1092 info = "legacy bus ";
1093 callback = dev->bus->resume;
1094 goto End;
1095 }
1096 }
1097
1098 Driver:
1099 if (!callback && dev->driver && dev->driver->pm) {
1100 info = "driver ";
1101 callback = pm_op(dev->driver->pm, state);
1102 }
1103
1104 End:
1105 error = dpm_run_callback(callback, dev, state, info);
1106
1107 device_unlock(dev);
1108 dpm_watchdog_clear(&wd);
1109
1110 Complete:
1111 complete_all(&dev->power.completion);
1112
1113 TRACE_RESUME(error);
1114
1115 if (error) {
1116 WRITE_ONCE(async_error, error);
1117 dpm_save_failed_dev(dev_name(dev));
1118 pm_dev_err(dev, state, async ? " async" : "", error);
1119 }
1120
1121 dpm_async_resume_subordinate(dev, async_resume);
1122 }
1123
1124 static void async_resume(void *data, async_cookie_t cookie)
1125 {
1126 struct device *dev = data;
1127
1128 device_resume(dev, pm_transition, true);
1129 put_device(dev);
1130 }
1131
1132 /**
1133 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1134 * @state: PM transition of the system being carried out.
1135 *
1136 * Execute the appropriate "resume" callback for all devices whose status
1137 * indicates that they are suspended.
1138 */
1139 void dpm_resume(pm_message_t state)
1140 {
1141 struct device *dev;
1142 ktime_t starttime = ktime_get();
1143
1144 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1145
1146 pm_transition = state;
1147 async_error = 0;
1148
1149 mutex_lock(&dpm_list_mtx);
1150
1151 /*
1152 * Start processing "async" root devices upfront so they don't wait for
1153 * the "sync" devices they don't depend on.
1154 */
1155 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1156 dpm_clear_async_state(dev);
1157 if (dpm_root_device(dev))
1158 dpm_async_with_cleanup(dev, async_resume);
1159 }
1160
1161 while (!list_empty(&dpm_suspended_list)) {
1162 dev = to_device(dpm_suspended_list.next);
1163 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1164
1165 if (!dpm_async_fn(dev, async_resume)) {
1166 get_device(dev);
1167
1168 mutex_unlock(&dpm_list_mtx);
1169
1170 device_resume(dev, state, false);
1171
1172 put_device(dev);
1173
1174 mutex_lock(&dpm_list_mtx);
1175 }
1176 }
1177 mutex_unlock(&dpm_list_mtx);
1178 async_synchronize_full();
1179 dpm_show_time(starttime, state, 0, NULL);
1180 if (READ_ONCE(async_error))
1181 dpm_save_failed_step(SUSPEND_RESUME);
1182
1183 cpufreq_resume();
1184 devfreq_resume();
1185 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1186 }
1187
1188 /**
1189 * device_complete - Complete a PM transition for given device.
1190 * @dev: Device to handle.
1191 * @state: PM transition of the system being carried out.
1192 */
1193 static void device_complete(struct device *dev, pm_message_t state)
1194 {
1195 void (*callback)(struct device *) = NULL;
1196 const char *info = NULL;
1197
1198 if (dev->power.syscore)
1199 goto out;
1200
1201 device_lock(dev);
1202
1203 if (dev->pm_domain) {
1204 info = "completing power domain ";
1205 callback = dev->pm_domain->ops.complete;
1206 } else if (dev->type && dev->type->pm) {
1207 info = "completing type ";
1208 callback = dev->type->pm->complete;
1209 } else if (dev->class && dev->class->pm) {
1210 info = "completing class ";
1211 callback = dev->class->pm->complete;
1212 } else if (dev->bus && dev->bus->pm) {
1213 info = "completing bus ";
1214 callback = dev->bus->pm->complete;
1215 }
1216
1217 if (!callback && dev->driver && dev->driver->pm) {
1218 info = "completing driver ";
1219 callback = dev->driver->pm->complete;
1220 }
1221
1222 if (callback) {
1223 pm_dev_dbg(dev, state, info);
1224 callback(dev);
1225 }
1226
1227 device_unlock(dev);
1228
1229 out:
1230 /* If enabling runtime PM for the device is blocked, unblock it. */
1231 pm_runtime_unblock(dev);
1232 pm_runtime_put(dev);
1233 }
1234
1235 /**
1236 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1237 * @state: PM transition of the system being carried out.
1238 *
1239 * Execute the ->complete() callbacks for all devices whose PM status is not
1240 * DPM_ON (this allows new devices to be registered).
1241 */
1242 void dpm_complete(pm_message_t state)
1243 {
1244 struct list_head list;
1245
1246 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1247
1248 INIT_LIST_HEAD(&list);
1249 mutex_lock(&dpm_list_mtx);
1250 while (!list_empty(&dpm_prepared_list)) {
1251 struct device *dev = to_device(dpm_prepared_list.prev);
1252
1253 get_device(dev);
1254 dev->power.is_prepared = false;
1255 list_move(&dev->power.entry, &list);
1256
1257 mutex_unlock(&dpm_list_mtx);
1258
1259 trace_device_pm_callback_start(dev, "", state.event);
1260 device_complete(dev, state);
1261 trace_device_pm_callback_end(dev, 0);
1262
1263 put_device(dev);
1264
1265 mutex_lock(&dpm_list_mtx);
1266 }
1267 list_splice(&list, &dpm_list);
1268 mutex_unlock(&dpm_list_mtx);
1269
1270 /* Allow device probing and trigger re-probing of deferred devices */
1271 device_unblock_probing();
1272 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1273 }
1274
1275 /**
1276 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1277 * @state: PM transition of the system being carried out.
1278 *
1279 * Execute "resume" callbacks for all devices and complete the PM transition of
1280 * the system.
1281 */
1282 void dpm_resume_end(pm_message_t state)
1283 {
1284 dpm_resume(state);
1285 pm_restore_gfp_mask();
1286 dpm_complete(state);
1287 }
1288 EXPORT_SYMBOL_GPL(dpm_resume_end);
1289
1290
1291 /*------------------------- Suspend routines -------------------------*/
1292
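/* "Leaf" devices have no children and no consumers, so suspend can start there. */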
1293 static bool dpm_leaf_device(struct device *dev)
1294 {
1295 struct device *child;
1296
1297 lockdep_assert_held(&dpm_list_mtx);
1298
1299 child = device_find_any_child(dev);
1300 if (child) {
1301 put_device(child);
1302
1303 return false;
1304 }
1305
1306 /*
1307 * Since this function is required to run under dpm_list_mtx, the
1308 * list_empty() below will only return true if the device's list of
1309 * consumers is actually empty before calling it.
1310 */
1311 return list_empty(&dev->links.consumers);
1312 }
1313
1314 static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
1315 {
1316 guard(mutex)(&dpm_list_mtx);
1317
1318 /*
1319 * If the device is suspended asynchronously and the parent's callback
1320 * deletes both the device and the parent itself, the parent object may
1321 * be freed while this function is running, so avoid that by checking
1322 * if the device has been deleted already as the parent cannot be
1323 * deleted before it.
1324 */
1325 if (!device_pm_initialized(dev))
1326 return false;
1327
1328 /* Start processing the device's parent if it is "async". */
1329 if (dev->parent)
1330 dpm_async_with_cleanup(dev->parent, func);
1331
1332 return true;
1333 }
1334
1335 static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
1336 {
1337 struct device_link *link;
1338 int idx;
1339
1340 if (!dpm_async_suspend_parent(dev, func))
1341 return;
1342
1343 idx = device_links_read_lock();
1344
1345 /* Start processing the device's "async" suppliers. */
1346 dev_for_each_link_to_supplier(link, dev)
1347 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
1348 dpm_async_with_cleanup(link->supplier, func);
1349
1350 device_links_read_unlock(idx);
1351 }
1352
1353 static void dpm_async_suspend_complete_all(struct list_head *device_list)
1354 {
1355 struct device *dev;
1356
1357 guard(mutex)(&async_wip_mtx);
1358
1359 list_for_each_entry_reverse(dev, device_list, power.entry) {
1360 /*
1361 * In case the device is being waited for and async processing
1362 * has not started for it yet, let the waiters make progress.
1363 */
1364 if (!dev->power.work_in_progress)
1365 complete_all(&dev->power.completion);
1366 }
1367 }
1368
1369 /**
1370 * resume_event - Return a "resume" message for given "suspend" sleep state.
1371 * @sleep_state: PM message representing a sleep state.
1372 *
1373 * Return a PM message representing the resume event corresponding to given
1374 * sleep state.
1375 */
1376 static pm_message_t resume_event(pm_message_t sleep_state)
1377 {
1378 switch (sleep_state.event) {
1379 case PM_EVENT_SUSPEND:
1380 return PMSG_RESUME;
1381 case PM_EVENT_FREEZE:
1382 case PM_EVENT_QUIESCE:
1383 return PMSG_RECOVER;
1384 case PM_EVENT_HIBERNATE:
1385 return PMSG_RESTORE;
1386 }
1387 return PMSG_ON;
1388 }
1389
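/* Make sure the parent and suppliers of @dev will be resumed as well. */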
1390 static void dpm_superior_set_must_resume(struct device *dev)
1391 {
1392 struct device_link *link;
1393 int idx;
1394
1395 if (dev->parent)
1396 dev->parent->power.must_resume = true;
1397
1398 idx = device_links_read_lock();
1399
1400 dev_for_each_link_to_supplier(link, dev)
1401 link->supplier->power.must_resume = true;
1402
1403 device_links_read_unlock(idx);
1404 }
1405
1406 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1407
1408 /**
1409 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1410 * @dev: Device to handle.
1411 * @state: PM transition of the system being carried out.
1412 * @async: If true, the device is being suspended asynchronously.
1413 *
1414 * The driver of @dev will not receive interrupts while this function is being
1415 * executed.
1416 */
1417 static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1418 {
1419 pm_callback_t callback = NULL;
1420 const char *info = NULL;
1421 int error = 0;
1422
1423 TRACE_DEVICE(dev);
1424 TRACE_SUSPEND(0);
1425
1426 dpm_wait_for_subordinate(dev, async);
1427
1428 if (READ_ONCE(async_error))
1429 goto Complete;
1430
1431 if (dev->power.syscore || dev->power.direct_complete)
1432 goto Complete;
1433
1434 if (dev->pm_domain) {
1435 info = "noirq power domain ";
1436 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1437 } else if (dev->type && dev->type->pm) {
1438 info = "noirq type ";
1439 callback = pm_noirq_op(dev->type->pm, state);
1440 } else if (dev->class && dev->class->pm) {
1441 info = "noirq class ";
1442 callback = pm_noirq_op(dev->class->pm, state);
1443 } else if (dev->bus && dev->bus->pm) {
1444 info = "noirq bus ";
1445 callback = pm_noirq_op(dev->bus->pm, state);
1446 }
1447 if (callback)
1448 goto Run;
1449
1450 if (dev_pm_skip_suspend(dev))
1451 goto Skip;
1452
1453 if (dev->driver && dev->driver->pm) {
1454 info = "noirq driver ";
1455 callback = pm_noirq_op(dev->driver->pm, state);
1456 }
1457
1458 Run:
1459 error = dpm_run_callback(callback, dev, state, info);
1460 if (error) {
1461 WRITE_ONCE(async_error, error);
1462 dpm_save_failed_dev(dev_name(dev));
1463 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1464 goto Complete;
1465 }
1466
1467 Skip:
1468 dev->power.is_noirq_suspended = true;
1469
1470 /*
1471 * Devices must be resumed unless they are explicitly allowed to be left
1472 * in suspend, but even in that case skipping the resume of devices that
1473 * were in use right before the system suspend (as indicated by their
1474 * runtime PM usage counters and child counters) would be suboptimal.
1475 */
1476 if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1477 dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1478 dev->power.must_resume = true;
1479
1480 if (dev->power.must_resume)
1481 dpm_superior_set_must_resume(dev);
1482
1483 Complete:
1484 complete_all(&dev->power.completion);
1485 TRACE_SUSPEND(error);
1486
1487 if (error || READ_ONCE(async_error))
1488 return;
1489
1490 dpm_async_suspend_superior(dev, async_suspend_noirq);
1491 }
1492
1493 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1494 {
1495 struct device *dev = data;
1496
1497 device_suspend_noirq(dev, pm_transition, true);
1498 put_device(dev);
1499 }
1500
1501 static int dpm_noirq_suspend_devices(pm_message_t state)
1502 {
1503 ktime_t starttime = ktime_get();
1504 struct device *dev;
1505 int error;
1506
1507 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1508
1509 pm_transition = state;
1510 async_error = 0;
1511
1512 mutex_lock(&dpm_list_mtx);
1513
1514 /*
1515 * Start processing "async" leaf devices upfront so they don't need to
1516 * wait for the "sync" devices they don't depend on.
1517 */
1518 list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1519 dpm_clear_async_state(dev);
1520 if (dpm_leaf_device(dev))
1521 dpm_async_with_cleanup(dev, async_suspend_noirq);
1522 }
1523
1524 while (!list_empty(&dpm_late_early_list)) {
1525 dev = to_device(dpm_late_early_list.prev);
1526
1527 list_move(&dev->power.entry, &dpm_noirq_list);
1528
1529 if (dpm_async_fn(dev, async_suspend_noirq))
1530 continue;
1531
1532 get_device(dev);
1533
1534 mutex_unlock(&dpm_list_mtx);
1535
1536 device_suspend_noirq(dev, state, false);
1537
1538 put_device(dev);
1539
1540 mutex_lock(&dpm_list_mtx);
1541
1542 if (READ_ONCE(async_error)) {
1543 dpm_async_suspend_complete_all(&dpm_late_early_list);
1544 /*
1545 * Move all devices to the target list to resume them
1546 * properly.
1547 */
1548 list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1549 break;
1550 }
1551 }
1552
1553 mutex_unlock(&dpm_list_mtx);
1554
1555 async_synchronize_full();
1556
1557 error = READ_ONCE(async_error);
1558 if (error)
1559 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1560
1561 dpm_show_time(starttime, state, error, "noirq");
1562 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1563 return error;
1564 }
1565
1566 /**
1567 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1568 * @state: PM transition of the system being carried out.
1569 *
1570 * Prevent device drivers' interrupt handlers from being called and invoke
1571 * "noirq" suspend callbacks for all non-sysdev devices.
1572 */
1573 int dpm_suspend_noirq(pm_message_t state)
1574 {
1575 int ret;
1576
1577 device_wakeup_arm_wake_irqs();
1578 suspend_device_irqs();
1579
1580 ret = dpm_noirq_suspend_devices(state);
1581 if (ret)
1582 dpm_resume_noirq(resume_event(state));
1583
1584 return ret;
1585 }
1586
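/*
 * If @dev is in a wakeup path, mark its parent as being in the wakeup path
 * too, unless the parent ignores its children for PM purposes.
 */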
1587 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1588 {
1589 struct device *parent = dev->parent;
1590
1591 if (!parent)
1592 return;
1593
1594 spin_lock_irq(&parent->power.lock);
1595
1596 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1597 parent->power.wakeup_path = true;
1598
1599 spin_unlock_irq(&parent->power.lock);
1600 }
1601
1602 static void async_suspend_late(void *data, async_cookie_t cookie);
1603
1604 /**
1605 * device_suspend_late - Execute a "late suspend" callback for given device.
1606 * @dev: Device to handle.
1607 * @state: PM transition of the system being carried out.
1608 * @async: If true, the device is being suspended asynchronously.
1609 *
1610 * Runtime PM is disabled for @dev while this function is being executed.
1611 */
1612 static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
1613 {
1614 pm_callback_t callback = NULL;
1615 const char *info = NULL;
1616 int error = 0;
1617
1618 TRACE_DEVICE(dev);
1619 TRACE_SUSPEND(0);
1620
1621 dpm_wait_for_subordinate(dev, async);
1622
1623 if (READ_ONCE(async_error))
1624 goto Complete;
1625
1626 if (pm_wakeup_pending()) {
1627 WRITE_ONCE(async_error, -EBUSY);
1628 goto Complete;
1629 }
1630
1631 if (dev->power.direct_complete)
1632 goto Complete;
1633
1634 /*
1635 * Disable runtime PM for the device without checking if there is a
1636 * pending resume request for it.
1637 */
1638 __pm_runtime_disable(dev, false);
1639
1640 if (dev->power.syscore)
1641 goto Skip;
1642
1643 if (dev->pm_domain) {
1644 info = "late power domain ";
1645 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1646 } else if (dev->type && dev->type->pm) {
1647 info = "late type ";
1648 callback = pm_late_early_op(dev->type->pm, state);
1649 } else if (dev->class && dev->class->pm) {
1650 info = "late class ";
1651 callback = pm_late_early_op(dev->class->pm, state);
1652 } else if (dev->bus && dev->bus->pm) {
1653 info = "late bus ";
1654 callback = pm_late_early_op(dev->bus->pm, state);
1655 }
1656 if (callback)
1657 goto Run;
1658
1659 if (dev_pm_skip_suspend(dev))
1660 goto Skip;
1661
1662 if (dev->driver && dev->driver->pm) {
1663 info = "late driver ";
1664 callback = pm_late_early_op(dev->driver->pm, state);
1665 }
1666
1667 Run:
1668 error = dpm_run_callback(callback, dev, state, info);
1669 if (error) {
1670 WRITE_ONCE(async_error, error);
1671 dpm_save_failed_dev(dev_name(dev));
1672 pm_dev_err(dev, state, async ? " async late" : " late", error);
1673 pm_runtime_enable(dev);
1674 goto Complete;
1675 }
1676 dpm_propagate_wakeup_to_parent(dev);
1677
1678 Skip:
1679 dev->power.is_late_suspended = true;
1680
1681 Complete:
1682 TRACE_SUSPEND(error);
1683 complete_all(&dev->power.completion);
1684
1685 if (error || READ_ONCE(async_error))
1686 return;
1687
1688 dpm_async_suspend_superior(dev, async_suspend_late);
1689 }
1690
1691 static void async_suspend_late(void *data, async_cookie_t cookie)
1692 {
1693 struct device *dev = data;
1694
1695 device_suspend_late(dev, pm_transition, true);
1696 put_device(dev);
1697 }
1698
1699 /**
1700 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1701 * @state: PM transition of the system being carried out.
1702 */
1703 int dpm_suspend_late(pm_message_t state)
1704 {
1705 ktime_t starttime = ktime_get();
1706 struct device *dev;
1707 int error;
1708
1709 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1710
1711 pm_transition = state;
1712 async_error = 0;
1713
1714 wake_up_all_idle_cpus();
1715
1716 mutex_lock(&dpm_list_mtx);
1717
1718 /*
1719 * Start processing "async" leaf devices upfront so they don't need to
1720 * wait for the "sync" devices they don't depend on.
1721 */
1722 list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1723 dpm_clear_async_state(dev);
1724 if (dpm_leaf_device(dev))
1725 dpm_async_with_cleanup(dev, async_suspend_late);
1726 }
1727
1728 while (!list_empty(&dpm_suspended_list)) {
1729 dev = to_device(dpm_suspended_list.prev);
1730
1731 list_move(&dev->power.entry, &dpm_late_early_list);
1732
1733 if (dpm_async_fn(dev, async_suspend_late))
1734 continue;
1735
1736 get_device(dev);
1737
1738 mutex_unlock(&dpm_list_mtx);
1739
1740 device_suspend_late(dev, state, false);
1741
1742 put_device(dev);
1743
1744 mutex_lock(&dpm_list_mtx);
1745
1746 if (READ_ONCE(async_error)) {
1747 dpm_async_suspend_complete_all(&dpm_suspended_list);
1748 /*
1749 * Move all devices to the target list to resume them
1750 * properly.
1751 */
1752 list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1753 break;
1754 }
1755 }
1756
1757 mutex_unlock(&dpm_list_mtx);
1758
1759 async_synchronize_full();
1760
1761 error = READ_ONCE(async_error);
1762 if (error) {
1763 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1764 dpm_resume_early(resume_event(state));
1765 }
1766 dpm_show_time(starttime, state, error, "late");
1767 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1768 return error;
1769 }
1770
1771 /**
1772 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1773 * @state: PM transition of the system being carried out.
1774 */
1775 int dpm_suspend_end(pm_message_t state)
1776 {
1777 ktime_t starttime = ktime_get();
1778 int error;
1779
1780 error = dpm_suspend_late(state);
1781 if (error)
1782 goto out;
1783
1784 error = dpm_suspend_noirq(state);
1785 if (error)
1786 dpm_resume_early(resume_event(state));
1787
1788 out:
1789 dpm_show_time(starttime, state, error, "end");
1790 return error;
1791 }
1792 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1793
1794 /**
1795 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1796 * @dev: Device to suspend.
1797 * @state: PM transition of the system being carried out.
1798 * @cb: Suspend callback to execute.
1799 * @info: string description of caller.
1800 */
1801 static int legacy_suspend(struct device *dev, pm_message_t state,
1802 int (*cb)(struct device *dev, pm_message_t state),
1803 const char *info)
1804 {
1805 int error;
1806 ktime_t calltime;
1807
1808 calltime = initcall_debug_start(dev, cb);
1809
1810 trace_device_pm_callback_start(dev, info, state.event);
1811 error = cb(dev, state);
1812 trace_device_pm_callback_end(dev, error);
1813 suspend_report_result(dev, cb, error);
1814
1815 initcall_debug_report(dev, calltime, cb, error);
1816
1817 return error;
1818 }
1819
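/*
 * Since @dev ran real suspend callbacks, its parent and suppliers cannot use
 * the "direct complete" optimization, so clear their direct_complete flags.
 */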
1820 static void dpm_clear_superiors_direct_complete(struct device *dev)
1821 {
1822 struct device_link *link;
1823 int idx;
1824
1825 if (dev->parent) {
1826 spin_lock_irq(&dev->parent->power.lock);
1827 dev->parent->power.direct_complete = false;
1828 spin_unlock_irq(&dev->parent->power.lock);
1829 }
1830
1831 idx = device_links_read_lock();
1832
1833 dev_for_each_link_to_supplier(link, dev) {
1834 spin_lock_irq(&link->supplier->power.lock);
1835 link->supplier->power.direct_complete = false;
1836 spin_unlock_irq(&link->supplier->power.lock);
1837 }
1838
1839 device_links_read_unlock(idx);
1840 }
1841
1842 static void async_suspend(void *data, async_cookie_t cookie);
1843
1844 /**
1845 * device_suspend - Execute "suspend" callbacks for given device.
1846 * @dev: Device to handle.
1847 * @state: PM transition of the system being carried out.
1848 * @async: If true, the device is being suspended asynchronously.
1849 */
1850 static void device_suspend(struct device *dev, pm_message_t state, bool async)
1851 {
1852 pm_callback_t callback = NULL;
1853 const char *info = NULL;
1854 int error = 0;
1855 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1856
1857 TRACE_DEVICE(dev);
1858 TRACE_SUSPEND(0);
1859
1860 dpm_wait_for_subordinate(dev, async);
1861
1862 if (READ_ONCE(async_error)) {
1863 dev->power.direct_complete = false;
1864 goto Complete;
1865 }
1866
1867 /*
1868 * Wait for possible runtime PM transitions of the device in progress
1869 * to complete and if there's a runtime resume request pending for it,
1870 * resume it before proceeding with invoking the system-wide suspend
1871 * callbacks for it.
1872 *
1873 * If the system-wide suspend callbacks below change the configuration
1874 * of the device, they must disable runtime PM for it or otherwise
1875 * ensure that its runtime-resume callbacks will not be confused by that
1876 * change in case they are invoked going forward.
1877 */
1878 pm_runtime_barrier(dev);
1879
1880 if (pm_wakeup_pending()) {
1881 dev->power.direct_complete = false;
1882 WRITE_ONCE(async_error, -EBUSY);
1883 goto Complete;
1884 }
1885
1886 if (dev->power.syscore)
1887 goto Complete;
1888
1889 /* Avoid direct_complete to let wakeup_path propagate. */
1890 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1891 dev->power.direct_complete = false;
1892
1893 if (dev->power.direct_complete) {
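/*
 * Runtime PM is disabled below so that the runtime-suspended status cannot
 * change once it has been confirmed; the second check catches a runtime
 * resume that raced with the first one.
 */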
1894 if (pm_runtime_status_suspended(dev)) {
1895 pm_runtime_disable(dev);
1896 if (pm_runtime_status_suspended(dev)) {
1897 pm_dev_dbg(dev, state, "direct-complete ");
1898 dev->power.is_suspended = true;
1899 goto Complete;
1900 }
1901
1902 pm_runtime_enable(dev);
1903 }
1904 dev->power.direct_complete = false;
1905 }
1906
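/*
 * Resume may be skipped later only if the driver has opted in via
 * DPM_FLAG_MAY_SKIP_RESUME; otherwise the device will have to be resumed.
 */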
1907 dev->power.may_skip_resume = true;
1908 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1909
1910 dpm_watchdog_set(&wd, dev);
1911 device_lock(dev);
1912
1913 if (dev->pm_domain) {
1914 info = "power domain ";
1915 callback = pm_op(&dev->pm_domain->ops, state);
1916 goto Run;
1917 }
1918
1919 if (dev->type && dev->type->pm) {
1920 info = "type ";
1921 callback = pm_op(dev->type->pm, state);
1922 goto Run;
1923 }
1924
1925 if (dev->class && dev->class->pm) {
1926 info = "class ";
1927 callback = pm_op(dev->class->pm, state);
1928 goto Run;
1929 }
1930
1931 if (dev->bus) {
1932 if (dev->bus->pm) {
1933 info = "bus ";
1934 callback = pm_op(dev->bus->pm, state);
1935 } else if (dev->bus->suspend) {
1936 pm_dev_dbg(dev, state, "legacy bus ");
1937 error = legacy_suspend(dev, state, dev->bus->suspend,
1938 "legacy bus ");
1939 goto End;
1940 }
1941 }
1942
1943 Run:
1944 if (!callback && dev->driver && dev->driver->pm) {
1945 info = "driver ";
1946 callback = pm_op(dev->driver->pm, state);
1947 }
1948
1949 error = dpm_run_callback(callback, dev, state, info);
1950
1951 End:
1952 if (!error) {
1953 dev->power.is_suspended = true;
1954 if (device_may_wakeup(dev))
1955 dev->power.wakeup_path = true;
1956
1957 dpm_propagate_wakeup_to_parent(dev);
1958 dpm_clear_superiors_direct_complete(dev);
1959 }
1960
1961 device_unlock(dev);
1962 dpm_watchdog_clear(&wd);
1963
1964 Complete:
1965 if (error) {
1966 WRITE_ONCE(async_error, error);
1967 dpm_save_failed_dev(dev_name(dev));
1968 pm_dev_err(dev, state, async ? " async" : "", error);
1969 }
1970
1971 complete_all(&dev->power.completion);
1972 TRACE_SUSPEND(error);
1973
1974 if (error || READ_ONCE(async_error))
1975 return;
1976
1977 dpm_async_suspend_superior(dev, async_suspend);
1978 }
1979
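/*
 * Illustrative sketch (kept under "#if 0", not built): how a driver's probe
 * path might set the flags consulted by device_suspend() above.
 * example_probe() is a hypothetical name; dev_pm_set_driver_flags() and the
 * DPM_FLAG_* values are the real interfaces.
 */
#if 0
static int example_probe(struct device *dev)
{
	/*
	 * Let the core skip ->resume() when possible and never apply the
	 * direct-complete optimization to this device.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME |
				     DPM_FLAG_NO_DIRECT_COMPLETE);
	return 0;
}
#endif
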
1980 static void async_suspend(void *data, async_cookie_t cookie)
1981 {
1982 struct device *dev = data;
1983
1984 device_suspend(dev, pm_transition, true);
1985 put_device(dev);
1986 }
1987
1988 /**
1989 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1990 * @state: PM transition of the system being carried out.
1991 */
1992 int dpm_suspend(pm_message_t state)
1993 {
1994 ktime_t starttime = ktime_get();
1995 struct device *dev;
1996 int error;
1997
1998 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1999 might_sleep();
2000
2001 devfreq_suspend();
2002 cpufreq_suspend();
2003
2004 pm_transition = state;
2005 async_error = 0;
2006
2007 mutex_lock(&dpm_list_mtx);
2008
2009 /*
2010 * Start processing "async" leaf devices upfront so they don't need to
2011 * wait for the "sync" devices they don't depend on.
2012 */
2013 list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
2014 dpm_clear_async_state(dev);
2015 if (dpm_leaf_device(dev))
2016 dpm_async_with_cleanup(dev, async_suspend);
2017 }
2018
2019 while (!list_empty(&dpm_prepared_list)) {
2020 dev = to_device(dpm_prepared_list.prev);
2021
2022 list_move(&dev->power.entry, &dpm_suspended_list);
2023
2024 if (dpm_async_fn(dev, async_suspend))
2025 continue;
2026
2027 get_device(dev);
2028
2029 mutex_unlock(&dpm_list_mtx);
2030
2031 device_suspend(dev, state, false);
2032
2033 put_device(dev);
2034
2035 mutex_lock(&dpm_list_mtx);
2036
2037 if (READ_ONCE(async_error)) {
2038 dpm_async_suspend_complete_all(&dpm_prepared_list);
2039 /*
2040 * Move all devices to the target list to resume them
2041 * properly.
2042 */
2043 list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
2044 break;
2045 }
2046 }
2047
2048 mutex_unlock(&dpm_list_mtx);
2049
2050 async_synchronize_full();
2051
2052 error = READ_ONCE(async_error);
2053 if (error)
2054 dpm_save_failed_step(SUSPEND_SUSPEND);
2055
2056 dpm_show_time(starttime, state, error, NULL);
2057 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
2058 return error;
2059 }
2060
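/*
 * Illustrative sketch (kept under "#if 0", not built): opting a device in to
 * the asynchronous handling used by dpm_suspend() above. example_add_device()
 * is a hypothetical name; device_enable_async_suspend() is the real interface.
 */
#if 0
static void example_add_device(struct device *dev)
{
	/* Let this device be suspended/resumed in an async work item. */
	device_enable_async_suspend(dev);
}
#endif
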
2061 static bool device_prepare_smart_suspend(struct device *dev)
2062 {
2063 struct device_link *link;
2064 bool ret = true;
2065 int idx;
2066
2067 /*
2068 * The "smart suspend" feature is enabled for devices whose drivers ask
2069 * for it and for devices without PM callbacks.
2070 *
2071 * However, if "smart suspend" is not enabled for the device's parent
2072 * or any of its suppliers that take runtime PM into account, it cannot
2073 * be enabled for the device either.
2074 */
2075 if (!dev->power.no_pm_callbacks &&
2076 !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2077 return false;
2078
2079 if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2080 !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2081 return false;
2082
2083 idx = device_links_read_lock();
2084
2085 dev_for_each_link_to_supplier(link, dev) {
2086 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
2087 continue;
2088
2089 if (!dev_pm_smart_suspend(link->supplier) &&
2090 !pm_runtime_blocked(link->supplier)) {
2091 ret = false;
2092 break;
2093 }
2094 }
2095
2096 device_links_read_unlock(idx);
2097
2098 return ret;
2099 }
2100
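/*
 * Illustrative sketch (kept under "#if 0", not built): a driver opting in to
 * "smart suspend" and honouring it in a late suspend callback.
 * example_suspend_late() is a hypothetical name; DPM_FLAG_SMART_SUSPEND and
 * dev_pm_skip_suspend() are the real interfaces.
 */
#if 0
static int example_suspend_late(struct device *dev)
{
	/* Left runtime-suspended by the core? Nothing more to do. */
	if (dev_pm_skip_suspend(dev))
		return 0;

	/* Otherwise put the hardware into its system-sleep state here. */
	return 0;
}
#endif
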
2101 /**
2102 * device_prepare - Prepare a device for system power transition.
2103 * @dev: Device to handle.
2104 * @state: PM transition of the system being carried out.
2105 *
2106 * Execute the ->prepare() callback(s) for the given device. No new children
2107 * of the device may be registered after this function has returned.
2108 */
2109 static int device_prepare(struct device *dev, pm_message_t state)
2110 {
2111 int (*callback)(struct device *) = NULL;
2112 bool smart_suspend;
2113 int ret = 0;
2114
2115 /*
2116 * If a device's parent goes into runtime suspend at the wrong time,
2117 * it won't be possible to resume the device. To prevent this we
2118 * block runtime suspend here, during the prepare phase, and allow
2119 * it again during the complete phase.
2120 */
2121 pm_runtime_get_noresume(dev);
2122 /*
2123 * If runtime PM is disabled for the device at this point and it has
2124 * never been enabled so far, it should not be enabled until this system
2125 * suspend-resume cycle is complete, so prepare to trigger a warning on
2126 * subsequent attempts to enable it.
2127 */
2128 smart_suspend = !pm_runtime_block_if_disabled(dev);
2129
2130 if (dev->power.syscore)
2131 return 0;
2132
2133 device_lock(dev);
2134
2135 dev->power.wakeup_path = false;
2136
2137 if (dev->power.no_pm_callbacks)
2138 goto unlock;
2139
2140 if (dev->pm_domain)
2141 callback = dev->pm_domain->ops.prepare;
2142 else if (dev->type && dev->type->pm)
2143 callback = dev->type->pm->prepare;
2144 else if (dev->class && dev->class->pm)
2145 callback = dev->class->pm->prepare;
2146 else if (dev->bus && dev->bus->pm)
2147 callback = dev->bus->pm->prepare;
2148
2149 if (!callback && dev->driver && dev->driver->pm)
2150 callback = dev->driver->pm->prepare;
2151
2152 if (callback)
2153 ret = callback(dev);
2154
2155 unlock:
2156 device_unlock(dev);
2157
2158 if (ret < 0) {
2159 suspend_report_result(dev, callback, ret);
2160 pm_runtime_put(dev);
2161 return ret;
2162 }
2163 /* Do not enable "smart suspend" for devices with disabled runtime PM. */
2164 if (smart_suspend)
2165 smart_suspend = device_prepare_smart_suspend(dev);
2166
2167 spin_lock_irq(&dev->power.lock);
2168
2169 dev->power.smart_suspend = smart_suspend;
2170 /*
2171 * A positive return value from ->prepare() means "this device appears
2172 * to be runtime-suspended and its state is fine, so if it really is
2173 * runtime-suspended, you can leave it in that state provided that you
2174 * will do the same thing with all of its descendants". This only
2175 * applies to suspend transitions, however.
2176 */
2177 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2178 (ret > 0 || dev->power.no_pm_callbacks) &&
2179 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2180
2181 spin_unlock_irq(&dev->power.lock);
2182
2183 return 0;
2184 }
2185
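/*
 * Illustrative sketch (kept under "#if 0", not built): a ->prepare() callback
 * that opts in to the direct-complete handling set up by device_prepare()
 * above by returning a positive value. example_prepare() is a hypothetical
 * name; pm_runtime_suspended() is the real interface.
 */
#if 0
static int example_prepare(struct device *dev)
{
	/*
	 * A positive return value means the runtime-suspended state of the
	 * device is suitable for system suspend, so it may be left alone.
	 */
	return pm_runtime_suspended(dev);
}
#endif
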
2186 /**
2187 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2188 * @state: PM transition of the system being carried out.
2189 *
2190 * Execute the ->prepare() callback(s) for all devices.
2191 */
2192 int dpm_prepare(pm_message_t state)
2193 {
2194 int error = 0;
2195
2196 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2197
2198 /*
2199 * Give the known devices a chance to complete their probes before we
2200 * disable further probing. This sync point is important at least at
2201 * boot time and during hibernation restore.
2202 */
2203 wait_for_device_probe();
2204 /*
2205 * Probing devices during suspend or hibernation is unsafe and would make
2206 * system behavior unpredictable, so prohibit device probing here and
2207 * defer the probes instead. The normal behavior will be restored in
2208 * dpm_complete().
2209 */
2210 device_block_probing();
2211
2212 mutex_lock(&dpm_list_mtx);
2213 while (!list_empty(&dpm_list) && !error) {
2214 struct device *dev = to_device(dpm_list.next);
2215
2216 get_device(dev);
2217
2218 mutex_unlock(&dpm_list_mtx);
2219
2220 trace_device_pm_callback_start(dev, "", state.event);
2221 error = device_prepare(dev, state);
2222 trace_device_pm_callback_end(dev, error);
2223
2224 mutex_lock(&dpm_list_mtx);
2225
2226 if (!error) {
2227 dev->power.is_prepared = true;
2228 if (!list_empty(&dev->power.entry))
2229 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2230 } else if (error == -EAGAIN) {
2231 error = 0;
2232 } else {
2233 dev_info(dev, "not prepared for power transition: code %d\n",
2234 error);
2235 }
2236
2237 mutex_unlock(&dpm_list_mtx);
2238
2239 put_device(dev);
2240
2241 mutex_lock(&dpm_list_mtx);
2242 }
2243 mutex_unlock(&dpm_list_mtx);
2244 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2245 return error;
2246 }
2247
2248 /**
2249 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2250 * @state: PM transition of the system being carried out.
2251 *
2252 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2253 * callbacks for them.
2254 */
2255 int dpm_suspend_start(pm_message_t state)
2256 {
2257 ktime_t starttime = ktime_get();
2258 int error;
2259
2260 error = dpm_prepare(state);
2261 if (error) {
2262 dpm_save_failed_step(SUSPEND_PREPARE);
2263 } else {
2264 pm_restrict_gfp_mask();
2265 error = dpm_suspend(state);
2266 }
2267
2268 dpm_show_time(starttime, state, error, "start");
2269 return error;
2270 }
2271 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2272
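/*
 * Illustrative sketch (kept under "#if 0", not built): how a system sleep
 * core path might pair dpm_suspend_start() with dpm_resume_end() around the
 * deeper suspend phases. example_do_deeper_phases() is a hypothetical
 * placeholder; the dpm_*() helpers are the real interfaces.
 */
#if 0
static int example_suspend_devices(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error)
		return error;

	/* example_do_deeper_phases() would run the late/noirq phases here. */

	dpm_resume_end(PMSG_RESUME);
	return 0;
}
#endif
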
2273 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2274 {
2275 if (ret)
2276 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2277 }
2278 EXPORT_SYMBOL_GPL(__suspend_report_result);
2279
2280 /**
2281 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2282 * @subordinate: Device that needs to wait for @dev.
2283 * @dev: Device to wait for.
2284 */
2285 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2286 {
2287 dpm_wait(dev, subordinate->power.async_suspend);
2288 return async_error;
2289 }
2290 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2291
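/*
 * Illustrative sketch (kept under "#if 0", not built): a suspend callback
 * that must not proceed before another device has finished its own PM
 * transition. example_suspend() and example_get_companion() are hypothetical
 * names; device_pm_wait_for_dev() is the real interface.
 */
#if 0
static int example_suspend(struct device *dev)
{
	struct device *companion = example_get_companion(dev); /* hypothetical */
	int error;

	/* Block until the companion's callback for this phase completes. */
	error = device_pm_wait_for_dev(dev, companion);
	if (error)
		return error;

	/* Now it is safe to suspend this device. */
	return 0;
}
#endif
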
2292 /**
2293 * dpm_for_each_dev - device iterator.
2294 * @data: data for the callback.
2295 * @fn: function to be called for each device.
2296 *
2297 * Iterate over devices in dpm_list, and call @fn for each device,
2298 * passing it @data.
2299 */
2300 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2301 {
2302 struct device *dev;
2303
2304 if (!fn)
2305 return;
2306
2307 device_pm_lock();
2308 list_for_each_entry(dev, &dpm_list, power.entry)
2309 fn(dev, data);
2310 device_pm_unlock();
2311 }
2312 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2313
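/*
 * Illustrative sketch (kept under "#if 0", not built): iterating over the
 * PM-tracked device list with dpm_for_each_dev(). example_count_cb() and
 * example_count_devices() are hypothetical names.
 */
#if 0
static void example_count_cb(struct device *dev, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_count_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, example_count_cb);
	return count;
}
#endif
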
2314 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2315 {
2316 if (!ops)
2317 return true;
2318
2319 return !ops->prepare &&
2320 !ops->suspend &&
2321 !ops->suspend_late &&
2322 !ops->suspend_noirq &&
2323 !ops->resume_noirq &&
2324 !ops->resume_early &&
2325 !ops->resume &&
2326 !ops->complete;
2327 }
2328
2329 void device_pm_check_callbacks(struct device *dev)
2330 {
2331 unsigned long flags;
2332
2333 spin_lock_irqsave(&dev->power.lock, flags);
2334 dev->power.no_pm_callbacks =
2335 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2336 !dev->bus->suspend && !dev->bus->resume)) &&
2337 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2338 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2339 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2340 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2341 !dev->driver->suspend && !dev->driver->resume));
2342 spin_unlock_irqrestore(&dev->power.lock, flags);
2343 }
2344
2345 bool dev_pm_skip_suspend(struct device *dev)
2346 {
2347 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2348 }
2349