1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * drivers/base/power/main.c - Where the driver meets power management.
4 *
5 * Copyright (c) 2003 Patrick Mochel
6 * Copyright (c) 2003 Open Source Development Lab
7 *
8 * The driver model core calls device_pm_add() when a device is registered.
9 * This will initialize the embedded device_pm_info object in the device
10 * and add it to the list of power-controlled devices. sysfs entries for
11 * controlling device power management will also be added.
12 *
13 * A separate list is used for keeping track of power info, because the power
14 * domain dependencies may differ from the ancestral dependencies that the
15 * subsystem list maintains.
16 */
17
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
/*
 * The entries in dpm_list are in a depth first order, simply because
 * children are guaranteed to be discovered after parents, and are inserted
 * at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held, we must
 * never try to acquire a device lock while holding dpm_list_mtx.
 */
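
/*
 * Illustrative sketch of the ordering rule above (not part of the PM core
 * code): any path that needs both a device lock and dpm_list_mtx must take
 * the device lock first, e.g.:
 *
 *	device_lock(dev);	// device lock first
 *	device_pm_lock();	// then the dpm_list lock
 *	...
 *	device_pm_unlock();
 *	device_unlock(dev);
 *
 * Acquiring a device lock while already holding dpm_list_mtx could deadlock
 * against device_pm_add() callers that hold the device lock.
 */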
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61
62 static DEFINE_MUTEX(async_wip_mtx);
63 static int async_error;
64
/**
 * pm_hibernate_is_recovering - Check if recovering from a hibernation error.
 *
 * Used to query whether dev_pm_ops.thaw() is being called for the normal
 * hibernation case or while recovering from an error.
 *
 * Return: true for the error case, false for the normal case.
 */
bool pm_hibernate_is_recovering(void)
{
	return pm_transition.event == PM_EVENT_RECOVER;
}
EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
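
/*
 * Illustrative use of the helper above in a driver's ->thaw() callback (a
 * hypothetical my_driver_thaw(), not defined in this file): branch on whether
 * thaw runs for normal hibernation or only to recover from a failure.
 *
 *	static int my_driver_thaw(struct device *dev)
 *	{
 *		if (pm_hibernate_is_recovering())
 *			return my_driver_minimal_reinit(dev);
 *
 *		return my_driver_full_reinit(dev);
 *	}
 */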
78
static const char *pm_verb(int event)
80 {
81 switch (event) {
82 case PM_EVENT_SUSPEND:
83 return "suspend";
84 case PM_EVENT_RESUME:
85 return "resume";
86 case PM_EVENT_FREEZE:
87 return "freeze";
88 case PM_EVENT_QUIESCE:
89 return "quiesce";
90 case PM_EVENT_HIBERNATE:
91 return "hibernate";
92 case PM_EVENT_THAW:
93 return "thaw";
94 case PM_EVENT_RESTORE:
95 return "restore";
96 case PM_EVENT_RECOVER:
97 return "recover";
98 default:
99 return "(unknown PM event)";
100 }
101 }
102
103 /**
104 * device_pm_sleep_init - Initialize system suspend-related device fields.
105 * @dev: Device object being initialized.
106 */
void device_pm_sleep_init(struct device *dev)
108 {
109 dev->power.is_prepared = false;
110 dev->power.is_suspended = false;
111 dev->power.is_noirq_suspended = false;
112 dev->power.is_late_suspended = false;
113 init_completion(&dev->power.completion);
114 complete_all(&dev->power.completion);
115 dev->power.wakeup = NULL;
116 INIT_LIST_HEAD(&dev->power.entry);
117 }
118
119 /**
120 * device_pm_lock - Lock the list of active devices used by the PM core.
121 */
void device_pm_lock(void)
123 {
124 mutex_lock(&dpm_list_mtx);
125 }
126
127 /**
128 * device_pm_unlock - Unlock the list of active devices used by the PM core.
129 */
void device_pm_unlock(void)
131 {
132 mutex_unlock(&dpm_list_mtx);
133 }
134
135 /**
136 * device_pm_add - Add a device to the PM core's list of active devices.
137 * @dev: Device to add to the list.
138 */
void device_pm_add(struct device *dev)
140 {
141 /* Skip PM setup/initialization. */
142 if (device_pm_not_required(dev))
143 return;
144
145 pr_debug("Adding info for %s:%s\n",
146 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147 device_pm_check_callbacks(dev);
148 mutex_lock(&dpm_list_mtx);
149 if (dev->parent && dev->parent->power.is_prepared)
150 dev_warn(dev, "parent %s should not be sleeping\n",
151 dev_name(dev->parent));
152 list_add_tail(&dev->power.entry, &dpm_list);
153 dev->power.in_dpm_list = true;
154 mutex_unlock(&dpm_list_mtx);
155 }
156
157 /**
158 * device_pm_remove - Remove a device from the PM core's list of active devices.
159 * @dev: Device to be removed from the list.
160 */
void device_pm_remove(struct device *dev)
162 {
163 if (device_pm_not_required(dev))
164 return;
165
166 pr_debug("Removing info for %s:%s\n",
167 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
168 complete_all(&dev->power.completion);
169 mutex_lock(&dpm_list_mtx);
170 list_del_init(&dev->power.entry);
171 dev->power.in_dpm_list = false;
172 mutex_unlock(&dpm_list_mtx);
173 device_wakeup_disable(dev);
174 pm_runtime_remove(dev);
175 device_pm_check_callbacks(dev);
176 }
177
178 /**
179 * device_pm_move_before - Move device in the PM core's list of active devices.
180 * @deva: Device to move in dpm_list.
181 * @devb: Device @deva should come before.
182 */
void device_pm_move_before(struct device *deva, struct device *devb)
184 {
185 pr_debug("Moving %s:%s before %s:%s\n",
186 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188 /* Delete deva from dpm_list and reinsert before devb. */
189 list_move_tail(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193 * device_pm_move_after - Move device in the PM core's list of active devices.
194 * @deva: Device to move in dpm_list.
195 * @devb: Device @deva should come after.
196 */
void device_pm_move_after(struct device *deva, struct device *devb)
198 {
199 pr_debug("Moving %s:%s after %s:%s\n",
200 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
201 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
202 /* Delete deva from dpm_list and reinsert after devb. */
203 list_move(&deva->power.entry, &devb->power.entry);
204 }
205
206 /**
207 * device_pm_move_last - Move device to end of the PM core's list of devices.
208 * @dev: Device to move in dpm_list.
209 */
void device_pm_move_last(struct device *dev)
211 {
212 pr_debug("Moving %s:%s to end of list\n",
213 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
214 list_move_tail(&dev->power.entry, &dpm_list);
215 }
216
static ktime_t initcall_debug_start(struct device *dev, void *cb)
218 {
219 if (!pm_print_times_enabled)
220 return 0;
221
222 dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
223 task_pid_nr(current),
224 dev->parent ? dev_name(dev->parent) : "none");
225 return ktime_get();
226 }
227
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
230 {
231 ktime_t rettime;
232
233 if (!pm_print_times_enabled)
234 return;
235
236 rettime = ktime_get();
237 dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
238 (unsigned long long)ktime_us_delta(rettime, calltime));
239 }
240
241 /**
242 * dpm_wait - Wait for a PM operation to complete.
243 * @dev: Device to wait for.
244 * @async: If unset, wait only if the device's power.async_suspend flag is set.
245 */
static void dpm_wait(struct device *dev, bool async)
247 {
248 if (!dev)
249 return;
250
251 if (async || (pm_async_enabled && dev->power.async_suspend))
252 wait_for_completion(&dev->power.completion);
253 }
254
static int dpm_wait_fn(struct device *dev, void *async_ptr)
256 {
257 dpm_wait(dev, *((bool *)async_ptr));
258 return 0;
259 }
260
static void dpm_wait_for_children(struct device *dev, bool async)
262 {
263 device_for_each_child(dev, &async, dpm_wait_fn);
264 }
265
static void dpm_wait_for_suppliers(struct device *dev, bool async)
267 {
268 struct device_link *link;
269 int idx;
270
271 idx = device_links_read_lock();
272
273 /*
274 * If the supplier goes away right after we've checked the link to it,
275 * we'll wait for its completion to change the state, but that's fine,
276 * because the only things that will block as a result are the SRCU
277 * callbacks freeing the link objects for the links in the list we're
278 * walking.
279 */
280 dev_for_each_link_to_supplier(link, dev)
281 if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
282 !device_link_flag_is_sync_state_only(link->flags))
283 dpm_wait(link->supplier, async);
284
285 device_links_read_unlock(idx);
286 }
287
static bool dpm_wait_for_superior(struct device *dev, bool async)
289 {
290 struct device *parent;
291
292 /*
293 * If the device is resumed asynchronously and the parent's callback
294 * deletes both the device and the parent itself, the parent object may
295 * be freed while this function is running, so avoid that by reference
296 * counting the parent once more unless the device has been deleted
297 * already (in which case return right away).
298 */
299 mutex_lock(&dpm_list_mtx);
300
301 if (!device_pm_initialized(dev)) {
302 mutex_unlock(&dpm_list_mtx);
303 return false;
304 }
305
306 parent = get_device(dev->parent);
307
308 mutex_unlock(&dpm_list_mtx);
309
310 dpm_wait(parent, async);
311 put_device(parent);
312
313 dpm_wait_for_suppliers(dev, async);
314
315 /*
316 * If the parent's callback has deleted the device, attempting to resume
317 * it would be invalid, so avoid doing that then.
318 */
319 return device_pm_initialized(dev);
320 }
321
static void dpm_wait_for_consumers(struct device *dev, bool async)
323 {
324 struct device_link *link;
325 int idx;
326
327 idx = device_links_read_lock();
328
329 /*
330 * The status of a device link can only be changed from "dormant" by a
331 * probe, but that cannot happen during system suspend/resume. In
332 * theory it can change to "dormant" at that time, but then it is
333 * reasonable to wait for the target device anyway (eg. if it goes
334 * away, it's better to wait for it to go away completely and then
335 * continue instead of trying to continue in parallel with its
336 * unregistration).
337 */
338 dev_for_each_link_to_consumer(link, dev)
339 if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
340 !device_link_flag_is_sync_state_only(link->flags))
341 dpm_wait(link->consumer, async);
342
343 device_links_read_unlock(idx);
344 }
345
static void dpm_wait_for_subordinate(struct device *dev, bool async)
347 {
348 dpm_wait_for_children(dev, async);
349 dpm_wait_for_consumers(dev, async);
350 }
351
352 /**
353 * pm_op - Return the PM operation appropriate for given PM event.
354 * @ops: PM operations to choose from.
355 * @state: PM transition of the system being carried out.
356 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
358 {
359 switch (state.event) {
360 #ifdef CONFIG_SUSPEND
361 case PM_EVENT_SUSPEND:
362 return ops->suspend;
363 case PM_EVENT_RESUME:
364 return ops->resume;
365 #endif /* CONFIG_SUSPEND */
366 #ifdef CONFIG_HIBERNATE_CALLBACKS
367 case PM_EVENT_FREEZE:
368 case PM_EVENT_QUIESCE:
369 return ops->freeze;
370 case PM_EVENT_HIBERNATE:
371 return ops->poweroff;
372 case PM_EVENT_THAW:
373 case PM_EVENT_RECOVER:
374 return ops->thaw;
375 case PM_EVENT_RESTORE:
376 return ops->restore;
377 #endif /* CONFIG_HIBERNATE_CALLBACKS */
378 }
379
380 return NULL;
381 }
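
/*
 * For reference, a driver-side dev_pm_ops that pm_op() dispatches to could be
 * set up as below (my_suspend()/my_resume() are hypothetical driver functions,
 * not defined in this file):
 *
 *	static const struct dev_pm_ops my_pm_ops = {
 *		SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
 *	};
 *
 * SYSTEM_SLEEP_PM_OPS() also points the hibernation callbacks (freeze, thaw,
 * poweroff, restore) at the same pair, which is what the PM_EVENT_* cases
 * above select from.
 */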
382
383 /**
384 * pm_late_early_op - Return the PM operation appropriate for given PM event.
385 * @ops: PM operations to choose from.
386 * @state: PM transition of the system being carried out.
387 *
388 * Runtime PM is disabled for @dev while this function is being executed.
389 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
392 {
393 switch (state.event) {
394 #ifdef CONFIG_SUSPEND
395 case PM_EVENT_SUSPEND:
396 return ops->suspend_late;
397 case PM_EVENT_RESUME:
398 return ops->resume_early;
399 #endif /* CONFIG_SUSPEND */
400 #ifdef CONFIG_HIBERNATE_CALLBACKS
401 case PM_EVENT_FREEZE:
402 case PM_EVENT_QUIESCE:
403 return ops->freeze_late;
404 case PM_EVENT_HIBERNATE:
405 return ops->poweroff_late;
406 case PM_EVENT_THAW:
407 case PM_EVENT_RECOVER:
408 return ops->thaw_early;
409 case PM_EVENT_RESTORE:
410 return ops->restore_early;
411 #endif /* CONFIG_HIBERNATE_CALLBACKS */
412 }
413
414 return NULL;
415 }
416
417 /**
418 * pm_noirq_op - Return the PM operation appropriate for given PM event.
419 * @ops: PM operations to choose from.
420 * @state: PM transition of the system being carried out.
421 *
422 * The driver of @dev will not receive interrupts while this function is being
423 * executed.
424 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
426 {
427 switch (state.event) {
428 #ifdef CONFIG_SUSPEND
429 case PM_EVENT_SUSPEND:
430 return ops->suspend_noirq;
431 case PM_EVENT_RESUME:
432 return ops->resume_noirq;
433 #endif /* CONFIG_SUSPEND */
434 #ifdef CONFIG_HIBERNATE_CALLBACKS
435 case PM_EVENT_FREEZE:
436 case PM_EVENT_QUIESCE:
437 return ops->freeze_noirq;
438 case PM_EVENT_HIBERNATE:
439 return ops->poweroff_noirq;
440 case PM_EVENT_THAW:
441 case PM_EVENT_RECOVER:
442 return ops->thaw_noirq;
443 case PM_EVENT_RESTORE:
444 return ops->restore_noirq;
445 #endif /* CONFIG_HIBERNATE_CALLBACKS */
446 }
447
448 return NULL;
449 }
450
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
452 {
453 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
454 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
455 ", may wakeup" : "", dev->power.driver_flags);
456 }
457
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
460 {
461 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
462 error);
463 }
464
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
467 {
468 ktime_t calltime;
469 u64 usecs64;
470 int usecs;
471
472 calltime = ktime_get();
473 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
474 do_div(usecs64, NSEC_PER_USEC);
475 usecs = usecs64;
476 if (usecs == 0)
477 usecs = 1;
478
479 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
480 info ?: "", info ? " " : "", pm_verb(state.event),
481 error ? "aborted" : "complete",
482 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
483 }
484
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
487 {
488 ktime_t calltime;
489 int error;
490
491 if (!cb)
492 return 0;
493
494 calltime = initcall_debug_start(dev, cb);
495
496 pm_dev_dbg(dev, state, info);
497 trace_device_pm_callback_start(dev, info, state.event);
498 error = cb(dev);
499 trace_device_pm_callback_end(dev, error);
500 suspend_report_result(dev, cb, error);
501
502 initcall_debug_report(dev, calltime, cb, error);
503
504 return error;
505 }
506
507 #ifdef CONFIG_DPM_WATCHDOG
508 struct dpm_watchdog {
509 struct device *dev;
510 struct task_struct *tsk;
511 struct timer_list timer;
512 bool fatal;
513 };
514
515 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
516 struct dpm_watchdog wd
517
518 /**
519 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
520 * @t: The timer that PM watchdog depends on.
521 *
522 * Called when a driver has timed out suspending or resuming.
523 * There's not much we can do here to recover so panic() to
524 * capture a crash-dump in pstore.
525 */
static void dpm_watchdog_handler(struct timer_list *t)
527 {
528 struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
529 struct timer_list *timer = &wd->timer;
530 unsigned int time_left;
531
532 if (wd->fatal) {
533 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
534 show_stack(wd->tsk, NULL, KERN_EMERG);
535 panic("%s %s: unrecoverable failure\n",
536 dev_driver_string(wd->dev), dev_name(wd->dev));
537 }
538
539 time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
540 dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
541 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
542 show_stack(wd->tsk, NULL, KERN_WARNING);
543
544 wd->fatal = true;
545 mod_timer(timer, jiffies + HZ * time_left);
546 }
547
548 /**
549 * dpm_watchdog_set - Enable pm watchdog for given device.
550 * @wd: Watchdog. Must be allocated on the stack.
551 * @dev: Device to handle.
552 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
554 {
555 struct timer_list *timer = &wd->timer;
556
557 wd->dev = dev;
558 wd->tsk = current;
559 wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
560
561 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
562 /* use same timeout value for both suspend and resume */
563 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
564 add_timer(timer);
565 }
566
567 /**
568 * dpm_watchdog_clear - Disable suspend/resume watchdog.
569 * @wd: Watchdog to disable.
570 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
572 {
573 struct timer_list *timer = &wd->timer;
574
575 timer_delete_sync(timer);
576 timer_destroy_on_stack(timer);
577 }
578 #else
579 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
580 #define dpm_watchdog_set(x, y)
581 #define dpm_watchdog_clear(x)
582 #endif
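
/*
 * Usage pattern of the watchdog helpers above, as used by device_resume()
 * and device_suspend() later in this file (sketch only):
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	device_lock(dev);
 *	... run the device's PM callback ...
 *	device_unlock(dev);
 *	dpm_watchdog_clear(&wd);
 *
 * With CONFIG_DPM_WATCHDOG unset, all three expand to no-ops.
 */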
583
584 /*------------------------- Resume routines -------------------------*/
585
586 /**
587 * dev_pm_skip_resume - System-wide device resume optimization check.
588 * @dev: Target device.
589 *
590 * Return:
591 * - %false if the transition under way is RESTORE.
592 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
593 * - The logical negation of %power.must_resume otherwise (that is, when the
594 * transition under way is RESUME).
595 */
bool dev_pm_skip_resume(struct device *dev)
597 {
598 if (pm_transition.event == PM_EVENT_RESTORE)
599 return false;
600
601 if (pm_transition.event == PM_EVENT_THAW)
602 return dev_pm_skip_suspend(dev);
603
604 return !dev->power.must_resume;
605 }
606
static bool is_async(struct device *dev)
608 {
609 return dev->power.async_suspend && pm_async_enabled
610 && !pm_trace_is_enabled();
611 }
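
/*
 * A subsystem or driver opts a device into the asynchronous path checked by
 * is_async() with device_enable_async_suspend(), typically at probe time
 * (sketch, assuming a hypothetical my_probe()):
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		...
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 *
 * The global side of the check, pm_async_enabled, is controlled through
 * /sys/power/pm_async.
 */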
612
static bool __dpm_async(struct device *dev, async_func_t func)
614 {
615 if (dev->power.work_in_progress)
616 return true;
617
618 if (!is_async(dev))
619 return false;
620
621 dev->power.work_in_progress = true;
622
623 get_device(dev);
624
625 if (async_schedule_dev_nocall(func, dev))
626 return true;
627
628 put_device(dev);
629
630 return false;
631 }
632
static bool dpm_async_fn(struct device *dev, async_func_t func)
634 {
635 guard(mutex)(&async_wip_mtx);
636
637 return __dpm_async(dev, func);
638 }
639
static int dpm_async_with_cleanup(struct device *dev, void *fn)
641 {
642 guard(mutex)(&async_wip_mtx);
643
644 if (!__dpm_async(dev, fn))
645 dev->power.work_in_progress = false;
646
647 return 0;
648 }
649
static void dpm_async_resume_children(struct device *dev, async_func_t func)
651 {
652 /*
653 * Prevent racing with dpm_clear_async_state() during initial list
654 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
655 * dpm_resume().
656 */
657 guard(mutex)(&dpm_list_mtx);
658
659 /*
660 * Start processing "async" children of the device unless it's been
661 * started already for them.
662 */
663 device_for_each_child(dev, func, dpm_async_with_cleanup);
664 }
665
static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
667 {
668 struct device_link *link;
669 int idx;
670
671 dpm_async_resume_children(dev, func);
672
673 idx = device_links_read_lock();
674
675 /* Start processing the device's "async" consumers. */
676 dev_for_each_link_to_consumer(link, dev)
677 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
678 dpm_async_with_cleanup(link->consumer, func);
679
680 device_links_read_unlock(idx);
681 }
682
static void dpm_clear_async_state(struct device *dev)
684 {
685 reinit_completion(&dev->power.completion);
686 dev->power.work_in_progress = false;
687 }
688
static bool dpm_root_device(struct device *dev)
690 {
691 lockdep_assert_held(&dpm_list_mtx);
692
693 /*
694 * Since this function is required to run under dpm_list_mtx, the
695 * list_empty() below will only return true if the device's list of
696 * consumers is actually empty before calling it.
697 */
698 return !dev->parent && list_empty(&dev->links.suppliers);
699 }
700
701 static void async_resume_noirq(void *data, async_cookie_t cookie);
702
703 /**
704 * device_resume_noirq - Execute a "noirq resume" callback for given device.
705 * @dev: Device to handle.
706 * @state: PM transition of the system being carried out.
707 * @async: If true, the device is being resumed asynchronously.
708 *
709 * The driver of @dev will not receive interrupts while this function is being
710 * executed.
711 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
713 {
714 pm_callback_t callback = NULL;
715 const char *info = NULL;
716 bool skip_resume;
717 int error = 0;
718
719 TRACE_DEVICE(dev);
720 TRACE_RESUME(0);
721
722 if (dev->power.syscore || dev->power.direct_complete)
723 goto Out;
724
725 if (!dev->power.is_noirq_suspended) {
/*
 * This means that system suspend has been aborted in the noirq
 * phase before invoking the noirq suspend callback for the
 * device, so if device_suspend_late() has left it in suspend,
 * device_resume_early() should leave it in suspend as well, in
 * case its early resume depends on the noirq resume that has
 * not run.
 */
734 if (dev_pm_skip_suspend(dev))
735 dev->power.must_resume = false;
736
737 goto Out;
738 }
739
740 if (!dpm_wait_for_superior(dev, async))
741 goto Out;
742
743 skip_resume = dev_pm_skip_resume(dev);
744 /*
745 * If the driver callback is skipped below or by the middle layer
746 * callback and device_resume_early() also skips the driver callback for
747 * this device later, it needs to appear as "suspended" to PM-runtime,
748 * so change its status accordingly.
749 *
750 * Otherwise, the device is going to be resumed, so set its PM-runtime
751 * status to "active" unless its power.smart_suspend flag is clear, in
752 * which case it is not necessary to update its PM-runtime status.
753 */
754 if (skip_resume)
755 pm_runtime_set_suspended(dev);
756 else if (dev_pm_smart_suspend(dev))
757 pm_runtime_set_active(dev);
758
759 if (dev->pm_domain) {
760 info = "noirq power domain ";
761 callback = pm_noirq_op(&dev->pm_domain->ops, state);
762 } else if (dev->type && dev->type->pm) {
763 info = "noirq type ";
764 callback = pm_noirq_op(dev->type->pm, state);
765 } else if (dev->class && dev->class->pm) {
766 info = "noirq class ";
767 callback = pm_noirq_op(dev->class->pm, state);
768 } else if (dev->bus && dev->bus->pm) {
769 info = "noirq bus ";
770 callback = pm_noirq_op(dev->bus->pm, state);
771 }
772 if (callback)
773 goto Run;
774
775 if (skip_resume)
776 goto Skip;
777
778 if (dev->driver && dev->driver->pm) {
779 info = "noirq driver ";
780 callback = pm_noirq_op(dev->driver->pm, state);
781 }
782
783 Run:
784 error = dpm_run_callback(callback, dev, state, info);
785
786 Skip:
787 dev->power.is_noirq_suspended = false;
788
789 Out:
790 complete_all(&dev->power.completion);
791 TRACE_RESUME(error);
792
793 if (error) {
794 WRITE_ONCE(async_error, error);
795 dpm_save_failed_dev(dev_name(dev));
796 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
797 }
798
799 dpm_async_resume_subordinate(dev, async_resume_noirq);
800 }
801
static void async_resume_noirq(void *data, async_cookie_t cookie)
803 {
804 struct device *dev = data;
805
806 device_resume_noirq(dev, pm_transition, true);
807 put_device(dev);
808 }
809
static void dpm_noirq_resume_devices(pm_message_t state)
811 {
812 struct device *dev;
813 ktime_t starttime = ktime_get();
814
815 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
816
817 async_error = 0;
818 pm_transition = state;
819
820 mutex_lock(&dpm_list_mtx);
821
822 /*
823 * Start processing "async" root devices upfront so they don't wait for
824 * the "sync" devices they don't depend on.
825 */
826 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
827 dpm_clear_async_state(dev);
828 if (dpm_root_device(dev))
829 dpm_async_with_cleanup(dev, async_resume_noirq);
830 }
831
832 while (!list_empty(&dpm_noirq_list)) {
833 dev = to_device(dpm_noirq_list.next);
834 list_move_tail(&dev->power.entry, &dpm_late_early_list);
835
836 if (!dpm_async_fn(dev, async_resume_noirq)) {
837 get_device(dev);
838
839 mutex_unlock(&dpm_list_mtx);
840
841 device_resume_noirq(dev, state, false);
842
843 put_device(dev);
844
845 mutex_lock(&dpm_list_mtx);
846 }
847 }
848 mutex_unlock(&dpm_list_mtx);
849 async_synchronize_full();
850 dpm_show_time(starttime, state, 0, "noirq");
851 if (READ_ONCE(async_error))
852 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
853
854 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
855 }
856
857 /**
858 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
859 * @state: PM transition of the system being carried out.
860 *
861 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
862 * allow device drivers' interrupt handlers to be called.
863 */
void dpm_resume_noirq(pm_message_t state)
865 {
866 dpm_noirq_resume_devices(state);
867
868 resume_device_irqs();
869 device_wakeup_disarm_wake_irqs();
870 }
871
872 static void async_resume_early(void *data, async_cookie_t cookie);
873
874 /**
875 * device_resume_early - Execute an "early resume" callback for given device.
876 * @dev: Device to handle.
877 * @state: PM transition of the system being carried out.
878 * @async: If true, the device is being resumed asynchronously.
879 *
880 * Runtime PM is disabled for @dev while this function is being executed.
881 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
883 {
884 pm_callback_t callback = NULL;
885 const char *info = NULL;
886 int error = 0;
887
888 TRACE_DEVICE(dev);
889 TRACE_RESUME(0);
890
891 if (dev->power.syscore || dev->power.direct_complete)
892 goto Out;
893
894 if (!dev->power.is_late_suspended)
895 goto Out;
896
897 if (!dpm_wait_for_superior(dev, async))
898 goto Out;
899
900 if (dev->pm_domain) {
901 info = "early power domain ";
902 callback = pm_late_early_op(&dev->pm_domain->ops, state);
903 } else if (dev->type && dev->type->pm) {
904 info = "early type ";
905 callback = pm_late_early_op(dev->type->pm, state);
906 } else if (dev->class && dev->class->pm) {
907 info = "early class ";
908 callback = pm_late_early_op(dev->class->pm, state);
909 } else if (dev->bus && dev->bus->pm) {
910 info = "early bus ";
911 callback = pm_late_early_op(dev->bus->pm, state);
912 }
913 if (callback)
914 goto Run;
915
916 if (dev_pm_skip_resume(dev))
917 goto Skip;
918
919 if (dev->driver && dev->driver->pm) {
920 info = "early driver ";
921 callback = pm_late_early_op(dev->driver->pm, state);
922 }
923
924 Run:
925 error = dpm_run_callback(callback, dev, state, info);
926
927 Skip:
928 dev->power.is_late_suspended = false;
929
930 Out:
931 TRACE_RESUME(error);
932
933 pm_runtime_enable(dev);
934 complete_all(&dev->power.completion);
935
936 if (error) {
937 WRITE_ONCE(async_error, error);
938 dpm_save_failed_dev(dev_name(dev));
939 pm_dev_err(dev, state, async ? " async early" : " early", error);
940 }
941
942 dpm_async_resume_subordinate(dev, async_resume_early);
943 }
944
static void async_resume_early(void *data, async_cookie_t cookie)
946 {
947 struct device *dev = data;
948
949 device_resume_early(dev, pm_transition, true);
950 put_device(dev);
951 }
952
953 /**
954 * dpm_resume_early - Execute "early resume" callbacks for all devices.
955 * @state: PM transition of the system being carried out.
956 */
void dpm_resume_early(pm_message_t state)
958 {
959 struct device *dev;
960 ktime_t starttime = ktime_get();
961
962 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
963
964 async_error = 0;
965 pm_transition = state;
966
967 mutex_lock(&dpm_list_mtx);
968
969 /*
970 * Start processing "async" root devices upfront so they don't wait for
971 * the "sync" devices they don't depend on.
972 */
973 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
974 dpm_clear_async_state(dev);
975 if (dpm_root_device(dev))
976 dpm_async_with_cleanup(dev, async_resume_early);
977 }
978
979 while (!list_empty(&dpm_late_early_list)) {
980 dev = to_device(dpm_late_early_list.next);
981 list_move_tail(&dev->power.entry, &dpm_suspended_list);
982
983 if (!dpm_async_fn(dev, async_resume_early)) {
984 get_device(dev);
985
986 mutex_unlock(&dpm_list_mtx);
987
988 device_resume_early(dev, state, false);
989
990 put_device(dev);
991
992 mutex_lock(&dpm_list_mtx);
993 }
994 }
995 mutex_unlock(&dpm_list_mtx);
996 async_synchronize_full();
997 dpm_show_time(starttime, state, 0, "early");
998 if (READ_ONCE(async_error))
999 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
1000
1001 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
1002 }
1003
1004 /**
1005 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
1006 * @state: PM transition of the system being carried out.
1007 */
void dpm_resume_start(pm_message_t state)
1009 {
1010 dpm_resume_noirq(state);
1011 dpm_resume_early(state);
1012 }
1013 EXPORT_SYMBOL_GPL(dpm_resume_start);
1014
1015 static void async_resume(void *data, async_cookie_t cookie);
1016
1017 /**
1018 * device_resume - Execute "resume" callbacks for given device.
1019 * @dev: Device to handle.
1020 * @state: PM transition of the system being carried out.
1021 * @async: If true, the device is being resumed asynchronously.
1022 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
1024 {
1025 pm_callback_t callback = NULL;
1026 const char *info = NULL;
1027 int error = 0;
1028 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1029
1030 TRACE_DEVICE(dev);
1031 TRACE_RESUME(0);
1032
1033 if (dev->power.syscore)
1034 goto Complete;
1035
1036 if (!dev->power.is_suspended)
1037 goto Complete;
1038
1039 dev->power.is_suspended = false;
1040
1041 if (dev->power.direct_complete) {
1042 /*
1043 * Allow new children to be added under the device after this
1044 * point if it has no PM callbacks.
1045 */
1046 if (dev->power.no_pm_callbacks)
1047 dev->power.is_prepared = false;
1048
1049 /* Match the pm_runtime_disable() in device_suspend(). */
1050 pm_runtime_enable(dev);
1051 goto Complete;
1052 }
1053
1054 if (!dpm_wait_for_superior(dev, async))
1055 goto Complete;
1056
1057 dpm_watchdog_set(&wd, dev);
1058 device_lock(dev);
1059
1060 /*
1061 * This is a fib. But we'll allow new children to be added below
1062 * a resumed device, even if the device hasn't been completed yet.
1063 */
1064 dev->power.is_prepared = false;
1065
1066 if (dev->pm_domain) {
1067 info = "power domain ";
1068 callback = pm_op(&dev->pm_domain->ops, state);
1069 goto Driver;
1070 }
1071
1072 if (dev->type && dev->type->pm) {
1073 info = "type ";
1074 callback = pm_op(dev->type->pm, state);
1075 goto Driver;
1076 }
1077
1078 if (dev->class && dev->class->pm) {
1079 info = "class ";
1080 callback = pm_op(dev->class->pm, state);
1081 goto Driver;
1082 }
1083
1084 if (dev->bus) {
1085 if (dev->bus->pm) {
1086 info = "bus ";
1087 callback = pm_op(dev->bus->pm, state);
1088 } else if (dev->bus->resume) {
1089 info = "legacy bus ";
1090 callback = dev->bus->resume;
1091 goto End;
1092 }
1093 }
1094
1095 Driver:
1096 if (!callback && dev->driver && dev->driver->pm) {
1097 info = "driver ";
1098 callback = pm_op(dev->driver->pm, state);
1099 }
1100
1101 End:
1102 error = dpm_run_callback(callback, dev, state, info);
1103
1104 device_unlock(dev);
1105 dpm_watchdog_clear(&wd);
1106
1107 Complete:
1108 complete_all(&dev->power.completion);
1109
1110 TRACE_RESUME(error);
1111
1112 if (error) {
1113 WRITE_ONCE(async_error, error);
1114 dpm_save_failed_dev(dev_name(dev));
1115 pm_dev_err(dev, state, async ? " async" : "", error);
1116 }
1117
1118 dpm_async_resume_subordinate(dev, async_resume);
1119 }
1120
static void async_resume(void *data, async_cookie_t cookie)
1122 {
1123 struct device *dev = data;
1124
1125 device_resume(dev, pm_transition, true);
1126 put_device(dev);
1127 }
1128
1129 /**
1130 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1131 * @state: PM transition of the system being carried out.
1132 *
1133 * Execute the appropriate "resume" callback for all devices whose status
1134 * indicates that they are suspended.
1135 */
void dpm_resume(pm_message_t state)
1137 {
1138 struct device *dev;
1139 ktime_t starttime = ktime_get();
1140
1141 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1142
1143 pm_transition = state;
1144 async_error = 0;
1145
1146 mutex_lock(&dpm_list_mtx);
1147
1148 /*
1149 * Start processing "async" root devices upfront so they don't wait for
1150 * the "sync" devices they don't depend on.
1151 */
1152 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1153 dpm_clear_async_state(dev);
1154 if (dpm_root_device(dev))
1155 dpm_async_with_cleanup(dev, async_resume);
1156 }
1157
1158 while (!list_empty(&dpm_suspended_list)) {
1159 dev = to_device(dpm_suspended_list.next);
1160 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1161
1162 if (!dpm_async_fn(dev, async_resume)) {
1163 get_device(dev);
1164
1165 mutex_unlock(&dpm_list_mtx);
1166
1167 device_resume(dev, state, false);
1168
1169 put_device(dev);
1170
1171 mutex_lock(&dpm_list_mtx);
1172 }
1173 }
1174 mutex_unlock(&dpm_list_mtx);
1175 async_synchronize_full();
1176 dpm_show_time(starttime, state, 0, NULL);
1177 if (READ_ONCE(async_error))
1178 dpm_save_failed_step(SUSPEND_RESUME);
1179
1180 cpufreq_resume();
1181 devfreq_resume();
1182 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1183 }
1184
1185 /**
1186 * device_complete - Complete a PM transition for given device.
1187 * @dev: Device to handle.
1188 * @state: PM transition of the system being carried out.
1189 */
static void device_complete(struct device *dev, pm_message_t state)
1191 {
1192 void (*callback)(struct device *) = NULL;
1193 const char *info = NULL;
1194
1195 if (dev->power.syscore)
1196 goto out;
1197
1198 device_lock(dev);
1199
1200 if (dev->pm_domain) {
1201 info = "completing power domain ";
1202 callback = dev->pm_domain->ops.complete;
1203 } else if (dev->type && dev->type->pm) {
1204 info = "completing type ";
1205 callback = dev->type->pm->complete;
1206 } else if (dev->class && dev->class->pm) {
1207 info = "completing class ";
1208 callback = dev->class->pm->complete;
1209 } else if (dev->bus && dev->bus->pm) {
1210 info = "completing bus ";
1211 callback = dev->bus->pm->complete;
1212 }
1213
1214 if (!callback && dev->driver && dev->driver->pm) {
1215 info = "completing driver ";
1216 callback = dev->driver->pm->complete;
1217 }
1218
1219 if (callback) {
1220 pm_dev_dbg(dev, state, info);
1221 callback(dev);
1222 }
1223
1224 device_unlock(dev);
1225
1226 out:
1227 /* If enabling runtime PM for the device is blocked, unblock it. */
1228 pm_runtime_unblock(dev);
1229 pm_runtime_put(dev);
1230 }
1231
1232 /**
1233 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1234 * @state: PM transition of the system being carried out.
1235 *
1236 * Execute the ->complete() callbacks for all devices whose PM status is not
1237 * DPM_ON (this allows new devices to be registered).
1238 */
void dpm_complete(pm_message_t state)
1240 {
1241 struct list_head list;
1242
1243 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1244
1245 INIT_LIST_HEAD(&list);
1246 mutex_lock(&dpm_list_mtx);
1247 while (!list_empty(&dpm_prepared_list)) {
1248 struct device *dev = to_device(dpm_prepared_list.prev);
1249
1250 get_device(dev);
1251 dev->power.is_prepared = false;
1252 list_move(&dev->power.entry, &list);
1253
1254 mutex_unlock(&dpm_list_mtx);
1255
1256 trace_device_pm_callback_start(dev, "", state.event);
1257 device_complete(dev, state);
1258 trace_device_pm_callback_end(dev, 0);
1259
1260 put_device(dev);
1261
1262 mutex_lock(&dpm_list_mtx);
1263 }
1264 list_splice(&list, &dpm_list);
1265 mutex_unlock(&dpm_list_mtx);
1266
1267 /* Allow device probing and trigger re-probing of deferred devices */
1268 device_unblock_probing();
1269 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1270 }
1271
1272 /**
1273 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1274 * @state: PM transition of the system being carried out.
1275 *
1276 * Execute "resume" callbacks for all devices and complete the PM transition of
1277 * the system.
1278 */
void dpm_resume_end(pm_message_t state)
1280 {
1281 dpm_resume(state);
1282 pm_restore_gfp_mask();
1283 dpm_complete(state);
1284 }
1285 EXPORT_SYMBOL_GPL(dpm_resume_end);
1286
1287
1288 /*------------------------- Suspend routines -------------------------*/
1289
static bool dpm_leaf_device(struct device *dev)
1291 {
1292 struct device *child;
1293
1294 lockdep_assert_held(&dpm_list_mtx);
1295
1296 child = device_find_any_child(dev);
1297 if (child) {
1298 put_device(child);
1299
1300 return false;
1301 }
1302
1303 /*
1304 * Since this function is required to run under dpm_list_mtx, the
1305 * list_empty() below will only return true if the device's list of
1306 * consumers is actually empty before calling it.
1307 */
1308 return list_empty(&dev->links.consumers);
1309 }
1310
static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
1312 {
1313 guard(mutex)(&dpm_list_mtx);
1314
1315 /*
1316 * If the device is suspended asynchronously and the parent's callback
1317 * deletes both the device and the parent itself, the parent object may
1318 * be freed while this function is running, so avoid that by checking
1319 * if the device has been deleted already as the parent cannot be
1320 * deleted before it.
1321 */
1322 if (!device_pm_initialized(dev))
1323 return false;
1324
1325 /* Start processing the device's parent if it is "async". */
1326 if (dev->parent)
1327 dpm_async_with_cleanup(dev->parent, func);
1328
1329 return true;
1330 }
1331
static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
1333 {
1334 struct device_link *link;
1335 int idx;
1336
1337 if (!dpm_async_suspend_parent(dev, func))
1338 return;
1339
1340 idx = device_links_read_lock();
1341
1342 /* Start processing the device's "async" suppliers. */
1343 dev_for_each_link_to_supplier(link, dev)
1344 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
1345 dpm_async_with_cleanup(link->supplier, func);
1346
1347 device_links_read_unlock(idx);
1348 }
1349
static void dpm_async_suspend_complete_all(struct list_head *device_list)
1351 {
1352 struct device *dev;
1353
1354 guard(mutex)(&async_wip_mtx);
1355
1356 list_for_each_entry_reverse(dev, device_list, power.entry) {
1357 /*
1358 * In case the device is being waited for and async processing
1359 * has not started for it yet, let the waiters make progress.
1360 */
1361 if (!dev->power.work_in_progress)
1362 complete_all(&dev->power.completion);
1363 }
1364 }
1365
1366 /**
1367 * resume_event - Return a "resume" message for given "suspend" sleep state.
1368 * @sleep_state: PM message representing a sleep state.
1369 *
1370 * Return a PM message representing the resume event corresponding to given
1371 * sleep state.
1372 */
static pm_message_t resume_event(pm_message_t sleep_state)
1374 {
1375 switch (sleep_state.event) {
1376 case PM_EVENT_SUSPEND:
1377 return PMSG_RESUME;
1378 case PM_EVENT_FREEZE:
1379 case PM_EVENT_QUIESCE:
1380 return PMSG_RECOVER;
1381 case PM_EVENT_HIBERNATE:
1382 return PMSG_RESTORE;
1383 }
1384 return PMSG_ON;
1385 }
1386
static void dpm_superior_set_must_resume(struct device *dev)
1388 {
1389 struct device_link *link;
1390 int idx;
1391
1392 if (dev->parent)
1393 dev->parent->power.must_resume = true;
1394
1395 idx = device_links_read_lock();
1396
1397 dev_for_each_link_to_supplier(link, dev)
1398 link->supplier->power.must_resume = true;
1399
1400 device_links_read_unlock(idx);
1401 }
1402
1403 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1404
1405 /**
1406 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1407 * @dev: Device to handle.
1408 * @state: PM transition of the system being carried out.
1409 * @async: If true, the device is being suspended asynchronously.
1410 *
1411 * The driver of @dev will not receive interrupts while this function is being
1412 * executed.
1413 */
static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1415 {
1416 pm_callback_t callback = NULL;
1417 const char *info = NULL;
1418 int error = 0;
1419
1420 TRACE_DEVICE(dev);
1421 TRACE_SUSPEND(0);
1422
1423 dpm_wait_for_subordinate(dev, async);
1424
1425 if (READ_ONCE(async_error))
1426 goto Complete;
1427
1428 if (dev->power.syscore || dev->power.direct_complete)
1429 goto Complete;
1430
1431 if (dev->pm_domain) {
1432 info = "noirq power domain ";
1433 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1434 } else if (dev->type && dev->type->pm) {
1435 info = "noirq type ";
1436 callback = pm_noirq_op(dev->type->pm, state);
1437 } else if (dev->class && dev->class->pm) {
1438 info = "noirq class ";
1439 callback = pm_noirq_op(dev->class->pm, state);
1440 } else if (dev->bus && dev->bus->pm) {
1441 info = "noirq bus ";
1442 callback = pm_noirq_op(dev->bus->pm, state);
1443 }
1444 if (callback)
1445 goto Run;
1446
1447 if (dev_pm_skip_suspend(dev))
1448 goto Skip;
1449
1450 if (dev->driver && dev->driver->pm) {
1451 info = "noirq driver ";
1452 callback = pm_noirq_op(dev->driver->pm, state);
1453 }
1454
1455 Run:
1456 error = dpm_run_callback(callback, dev, state, info);
1457 if (error) {
1458 WRITE_ONCE(async_error, error);
1459 dpm_save_failed_dev(dev_name(dev));
1460 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1461 goto Complete;
1462 }
1463
1464 Skip:
1465 dev->power.is_noirq_suspended = true;
1466
1467 /*
1468 * Devices must be resumed unless they are explicitly allowed to be left
1469 * in suspend, but even in that case skipping the resume of devices that
1470 * were in use right before the system suspend (as indicated by their
1471 * runtime PM usage counters and child counters) would be suboptimal.
1472 */
1473 if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1474 dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1475 dev->power.must_resume = true;
1476
1477 if (dev->power.must_resume)
1478 dpm_superior_set_must_resume(dev);
1479
1480 Complete:
1481 complete_all(&dev->power.completion);
1482 TRACE_SUSPEND(error);
1483
1484 if (error || READ_ONCE(async_error))
1485 return;
1486
1487 dpm_async_suspend_superior(dev, async_suspend_noirq);
1488 }
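
/*
 * For the "leave in suspend" handling above to apply, the driver must have
 * opted in with DPM_FLAG_MAY_SKIP_RESUME, typically at probe time (sketch,
 * with a hypothetical my_probe()):
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		...
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * Whether the device actually stays suspended still depends on
 * power.may_skip_resume and pm_runtime_need_not_resume() as checked in
 * device_suspend_noirq().
 */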
1489
static void async_suspend_noirq(void *data, async_cookie_t cookie)
1491 {
1492 struct device *dev = data;
1493
1494 device_suspend_noirq(dev, pm_transition, true);
1495 put_device(dev);
1496 }
1497
static int dpm_noirq_suspend_devices(pm_message_t state)
1499 {
1500 ktime_t starttime = ktime_get();
1501 struct device *dev;
1502 int error;
1503
1504 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1505
1506 pm_transition = state;
1507 async_error = 0;
1508
1509 mutex_lock(&dpm_list_mtx);
1510
1511 /*
1512 * Start processing "async" leaf devices upfront so they don't need to
1513 * wait for the "sync" devices they don't depend on.
1514 */
1515 list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1516 dpm_clear_async_state(dev);
1517 if (dpm_leaf_device(dev))
1518 dpm_async_with_cleanup(dev, async_suspend_noirq);
1519 }
1520
1521 while (!list_empty(&dpm_late_early_list)) {
1522 dev = to_device(dpm_late_early_list.prev);
1523
1524 list_move(&dev->power.entry, &dpm_noirq_list);
1525
1526 if (dpm_async_fn(dev, async_suspend_noirq))
1527 continue;
1528
1529 get_device(dev);
1530
1531 mutex_unlock(&dpm_list_mtx);
1532
1533 device_suspend_noirq(dev, state, false);
1534
1535 put_device(dev);
1536
1537 mutex_lock(&dpm_list_mtx);
1538
1539 if (READ_ONCE(async_error)) {
1540 dpm_async_suspend_complete_all(&dpm_late_early_list);
1541 /*
1542 * Move all devices to the target list to resume them
1543 * properly.
1544 */
1545 list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1546 break;
1547 }
1548 }
1549
1550 mutex_unlock(&dpm_list_mtx);
1551
1552 async_synchronize_full();
1553
1554 error = READ_ONCE(async_error);
1555 if (error)
1556 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1557
1558 dpm_show_time(starttime, state, error, "noirq");
1559 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1560 return error;
1561 }
1562
1563 /**
1564 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1565 * @state: PM transition of the system being carried out.
1566 *
1567 * Prevent device drivers' interrupt handlers from being called and invoke
1568 * "noirq" suspend callbacks for all non-sysdev devices.
1569 */
int dpm_suspend_noirq(pm_message_t state)
1571 {
1572 int ret;
1573
1574 device_wakeup_arm_wake_irqs();
1575 suspend_device_irqs();
1576
1577 ret = dpm_noirq_suspend_devices(state);
1578 if (ret)
1579 dpm_resume_noirq(resume_event(state));
1580
1581 return ret;
1582 }
1583
static void dpm_propagate_wakeup_to_parent(struct device *dev)
1585 {
1586 struct device *parent = dev->parent;
1587
1588 if (!parent)
1589 return;
1590
1591 spin_lock_irq(&parent->power.lock);
1592
1593 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1594 parent->power.wakeup_path = true;
1595
1596 spin_unlock_irq(&parent->power.lock);
1597 }
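
/*
 * The propagation above relies on device_wakeup_path(), which only reports
 * true for devices on a wakeup path. A driver typically makes its device
 * wakeup-capable at probe time (sketch, in a hypothetical my_probe()):
 *
 *	device_init_wakeup(dev, true);
 *
 * After that, device_may_wakeup(dev) returns true, device_suspend() below
 * sets power.wakeup_path for the device, and this helper propagates it to
 * the parent unless the parent ignores its children.
 */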
1598
1599 static void async_suspend_late(void *data, async_cookie_t cookie);
1600
1601 /**
1602 * device_suspend_late - Execute a "late suspend" callback for given device.
1603 * @dev: Device to handle.
1604 * @state: PM transition of the system being carried out.
1605 * @async: If true, the device is being suspended asynchronously.
1606 *
1607 * Runtime PM is disabled for @dev while this function is being executed.
1608 */
static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
1610 {
1611 pm_callback_t callback = NULL;
1612 const char *info = NULL;
1613 int error = 0;
1614
1615 TRACE_DEVICE(dev);
1616 TRACE_SUSPEND(0);
1617
1618 /*
1619 * Disable runtime PM for the device without checking if there is a
1620 * pending resume request for it.
1621 */
1622 __pm_runtime_disable(dev, false);
1623
1624 dpm_wait_for_subordinate(dev, async);
1625
1626 if (READ_ONCE(async_error))
1627 goto Complete;
1628
1629 if (pm_wakeup_pending()) {
1630 WRITE_ONCE(async_error, -EBUSY);
1631 goto Complete;
1632 }
1633
1634 if (dev->power.syscore || dev->power.direct_complete)
1635 goto Complete;
1636
1637 if (dev->pm_domain) {
1638 info = "late power domain ";
1639 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1640 } else if (dev->type && dev->type->pm) {
1641 info = "late type ";
1642 callback = pm_late_early_op(dev->type->pm, state);
1643 } else if (dev->class && dev->class->pm) {
1644 info = "late class ";
1645 callback = pm_late_early_op(dev->class->pm, state);
1646 } else if (dev->bus && dev->bus->pm) {
1647 info = "late bus ";
1648 callback = pm_late_early_op(dev->bus->pm, state);
1649 }
1650 if (callback)
1651 goto Run;
1652
1653 if (dev_pm_skip_suspend(dev))
1654 goto Skip;
1655
1656 if (dev->driver && dev->driver->pm) {
1657 info = "late driver ";
1658 callback = pm_late_early_op(dev->driver->pm, state);
1659 }
1660
1661 Run:
1662 error = dpm_run_callback(callback, dev, state, info);
1663 if (error) {
1664 WRITE_ONCE(async_error, error);
1665 dpm_save_failed_dev(dev_name(dev));
1666 pm_dev_err(dev, state, async ? " async late" : " late", error);
1667 goto Complete;
1668 }
1669 dpm_propagate_wakeup_to_parent(dev);
1670
1671 Skip:
1672 dev->power.is_late_suspended = true;
1673
1674 Complete:
1675 TRACE_SUSPEND(error);
1676 complete_all(&dev->power.completion);
1677
1678 if (error || READ_ONCE(async_error))
1679 return;
1680
1681 dpm_async_suspend_superior(dev, async_suspend_late);
1682 }
1683
static void async_suspend_late(void *data, async_cookie_t cookie)
1685 {
1686 struct device *dev = data;
1687
1688 device_suspend_late(dev, pm_transition, true);
1689 put_device(dev);
1690 }
1691
1692 /**
1693 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1694 * @state: PM transition of the system being carried out.
1695 */
int dpm_suspend_late(pm_message_t state)
1697 {
1698 ktime_t starttime = ktime_get();
1699 struct device *dev;
1700 int error;
1701
1702 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1703
1704 pm_transition = state;
1705 async_error = 0;
1706
1707 wake_up_all_idle_cpus();
1708
1709 mutex_lock(&dpm_list_mtx);
1710
1711 /*
1712 * Start processing "async" leaf devices upfront so they don't need to
1713 * wait for the "sync" devices they don't depend on.
1714 */
1715 list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1716 dpm_clear_async_state(dev);
1717 if (dpm_leaf_device(dev))
1718 dpm_async_with_cleanup(dev, async_suspend_late);
1719 }
1720
1721 while (!list_empty(&dpm_suspended_list)) {
1722 dev = to_device(dpm_suspended_list.prev);
1723
1724 list_move(&dev->power.entry, &dpm_late_early_list);
1725
1726 if (dpm_async_fn(dev, async_suspend_late))
1727 continue;
1728
1729 get_device(dev);
1730
1731 mutex_unlock(&dpm_list_mtx);
1732
1733 device_suspend_late(dev, state, false);
1734
1735 put_device(dev);
1736
1737 mutex_lock(&dpm_list_mtx);
1738
1739 if (READ_ONCE(async_error)) {
1740 dpm_async_suspend_complete_all(&dpm_suspended_list);
1741 /*
1742 * Move all devices to the target list to resume them
1743 * properly.
1744 */
1745 list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1746 break;
1747 }
1748 }
1749
1750 mutex_unlock(&dpm_list_mtx);
1751
1752 async_synchronize_full();
1753
1754 error = READ_ONCE(async_error);
1755 if (error) {
1756 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1757 dpm_resume_early(resume_event(state));
1758 }
1759 dpm_show_time(starttime, state, error, "late");
1760 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1761 return error;
1762 }
1763
1764 /**
1765 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1766 * @state: PM transition of the system being carried out.
1767 */
int dpm_suspend_end(pm_message_t state)
1769 {
1770 ktime_t starttime = ktime_get();
1771 int error;
1772
1773 error = dpm_suspend_late(state);
1774 if (error)
1775 goto out;
1776
1777 error = dpm_suspend_noirq(state);
1778 if (error)
1779 dpm_resume_early(resume_event(state));
1780
1781 out:
1782 dpm_show_time(starttime, state, error, "end");
1783 return error;
1784 }
1785 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1786
1787 /**
1788 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1789 * @dev: Device to suspend.
1790 * @state: PM transition of the system being carried out.
1791 * @cb: Suspend callback to execute.
1792 * @info: string description of caller.
1793 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
1797 {
1798 int error;
1799 ktime_t calltime;
1800
1801 calltime = initcall_debug_start(dev, cb);
1802
1803 trace_device_pm_callback_start(dev, info, state.event);
1804 error = cb(dev, state);
1805 trace_device_pm_callback_end(dev, error);
1806 suspend_report_result(dev, cb, error);
1807
1808 initcall_debug_report(dev, calltime, cb, error);
1809
1810 return error;
1811 }
1812
static void dpm_clear_superiors_direct_complete(struct device *dev)
1814 {
1815 struct device_link *link;
1816 int idx;
1817
1818 if (dev->parent) {
1819 spin_lock_irq(&dev->parent->power.lock);
1820 dev->parent->power.direct_complete = false;
1821 spin_unlock_irq(&dev->parent->power.lock);
1822 }
1823
1824 idx = device_links_read_lock();
1825
1826 dev_for_each_link_to_supplier(link, dev) {
1827 spin_lock_irq(&link->supplier->power.lock);
1828 link->supplier->power.direct_complete = false;
1829 spin_unlock_irq(&link->supplier->power.lock);
1830 }
1831
1832 device_links_read_unlock(idx);
1833 }
1834
1835 static void async_suspend(void *data, async_cookie_t cookie);
1836
1837 /**
1838 * device_suspend - Execute "suspend" callbacks for given device.
1839 * @dev: Device to handle.
1840 * @state: PM transition of the system being carried out.
1841 * @async: If true, the device is being suspended asynchronously.
1842 */
static void device_suspend(struct device *dev, pm_message_t state, bool async)
1844 {
1845 pm_callback_t callback = NULL;
1846 const char *info = NULL;
1847 int error = 0;
1848 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1849
1850 TRACE_DEVICE(dev);
1851 TRACE_SUSPEND(0);
1852
1853 dpm_wait_for_subordinate(dev, async);
1854
1855 if (READ_ONCE(async_error)) {
1856 dev->power.direct_complete = false;
1857 goto Complete;
1858 }
1859
1860 /*
1861 	 * Wait for any runtime PM transitions of the device still in progress
1862 	 * to complete and, if there is a runtime resume request pending for it,
1863 	 * resume it before proceeding to invoke the system-wide suspend
1864 	 * callbacks for it.
1865 *
1866 * If the system-wide suspend callbacks below change the configuration
1867 * of the device, they must disable runtime PM for it or otherwise
1868 * ensure that its runtime-resume callbacks will not be confused by that
1869 * change in case they are invoked going forward.
1870 */
1871 pm_runtime_barrier(dev);
1872
1873 if (pm_wakeup_pending()) {
1874 dev->power.direct_complete = false;
1875 WRITE_ONCE(async_error, -EBUSY);
1876 goto Complete;
1877 }
1878
1879 if (dev->power.syscore)
1880 goto Complete;
1881
1882 /* Avoid direct_complete to let wakeup_path propagate. */
1883 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1884 dev->power.direct_complete = false;
1885
1886 if (dev->power.direct_complete) {
1887 if (pm_runtime_status_suspended(dev)) {
1888 pm_runtime_disable(dev);
1889 if (pm_runtime_status_suspended(dev)) {
1890 pm_dev_dbg(dev, state, "direct-complete ");
1891 dev->power.is_suspended = true;
1892 goto Complete;
1893 }
1894
1895 pm_runtime_enable(dev);
1896 }
1897 dev->power.direct_complete = false;
1898 }
1899
1900 dev->power.may_skip_resume = true;
1901 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1902
1903 dpm_watchdog_set(&wd, dev);
1904 device_lock(dev);
1905
1906 if (dev->pm_domain) {
1907 info = "power domain ";
1908 callback = pm_op(&dev->pm_domain->ops, state);
1909 goto Run;
1910 }
1911
1912 if (dev->type && dev->type->pm) {
1913 info = "type ";
1914 callback = pm_op(dev->type->pm, state);
1915 goto Run;
1916 }
1917
1918 if (dev->class && dev->class->pm) {
1919 info = "class ";
1920 callback = pm_op(dev->class->pm, state);
1921 goto Run;
1922 }
1923
1924 if (dev->bus) {
1925 if (dev->bus->pm) {
1926 info = "bus ";
1927 callback = pm_op(dev->bus->pm, state);
1928 } else if (dev->bus->suspend) {
1929 pm_dev_dbg(dev, state, "legacy bus ");
1930 error = legacy_suspend(dev, state, dev->bus->suspend,
1931 "legacy bus ");
1932 goto End;
1933 }
1934 }
1935
1936 Run:
1937 if (!callback && dev->driver && dev->driver->pm) {
1938 info = "driver ";
1939 callback = pm_op(dev->driver->pm, state);
1940 }
1941
1942 error = dpm_run_callback(callback, dev, state, info);
1943
1944 End:
1945 if (!error) {
1946 dev->power.is_suspended = true;
1947 if (device_may_wakeup(dev))
1948 dev->power.wakeup_path = true;
1949
1950 dpm_propagate_wakeup_to_parent(dev);
1951 dpm_clear_superiors_direct_complete(dev);
1952 }
1953
1954 device_unlock(dev);
1955 dpm_watchdog_clear(&wd);
1956
1957 Complete:
1958 if (error) {
1959 WRITE_ONCE(async_error, error);
1960 dpm_save_failed_dev(dev_name(dev));
1961 pm_dev_err(dev, state, async ? " async" : "", error);
1962 }
1963
1964 complete_all(&dev->power.completion);
1965 TRACE_SUSPEND(error);
1966
1967 if (error || READ_ONCE(async_error))
1968 return;
1969
1970 dpm_async_suspend_superior(dev, async_suspend);
1971 }
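
/*
 * Note: the may_skip_resume/must_resume handling above is driven by
 * DPM_FLAG_MAY_SKIP_RESUME. A driver opts in at probe time, for example
 * (hypothetical driver, but dev_pm_set_driver_flags() is the real API):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_MAY_SKIP_RESUME);
 *		...
 *	}
 */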
1972
1973 static void async_suspend(void *data, async_cookie_t cookie)
1974 {
1975 struct device *dev = data;
1976
1977 device_suspend(dev, pm_transition, true);
1978 put_device(dev);
1979 }
1980
1981 /**
1982 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1983 * @state: PM transition of the system being carried out.
1984 */
1985 int dpm_suspend(pm_message_t state)
1986 {
1987 ktime_t starttime = ktime_get();
1988 struct device *dev;
1989 int error;
1990
1991 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1992 might_sleep();
1993
1994 devfreq_suspend();
1995 cpufreq_suspend();
1996
1997 pm_transition = state;
1998 async_error = 0;
1999
2000 mutex_lock(&dpm_list_mtx);
2001
2002 /*
2003 * Start processing "async" leaf devices upfront so they don't need to
2004 * wait for the "sync" devices they don't depend on.
2005 */
2006 list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
2007 dpm_clear_async_state(dev);
2008 if (dpm_leaf_device(dev))
2009 dpm_async_with_cleanup(dev, async_suspend);
2010 }
2011
2012 while (!list_empty(&dpm_prepared_list)) {
2013 dev = to_device(dpm_prepared_list.prev);
2014
2015 list_move(&dev->power.entry, &dpm_suspended_list);
2016
2017 if (dpm_async_fn(dev, async_suspend))
2018 continue;
2019
2020 get_device(dev);
2021
2022 mutex_unlock(&dpm_list_mtx);
2023
2024 device_suspend(dev, state, false);
2025
2026 put_device(dev);
2027
2028 mutex_lock(&dpm_list_mtx);
2029
2030 if (READ_ONCE(async_error)) {
2031 dpm_async_suspend_complete_all(&dpm_prepared_list);
2032 /*
2033 * Move all devices to the target list to resume them
2034 * properly.
2035 */
2036 list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
2037 break;
2038 }
2039 }
2040
2041 mutex_unlock(&dpm_list_mtx);
2042
2043 async_synchronize_full();
2044
2045 error = READ_ONCE(async_error);
2046 if (error)
2047 dpm_save_failed_step(SUSPEND_SUSPEND);
2048
2049 dpm_show_time(starttime, state, error, NULL);
2050 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
2051 return error;
2052 }
2053
2054 static bool device_prepare_smart_suspend(struct device *dev)
2055 {
2056 struct device_link *link;
2057 bool ret = true;
2058 int idx;
2059
2060 /*
2061 * The "smart suspend" feature is enabled for devices whose drivers ask
2062 * for it and for devices without PM callbacks.
2063 *
2064 * However, if "smart suspend" is not enabled for the device's parent
2065 * or any of its suppliers that take runtime PM into account, it cannot
2066 * be enabled for the device either.
2067 */
2068 if (!dev->power.no_pm_callbacks &&
2069 !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2070 return false;
2071
2072 if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2073 !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2074 return false;
2075
2076 idx = device_links_read_lock();
2077
2078 dev_for_each_link_to_supplier(link, dev) {
2079 if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
2080 continue;
2081
2082 if (!dev_pm_smart_suspend(link->supplier) &&
2083 !pm_runtime_blocked(link->supplier)) {
2084 ret = false;
2085 break;
2086 }
2087 }
2088
2089 device_links_read_unlock(idx);
2090
2091 return ret;
2092 }
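
/*
 * A driver asks for "smart suspend" by setting DPM_FLAG_SMART_SUSPEND,
 * typically at probe time; a minimal, hypothetical example (the flag and
 * dev_pm_set_driver_flags() are real, the driver is not):
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * Whether the flag actually takes effect for a given transition is then
 * decided here, based on the state of the parent and the suppliers.
 */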
2093
2094 /**
2095 * device_prepare - Prepare a device for system power transition.
2096 * @dev: Device to handle.
2097 * @state: PM transition of the system being carried out.
2098 *
2099 * Execute the ->prepare() callback(s) for given device. No new children of the
2100 * device may be registered after this function has returned.
2101 */
2102 static int device_prepare(struct device *dev, pm_message_t state)
2103 {
2104 int (*callback)(struct device *) = NULL;
2105 bool smart_suspend;
2106 int ret = 0;
2107
2108 /*
2109 * If a device's parent goes into runtime suspend at the wrong time,
2110 * it won't be possible to resume the device. To prevent this we
2111 * block runtime suspend here, during the prepare phase, and allow
2112 * it again during the complete phase.
2113 */
2114 pm_runtime_get_noresume(dev);
2115 /*
2116 * If runtime PM is disabled for the device at this point and it has
2117 * never been enabled so far, it should not be enabled until this system
2118 * suspend-resume cycle is complete, so prepare to trigger a warning on
2119 * subsequent attempts to enable it.
2120 */
2121 smart_suspend = !pm_runtime_block_if_disabled(dev);
2122
2123 if (dev->power.syscore)
2124 return 0;
2125
2126 device_lock(dev);
2127
2128 dev->power.wakeup_path = false;
2129
2130 if (dev->power.no_pm_callbacks)
2131 goto unlock;
2132
2133 if (dev->pm_domain)
2134 callback = dev->pm_domain->ops.prepare;
2135 else if (dev->type && dev->type->pm)
2136 callback = dev->type->pm->prepare;
2137 else if (dev->class && dev->class->pm)
2138 callback = dev->class->pm->prepare;
2139 else if (dev->bus && dev->bus->pm)
2140 callback = dev->bus->pm->prepare;
2141
2142 if (!callback && dev->driver && dev->driver->pm)
2143 callback = dev->driver->pm->prepare;
2144
2145 if (callback)
2146 ret = callback(dev);
2147
2148 unlock:
2149 device_unlock(dev);
2150
2151 if (ret < 0) {
2152 suspend_report_result(dev, callback, ret);
2153 pm_runtime_put(dev);
2154 return ret;
2155 }
2156 /* Do not enable "smart suspend" for devices with disabled runtime PM. */
2157 if (smart_suspend)
2158 smart_suspend = device_prepare_smart_suspend(dev);
2159
2160 spin_lock_irq(&dev->power.lock);
2161
2162 dev->power.smart_suspend = smart_suspend;
2163 /*
2164 * A positive return value from ->prepare() means "this device appears
2165 * to be runtime-suspended and its state is fine, so if it really is
2166 * runtime-suspended, you can leave it in that state provided that you
2167 * will do the same thing with all of its descendants". This only
2168 * applies to suspend transitions, however.
2169 */
2170 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2171 (ret > 0 || dev->power.no_pm_callbacks) &&
2172 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2173
2174 spin_unlock_irq(&dev->power.lock);
2175
2176 return 0;
2177 }
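
/*
 * Illustration of the positive-return convention described above: a
 * hypothetical ->prepare() callback that lets the core leave a
 * runtime-suspended device alone during a system suspend transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * Returning 0 simply means that the full set of suspend callbacks should run.
 */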
2178
2179 /**
2180 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2181 * @state: PM transition of the system being carried out.
2182 *
2183 * Execute the ->prepare() callback(s) for all devices.
2184 */
2185 int dpm_prepare(pm_message_t state)
2186 {
2187 int error = 0;
2188
2189 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2190
2191 /*
2192 	 * Give the known devices a chance to complete their probes before
2193 	 * probing is disabled. This synchronization point is important at
2194 	 * least at boot time and during hibernation restore.
2195 */
2196 wait_for_device_probe();
2197 /*
2198 	 * Probing devices during suspend or hibernation is unsafe and would
2199 	 * make system behavior unpredictable, so prohibit device probing here
2200 	 * and defer the probes instead. Normal behavior will be restored in
2201 	 * dpm_complete().
2202 */
2203 device_block_probing();
2204
2205 mutex_lock(&dpm_list_mtx);
2206 while (!list_empty(&dpm_list) && !error) {
2207 struct device *dev = to_device(dpm_list.next);
2208
2209 get_device(dev);
2210
2211 mutex_unlock(&dpm_list_mtx);
2212
2213 trace_device_pm_callback_start(dev, "", state.event);
2214 error = device_prepare(dev, state);
2215 trace_device_pm_callback_end(dev, error);
2216
2217 mutex_lock(&dpm_list_mtx);
2218
2219 if (!error) {
2220 dev->power.is_prepared = true;
2221 if (!list_empty(&dev->power.entry))
2222 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2223 } else if (error == -EAGAIN) {
2224 error = 0;
2225 } else {
2226 dev_info(dev, "not prepared for power transition: code %d\n",
2227 error);
2228 }
2229
2230 mutex_unlock(&dpm_list_mtx);
2231
2232 put_device(dev);
2233
2234 mutex_lock(&dpm_list_mtx);
2235 }
2236 mutex_unlock(&dpm_list_mtx);
2237 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2238 return error;
2239 }
2240
2241 /**
2242 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2243 * @state: PM transition of the system being carried out.
2244 *
2245 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2246 * callbacks for them.
2247 */
2248 int dpm_suspend_start(pm_message_t state)
2249 {
2250 ktime_t starttime = ktime_get();
2251 int error;
2252
2253 error = dpm_prepare(state);
2254 if (error)
2255 dpm_save_failed_step(SUSPEND_PREPARE);
2256 else {
2257 pm_restrict_gfp_mask();
2258 error = dpm_suspend(state);
2259 }
2260
2261 dpm_show_time(starttime, state, error, "start");
2262 return error;
2263 }
2264 EXPORT_SYMBOL_GPL(dpm_suspend_start);
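
/*
 * Illustrative sketch only: a hypothetical caller pairs this with
 * dpm_resume_end() on all exit paths, including the failure case, so that
 * any devices that were already prepared or suspended get resumed and
 * completed again:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error)
 *		goto resume;
 *	...go deeper (dpm_suspend_end() and so on)...
 * resume:
 *	dpm_resume_end(PMSG_RESUME);
 */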
2265
2266 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2267 {
2268 if (ret)
2269 dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2270 }
2271 EXPORT_SYMBOL_GPL(__suspend_report_result);
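
/*
 * Callers normally go through the suspend_report_result() wrapper, which
 * fills in the function name automatically, e.g. (hypothetical callback
 * pointer, real macro):
 *
 *	error = drv->suspend(dev, state);
 *	suspend_report_result(dev, drv->suspend, error);
 */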
2272
2273 /**
2274 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2275 * @subordinate: Device that needs to wait for @dev.
2276 * @dev: Device to wait for.
2277 */
2278 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2279 {
2280 dpm_wait(dev, subordinate->power.async_suspend);
2281 return async_error;
2282 }
2283 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
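
/*
 * A minimal, hypothetical use: from its own PM callback, a device can wait
 * for another device's callbacks in the current transition to finish first,
 * for instance waiting in ->resume() for a companion device to resume
 * (struct foo and its companion field are made up for illustration):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->companion);
 *	}
 *
 * Device links are usually a better way to express such dependencies.
 */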
2284
2285 /**
2286 * dpm_for_each_dev - device iterator.
2287 * @data: data for the callback.
2288 * @fn: function to be called for each device.
2289 *
2290 * Iterate over devices in dpm_list, and call @fn for each device,
2291 * passing it @data.
2292 */
2293 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2294 {
2295 struct device *dev;
2296
2297 if (!fn)
2298 return;
2299
2300 device_pm_lock();
2301 list_for_each_entry(dev, &dpm_list, power.entry)
2302 fn(dev, data);
2303 device_pm_unlock();
2304 }
2305 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
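
/*
 * A minimal, hypothetical example: count the devices currently on dpm_list.
 * The callback is invoked with the PM core's list lock held:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *
 *	dpm_for_each_dev(&n, count_dev);
 */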
2306
2307 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2308 {
2309 if (!ops)
2310 return true;
2311
2312 return !ops->prepare &&
2313 !ops->suspend &&
2314 !ops->suspend_late &&
2315 !ops->suspend_noirq &&
2316 !ops->resume_noirq &&
2317 !ops->resume_early &&
2318 !ops->resume &&
2319 !ops->complete;
2320 }
2321
2322 void device_pm_check_callbacks(struct device *dev)
2323 {
2324 unsigned long flags;
2325
2326 spin_lock_irqsave(&dev->power.lock, flags);
2327 dev->power.no_pm_callbacks =
2328 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2329 !dev->bus->suspend && !dev->bus->resume)) &&
2330 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2331 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2332 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2333 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2334 !dev->driver->suspend && !dev->driver->resume));
2335 spin_unlock_irqrestore(&dev->power.lock, flags);
2336 }
2337
2338 bool dev_pm_skip_suspend(struct device *dev)
2339 {
2340 return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2341 }
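
/*
 * Typical (hypothetical) use from a subsystem's late or noirq suspend
 * callback: skip the device entirely if "smart suspend" is in effect and the
 * device is already runtime-suspended:
 *
 *	static int foo_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_skip_suspend(dev))
 *			return 0;
 *
 *		return foo_do_suspend(dev);	// hypothetical helper
 *	}
 */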
2342