1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 /*
44  * The entries in dpm_list are in depth-first order, simply because
45  * children are guaranteed to be discovered after their parents and are
46  * inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52 
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58 
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61 
62 static DEFINE_MUTEX(async_wip_mtx);
63 static int async_error;
64 
65 /**
66  * pm_hibernate_is_recovering - Check if recovery from a hibernation error is under way.
67  *
68  * Used to check whether dev_pm_ops.thaw() is being called for the normal
69  * hibernation case or as part of recovery from an error.
70  *
71  * Return: %true in the error recovery case, %false in the normal case.
72  */
73 bool pm_hibernate_is_recovering(void)
74 {
75 	return pm_transition.event == PM_EVENT_RECOVER;
76 }
77 EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
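
/*
 * Illustrative sketch (not part of this file): a hypothetical driver's
 * ->thaw() callback could use pm_hibernate_is_recovering() to do a full
 * re-initialization when recovering from an error and a lighter one on the
 * normal thaw path; "foo" and its helpers are made up for illustration.
 *
 *	static int foo_thaw(struct device *dev)
 *	{
 *		struct foo *f = dev_get_drvdata(dev);
 *
 *		if (pm_hibernate_is_recovering())
 *			return foo_full_reinit(f);
 *
 *		return foo_light_reinit(f);
 *	}
 */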
78 
79 static const char *pm_verb(int event)
80 {
81 	switch (event) {
82 	case PM_EVENT_SUSPEND:
83 		return "suspend";
84 	case PM_EVENT_RESUME:
85 		return "resume";
86 	case PM_EVENT_FREEZE:
87 		return "freeze";
88 	case PM_EVENT_QUIESCE:
89 		return "quiesce";
90 	case PM_EVENT_HIBERNATE:
91 		return "hibernate";
92 	case PM_EVENT_THAW:
93 		return "thaw";
94 	case PM_EVENT_RESTORE:
95 		return "restore";
96 	case PM_EVENT_RECOVER:
97 		return "recover";
98 	default:
99 		return "(unknown PM event)";
100 	}
101 }
102 
103 /**
104  * device_pm_sleep_init - Initialize system suspend-related device fields.
105  * @dev: Device object being initialized.
106  */
107 void device_pm_sleep_init(struct device *dev)
108 {
109 	dev->power.is_prepared = false;
110 	dev->power.is_suspended = false;
111 	dev->power.is_noirq_suspended = false;
112 	dev->power.is_late_suspended = false;
113 	init_completion(&dev->power.completion);
114 	complete_all(&dev->power.completion);
115 	dev->power.wakeup = NULL;
116 	INIT_LIST_HEAD(&dev->power.entry);
117 }
118 
119 /**
120  * device_pm_lock - Lock the list of active devices used by the PM core.
121  */
122 void device_pm_lock(void)
123 {
124 	mutex_lock(&dpm_list_mtx);
125 }
126 
127 /**
128  * device_pm_unlock - Unlock the list of active devices used by the PM core.
129  */
130 void device_pm_unlock(void)
131 {
132 	mutex_unlock(&dpm_list_mtx);
133 }
134 
135 /**
136  * device_pm_add - Add a device to the PM core's list of active devices.
137  * @dev: Device to add to the list.
138  */
139 void device_pm_add(struct device *dev)
140 {
141 	/* Skip PM setup/initialization. */
142 	if (device_pm_not_required(dev))
143 		return;
144 
145 	pr_debug("Adding info for %s:%s\n",
146 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147 	device_pm_check_callbacks(dev);
148 	mutex_lock(&dpm_list_mtx);
149 	if (dev->parent && dev->parent->power.is_prepared)
150 		dev_warn(dev, "parent %s should not be sleeping\n",
151 			dev_name(dev->parent));
152 	list_add_tail(&dev->power.entry, &dpm_list);
153 	dev->power.in_dpm_list = true;
154 	mutex_unlock(&dpm_list_mtx);
155 }
156 
157 /**
158  * device_pm_remove - Remove a device from the PM core's list of active devices.
159  * @dev: Device to be removed from the list.
160  */
161 void device_pm_remove(struct device *dev)
162 {
163 	if (device_pm_not_required(dev))
164 		return;
165 
166 	pr_debug("Removing info for %s:%s\n",
167 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
168 	complete_all(&dev->power.completion);
169 	mutex_lock(&dpm_list_mtx);
170 	list_del_init(&dev->power.entry);
171 	dev->power.in_dpm_list = false;
172 	mutex_unlock(&dpm_list_mtx);
173 	device_wakeup_disable(dev);
174 	pm_runtime_remove(dev);
175 	device_pm_check_callbacks(dev);
176 }
177 
178 /**
179  * device_pm_move_before - Move device in the PM core's list of active devices.
180  * @deva: Device to move in dpm_list.
181  * @devb: Device @deva should come before.
182  */
183 void device_pm_move_before(struct device *deva, struct device *devb)
184 {
185 	pr_debug("Moving %s:%s before %s:%s\n",
186 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188 	/* Delete deva from dpm_list and reinsert before devb. */
189 	list_move_tail(&deva->power.entry, &devb->power.entry);
190 }
191 
192 /**
193  * device_pm_move_after - Move device in the PM core's list of active devices.
194  * @deva: Device to move in dpm_list.
195  * @devb: Device @deva should come after.
196  */
197 void device_pm_move_after(struct device *deva, struct device *devb)
198 {
199 	pr_debug("Moving %s:%s after %s:%s\n",
200 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
201 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
202 	/* Delete deva from dpm_list and reinsert after devb. */
203 	list_move(&deva->power.entry, &devb->power.entry);
204 }
205 
206 /**
207  * device_pm_move_last - Move device to end of the PM core's list of devices.
208  * @dev: Device to move in dpm_list.
209  */
210 void device_pm_move_last(struct device *dev)
211 {
212 	pr_debug("Moving %s:%s to end of list\n",
213 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
214 	list_move_tail(&dev->power.entry, &dpm_list);
215 }
216 
217 static ktime_t initcall_debug_start(struct device *dev, void *cb)
218 {
219 	if (!pm_print_times_enabled)
220 		return 0;
221 
222 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
223 		 task_pid_nr(current),
224 		 dev->parent ? dev_name(dev->parent) : "none");
225 	return ktime_get();
226 }
227 
228 static void initcall_debug_report(struct device *dev, ktime_t calltime,
229 				  void *cb, int error)
230 {
231 	ktime_t rettime;
232 
233 	if (!pm_print_times_enabled)
234 		return;
235 
236 	rettime = ktime_get();
237 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
238 		 (unsigned long long)ktime_us_delta(rettime, calltime));
239 }
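
/*
 * Usage note (an assumption about typical setups, not taken from this file):
 * pm_print_times_enabled is normally toggled from user space through the
 * /sys/power/pm_print_times attribute, e.g. "echo 1 > /sys/power/pm_print_times",
 * after which the two helpers above log every suspend/resume callback and its
 * duration to the kernel log.
 */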
240 
241 /**
242  * dpm_wait - Wait for a PM operation to complete.
243  * @dev: Device to wait for.
244  * @async: If unset, wait only if the device's power.async_suspend flag is set.
245  */
246 static void dpm_wait(struct device *dev, bool async)
247 {
248 	if (!dev)
249 		return;
250 
251 	if (async || (pm_async_enabled && dev->power.async_suspend))
252 		wait_for_completion(&dev->power.completion);
253 }
254 
255 static int dpm_wait_fn(struct device *dev, void *async_ptr)
256 {
257 	dpm_wait(dev, *((bool *)async_ptr));
258 	return 0;
259 }
260 
261 static void dpm_wait_for_children(struct device *dev, bool async)
262 {
263 	device_for_each_child(dev, &async, dpm_wait_fn);
264 }
265 
266 static void dpm_wait_for_suppliers(struct device *dev, bool async)
267 {
268 	struct device_link *link;
269 	int idx;
270 
271 	idx = device_links_read_lock();
272 
273 	/*
274 	 * If the supplier goes away right after we've checked the link to it,
275 	 * we'll wait for its completion to change the state, but that's fine,
276 	 * because the only things that will block as a result are the SRCU
277 	 * callbacks freeing the link objects for the links in the list we're
278 	 * walking.
279 	 */
280 	dev_for_each_link_to_supplier(link, dev)
281 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
282 			dpm_wait(link->supplier, async);
283 
284 	device_links_read_unlock(idx);
285 }
286 
287 static bool dpm_wait_for_superior(struct device *dev, bool async)
288 {
289 	struct device *parent;
290 
291 	/*
292 	 * If the device is resumed asynchronously and the parent's callback
293 	 * deletes both the device and the parent itself, the parent object may
294 	 * be freed while this function is running, so avoid that by reference
295 	 * counting the parent once more unless the device has been deleted
296 	 * already (in which case return right away).
297 	 */
298 	mutex_lock(&dpm_list_mtx);
299 
300 	if (!device_pm_initialized(dev)) {
301 		mutex_unlock(&dpm_list_mtx);
302 		return false;
303 	}
304 
305 	parent = get_device(dev->parent);
306 
307 	mutex_unlock(&dpm_list_mtx);
308 
309 	dpm_wait(parent, async);
310 	put_device(parent);
311 
312 	dpm_wait_for_suppliers(dev, async);
313 
314 	/*
315 	 * If the parent's callback has deleted the device, attempting to resume
316 	 * it would be invalid, so avoid doing that then.
317 	 */
318 	return device_pm_initialized(dev);
319 }
320 
321 static void dpm_wait_for_consumers(struct device *dev, bool async)
322 {
323 	struct device_link *link;
324 	int idx;
325 
326 	idx = device_links_read_lock();
327 
328 	/*
329 	 * The status of a device link can only be changed from "dormant" by a
330 	 * probe, but that cannot happen during system suspend/resume.  In
331 	 * theory it can change to "dormant" at that time, but then it is
332	 * reasonable to wait for the target device anyway (e.g. if it goes
333 	 * away, it's better to wait for it to go away completely and then
334 	 * continue instead of trying to continue in parallel with its
335 	 * unregistration).
336 	 */
337 	dev_for_each_link_to_consumer(link, dev)
338 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
339 			dpm_wait(link->consumer, async);
340 
341 	device_links_read_unlock(idx);
342 }
343 
344 static void dpm_wait_for_subordinate(struct device *dev, bool async)
345 {
346 	dpm_wait_for_children(dev, async);
347 	dpm_wait_for_consumers(dev, async);
348 }
349 
350 /**
351  * pm_op - Return the PM operation appropriate for given PM event.
352  * @ops: PM operations to choose from.
353  * @state: PM transition of the system being carried out.
354  */
355 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
356 {
357 	switch (state.event) {
358 #ifdef CONFIG_SUSPEND
359 	case PM_EVENT_SUSPEND:
360 		return ops->suspend;
361 	case PM_EVENT_RESUME:
362 		return ops->resume;
363 #endif /* CONFIG_SUSPEND */
364 #ifdef CONFIG_HIBERNATE_CALLBACKS
365 	case PM_EVENT_FREEZE:
366 	case PM_EVENT_QUIESCE:
367 		return ops->freeze;
368 	case PM_EVENT_HIBERNATE:
369 		return ops->poweroff;
370 	case PM_EVENT_THAW:
371 	case PM_EVENT_RECOVER:
372 		return ops->thaw;
373 	case PM_EVENT_RESTORE:
374 		return ops->restore;
375 #endif /* CONFIG_HIBERNATE_CALLBACKS */
376 	}
377 
378 	return NULL;
379 }
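
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With such a definition, pm_op(&foo_pm_ops, PMSG_SUSPEND) returns
 * foo_suspend and pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume, while
 * the hibernation events select the freeze/thaw/poweroff/restore members,
 * which the macro points at the same pair of functions.
 */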
380 
381 /**
382  * pm_late_early_op - Return the PM operation appropriate for given PM event.
383  * @ops: PM operations to choose from.
384  * @state: PM transition of the system being carried out.
385  *
386  * Runtime PM is disabled for @dev while this function is being executed.
387  */
388 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
389 				      pm_message_t state)
390 {
391 	switch (state.event) {
392 #ifdef CONFIG_SUSPEND
393 	case PM_EVENT_SUSPEND:
394 		return ops->suspend_late;
395 	case PM_EVENT_RESUME:
396 		return ops->resume_early;
397 #endif /* CONFIG_SUSPEND */
398 #ifdef CONFIG_HIBERNATE_CALLBACKS
399 	case PM_EVENT_FREEZE:
400 	case PM_EVENT_QUIESCE:
401 		return ops->freeze_late;
402 	case PM_EVENT_HIBERNATE:
403 		return ops->poweroff_late;
404 	case PM_EVENT_THAW:
405 	case PM_EVENT_RECOVER:
406 		return ops->thaw_early;
407 	case PM_EVENT_RESTORE:
408 		return ops->restore_early;
409 #endif /* CONFIG_HIBERNATE_CALLBACKS */
410 	}
411 
412 	return NULL;
413 }
414 
415 /**
416  * pm_noirq_op - Return the PM operation appropriate for given PM event.
417  * @ops: PM operations to choose from.
418  * @state: PM transition of the system being carried out.
419  *
420  * Device drivers will not receive interrupts while the callback returned by
421  * this function is being executed.
422  */
423 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
424 {
425 	switch (state.event) {
426 #ifdef CONFIG_SUSPEND
427 	case PM_EVENT_SUSPEND:
428 		return ops->suspend_noirq;
429 	case PM_EVENT_RESUME:
430 		return ops->resume_noirq;
431 #endif /* CONFIG_SUSPEND */
432 #ifdef CONFIG_HIBERNATE_CALLBACKS
433 	case PM_EVENT_FREEZE:
434 	case PM_EVENT_QUIESCE:
435 		return ops->freeze_noirq;
436 	case PM_EVENT_HIBERNATE:
437 		return ops->poweroff_noirq;
438 	case PM_EVENT_THAW:
439 	case PM_EVENT_RECOVER:
440 		return ops->thaw_noirq;
441 	case PM_EVENT_RESTORE:
442 		return ops->restore_noirq;
443 #endif /* CONFIG_HIBERNATE_CALLBACKS */
444 	}
445 
446 	return NULL;
447 }
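
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that needs all three suspend/resume phases could populate its dev_pm_ops
 * roughly as
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 *
 * pm_op(), pm_late_early_op() and pm_noirq_op() then pick the member that
 * matches the phase currently being executed for the PM event in question.
 */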
448 
449 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
450 {
451 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
452 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
453 		", may wakeup" : "", dev->power.driver_flags);
454 }
455 
456 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
457 			int error)
458 {
459 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
460 		error);
461 }
462 
463 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
464 			  const char *info)
465 {
466 	ktime_t calltime;
467 	u64 usecs64;
468 	int usecs;
469 
470 	calltime = ktime_get();
471 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
472 	do_div(usecs64, NSEC_PER_USEC);
473 	usecs = usecs64;
474 	if (usecs == 0)
475 		usecs = 1;
476 
477 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
478 		  info ?: "", info ? " " : "", pm_verb(state.event),
479 		  error ? "aborted" : "complete",
480 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
481 }
482 
483 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
484 			    pm_message_t state, const char *info)
485 {
486 	ktime_t calltime;
487 	int error;
488 
489 	if (!cb)
490 		return 0;
491 
492 	calltime = initcall_debug_start(dev, cb);
493 
494 	pm_dev_dbg(dev, state, info);
495 	trace_device_pm_callback_start(dev, info, state.event);
496 	error = cb(dev);
497 	trace_device_pm_callback_end(dev, error);
498 	suspend_report_result(dev, cb, error);
499 
500 	initcall_debug_report(dev, calltime, cb, error);
501 
502 	return error;
503 }
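
/*
 * Usage note (an assumption about typical debugging, not taken from this
 * file): the trace points wrapped around the callback above are the
 * power:device_pm_callback_start/_end events, so per-device callback timing
 * can also be captured by enabling those events under
 * /sys/kernel/tracing/events/power/, in addition to the initcall_debug-style
 * dev_info() logging done by initcall_debug_start/report().
 */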
504 
505 #ifdef CONFIG_DPM_WATCHDOG
506 struct dpm_watchdog {
507 	struct device		*dev;
508 	struct task_struct	*tsk;
509 	struct timer_list	timer;
510 	bool			fatal;
511 };
512 
513 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
514 	struct dpm_watchdog wd
515 
516 /**
517  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
518  * @t: The timer used by the PM watchdog.
519  *
520  * Called when a driver has timed out suspending or resuming.
521  * There's not much we can do here to recover so panic() to
522  * capture a crash-dump in pstore.
523  */
524 static void dpm_watchdog_handler(struct timer_list *t)
525 {
526 	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
527 	struct timer_list *timer = &wd->timer;
528 	unsigned int time_left;
529 
530 	if (wd->fatal) {
531 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
532 		show_stack(wd->tsk, NULL, KERN_EMERG);
533 		panic("%s %s: unrecoverable failure\n",
534 			dev_driver_string(wd->dev), dev_name(wd->dev));
535 	}
536 
537 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
538 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
539 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
540 	show_stack(wd->tsk, NULL, KERN_WARNING);
541 
542 	wd->fatal = true;
543 	mod_timer(timer, jiffies + HZ * time_left);
544 }
545 
546 /**
547  * dpm_watchdog_set - Enable pm watchdog for given device.
548  * @wd: Watchdog. Must be allocated on the stack.
549  * @dev: Device to handle.
550  */
551 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
552 {
553 	struct timer_list *timer = &wd->timer;
554 
555 	wd->dev = dev;
556 	wd->tsk = current;
557 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
558 
559 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
560 	/* use same timeout value for both suspend and resume */
561 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
562 	add_timer(timer);
563 }
564 
565 /**
566  * dpm_watchdog_clear - Disable suspend/resume watchdog.
567  * @wd: Watchdog to disable.
568  */
569 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
570 {
571 	struct timer_list *timer = &wd->timer;
572 
573 	timer_delete_sync(timer);
574 	timer_destroy_on_stack(timer);
575 }
576 #else
577 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
578 #define dpm_watchdog_set(x, y)
579 #define dpm_watchdog_clear(x)
580 #endif
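
/*
 * Worked example (assuming CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=60 and
 * CONFIG_DPM_WATCHDOG_TIMEOUT=120): dpm_watchdog_set() arms the timer for
 * 60 s.  If the callback has not returned by then, dpm_watchdog_handler()
 * prints a warning with the blocked task's stack and re-arms the timer for
 * the remaining 120 - 60 = 60 s, after which it panics.  If the two values
 * are equal, wd->fatal starts out true and the first expiry panics directly.
 */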
581 
582 /*------------------------- Resume routines -------------------------*/
583 
584 /**
585  * dev_pm_skip_resume - System-wide device resume optimization check.
586  * @dev: Target device.
587  *
588  * Return:
589  * - %false if the transition under way is RESTORE.
590  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
591  * - The logical negation of %power.must_resume otherwise (that is, when the
592  *   transition under way is RESUME).
593  */
594 bool dev_pm_skip_resume(struct device *dev)
595 {
596 	if (pm_transition.event == PM_EVENT_RESTORE)
597 		return false;
598 
599 	if (pm_transition.event == PM_EVENT_THAW)
600 		return dev_pm_skip_suspend(dev);
601 
602 	return !dev->power.must_resume;
603 }
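
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that wants the PM core to consider leaving its device suspended across a
 * system transition would typically set the relevant flags at probe time:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * power.may_skip_resume and power.must_resume, updated during the suspend
 * phases below, then determine what dev_pm_skip_resume() returns for it.
 */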
604 
605 static bool is_async(struct device *dev)
606 {
607 	return dev->power.async_suspend && pm_async_enabled
608 		&& !pm_trace_is_enabled();
609 }
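
/*
 * Usage note (an assumption about typical setups, not taken from this file):
 * power.async_suspend is normally set by a bus or driver calling
 * device_enable_async_suspend(dev), and the global pm_async_enabled switch is
 * exposed as /sys/power/pm_async.  Only when both are set (and PM tracing is
 * disabled) is the device handled on the async path below.
 */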
610 
611 static bool __dpm_async(struct device *dev, async_func_t func)
612 {
613 	if (dev->power.work_in_progress)
614 		return true;
615 
616 	if (!is_async(dev))
617 		return false;
618 
619 	dev->power.work_in_progress = true;
620 
621 	get_device(dev);
622 
623 	if (async_schedule_dev_nocall(func, dev))
624 		return true;
625 
626 	put_device(dev);
627 
628 	return false;
629 }
630 
631 static bool dpm_async_fn(struct device *dev, async_func_t func)
632 {
633 	guard(mutex)(&async_wip_mtx);
634 
635 	return __dpm_async(dev, func);
636 }
637 
638 static int dpm_async_with_cleanup(struct device *dev, void *fn)
639 {
640 	guard(mutex)(&async_wip_mtx);
641 
642 	if (!__dpm_async(dev, fn))
643 		dev->power.work_in_progress = false;
644 
645 	return 0;
646 }
647 
648 static void dpm_async_resume_children(struct device *dev, async_func_t func)
649 {
650 	/*
651 	 * Prevent racing with dpm_clear_async_state() during initial list
652 	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
653 	 * dpm_resume().
654 	 */
655 	guard(mutex)(&dpm_list_mtx);
656 
657 	/*
658	 * Start processing "async" children of the device unless this has
659	 * already been started for them.
660 	 */
661 	device_for_each_child(dev, func, dpm_async_with_cleanup);
662 }
663 
664 static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
665 {
666 	struct device_link *link;
667 	int idx;
668 
669 	dpm_async_resume_children(dev, func);
670 
671 	idx = device_links_read_lock();
672 
673 	/* Start processing the device's "async" consumers. */
674 	dev_for_each_link_to_consumer(link, dev)
675 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
676 			dpm_async_with_cleanup(link->consumer, func);
677 
678 	device_links_read_unlock(idx);
679 }
680 
681 static void dpm_clear_async_state(struct device *dev)
682 {
683 	reinit_completion(&dev->power.completion);
684 	dev->power.work_in_progress = false;
685 }
686 
687 static bool dpm_root_device(struct device *dev)
688 {
689 	lockdep_assert_held(&dpm_list_mtx);
690 
691 	/*
692 	 * Since this function is required to run under dpm_list_mtx, the
693 	 * list_empty() below will only return true if the device's list of
694 	 * consumers is actually empty before calling it.
695	 * suppliers is actually empty before calling it.
696 	return !dev->parent && list_empty(&dev->links.suppliers);
697 }
698 
699 static void async_resume_noirq(void *data, async_cookie_t cookie);
700 
701 /**
702  * device_resume_noirq - Execute a "noirq resume" callback for given device.
703  * @dev: Device to handle.
704  * @state: PM transition of the system being carried out.
705  * @async: If true, the device is being resumed asynchronously.
706  *
707  * The driver of @dev will not receive interrupts while this function is being
708  * executed.
709  */
710 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
711 {
712 	pm_callback_t callback = NULL;
713 	const char *info = NULL;
714 	bool skip_resume;
715 	int error = 0;
716 
717 	TRACE_DEVICE(dev);
718 	TRACE_RESUME(0);
719 
720 	if (dev->power.syscore || dev->power.direct_complete)
721 		goto Out;
722 
723 	if (!dev->power.is_noirq_suspended) {
724 		/*
725 		 * This means that system suspend has been aborted in the noirq
726 		 * phase before invoking the noirq suspend callback for the
727 		 * device, so if device_suspend_late() has left it in suspend,
728		 * device_resume_early() should leave it in suspend as well, in
729		 * case its early resume depends on the noirq resume that has
730		 * not run.
731 		 */
732 		if (dev_pm_skip_suspend(dev))
733 			dev->power.must_resume = false;
734 
735 		goto Out;
736 	}
737 
738 	if (!dpm_wait_for_superior(dev, async))
739 		goto Out;
740 
741 	skip_resume = dev_pm_skip_resume(dev);
742 	/*
743 	 * If the driver callback is skipped below or by the middle layer
744 	 * callback and device_resume_early() also skips the driver callback for
745 	 * this device later, it needs to appear as "suspended" to PM-runtime,
746 	 * so change its status accordingly.
747 	 *
748 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
749 	 * status to "active" unless its power.smart_suspend flag is clear, in
750 	 * which case it is not necessary to update its PM-runtime status.
751 	 */
752 	if (skip_resume)
753 		pm_runtime_set_suspended(dev);
754 	else if (dev_pm_smart_suspend(dev))
755 		pm_runtime_set_active(dev);
756 
757 	if (dev->pm_domain) {
758 		info = "noirq power domain ";
759 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
760 	} else if (dev->type && dev->type->pm) {
761 		info = "noirq type ";
762 		callback = pm_noirq_op(dev->type->pm, state);
763 	} else if (dev->class && dev->class->pm) {
764 		info = "noirq class ";
765 		callback = pm_noirq_op(dev->class->pm, state);
766 	} else if (dev->bus && dev->bus->pm) {
767 		info = "noirq bus ";
768 		callback = pm_noirq_op(dev->bus->pm, state);
769 	}
770 	if (callback)
771 		goto Run;
772 
773 	if (skip_resume)
774 		goto Skip;
775 
776 	if (dev->driver && dev->driver->pm) {
777 		info = "noirq driver ";
778 		callback = pm_noirq_op(dev->driver->pm, state);
779 	}
780 
781 Run:
782 	error = dpm_run_callback(callback, dev, state, info);
783 
784 Skip:
785 	dev->power.is_noirq_suspended = false;
786 
787 Out:
788 	complete_all(&dev->power.completion);
789 	TRACE_RESUME(error);
790 
791 	if (error) {
792 		WRITE_ONCE(async_error, error);
793 		dpm_save_failed_dev(dev_name(dev));
794 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
795 	}
796 
797 	dpm_async_resume_subordinate(dev, async_resume_noirq);
798 }
799 
800 static void async_resume_noirq(void *data, async_cookie_t cookie)
801 {
802 	struct device *dev = data;
803 
804 	device_resume_noirq(dev, pm_transition, true);
805 	put_device(dev);
806 }
807 
808 static void dpm_noirq_resume_devices(pm_message_t state)
809 {
810 	struct device *dev;
811 	ktime_t starttime = ktime_get();
812 
813 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
814 
815 	async_error = 0;
816 	pm_transition = state;
817 
818 	mutex_lock(&dpm_list_mtx);
819 
820 	/*
821 	 * Start processing "async" root devices upfront so they don't wait for
822 	 * the "sync" devices they don't depend on.
823 	 */
824 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
825 		dpm_clear_async_state(dev);
826 		if (dpm_root_device(dev))
827 			dpm_async_with_cleanup(dev, async_resume_noirq);
828 	}
829 
830 	while (!list_empty(&dpm_noirq_list)) {
831 		dev = to_device(dpm_noirq_list.next);
832 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
833 
834 		if (!dpm_async_fn(dev, async_resume_noirq)) {
835 			get_device(dev);
836 
837 			mutex_unlock(&dpm_list_mtx);
838 
839 			device_resume_noirq(dev, state, false);
840 
841 			put_device(dev);
842 
843 			mutex_lock(&dpm_list_mtx);
844 		}
845 	}
846 	mutex_unlock(&dpm_list_mtx);
847 	async_synchronize_full();
848 	dpm_show_time(starttime, state, 0, "noirq");
849 	if (READ_ONCE(async_error))
850 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
851 
852 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
853 }
854 
855 /**
856  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
857  * @state: PM transition of the system being carried out.
858  *
859  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
860  * allow device drivers' interrupt handlers to be called.
861  */
862 void dpm_resume_noirq(pm_message_t state)
863 {
864 	dpm_noirq_resume_devices(state);
865 
866 	resume_device_irqs();
867 	device_wakeup_disarm_wake_irqs();
868 }
869 
870 static void async_resume_early(void *data, async_cookie_t cookie);
871 
872 /**
873  * device_resume_early - Execute an "early resume" callback for given device.
874  * @dev: Device to handle.
875  * @state: PM transition of the system being carried out.
876  * @async: If true, the device is being resumed asynchronously.
877  *
878  * Runtime PM is disabled for @dev while this function is being executed.
879  */
880 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
881 {
882 	pm_callback_t callback = NULL;
883 	const char *info = NULL;
884 	int error = 0;
885 
886 	TRACE_DEVICE(dev);
887 	TRACE_RESUME(0);
888 
889 	if (dev->power.syscore || dev->power.direct_complete)
890 		goto Out;
891 
892 	if (!dev->power.is_late_suspended)
893 		goto Out;
894 
895 	if (!dpm_wait_for_superior(dev, async))
896 		goto Out;
897 
898 	if (dev->pm_domain) {
899 		info = "early power domain ";
900 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
901 	} else if (dev->type && dev->type->pm) {
902 		info = "early type ";
903 		callback = pm_late_early_op(dev->type->pm, state);
904 	} else if (dev->class && dev->class->pm) {
905 		info = "early class ";
906 		callback = pm_late_early_op(dev->class->pm, state);
907 	} else if (dev->bus && dev->bus->pm) {
908 		info = "early bus ";
909 		callback = pm_late_early_op(dev->bus->pm, state);
910 	}
911 	if (callback)
912 		goto Run;
913 
914 	if (dev_pm_skip_resume(dev))
915 		goto Skip;
916 
917 	if (dev->driver && dev->driver->pm) {
918 		info = "early driver ";
919 		callback = pm_late_early_op(dev->driver->pm, state);
920 	}
921 
922 Run:
923 	error = dpm_run_callback(callback, dev, state, info);
924 
925 Skip:
926 	dev->power.is_late_suspended = false;
927 
928 Out:
929 	TRACE_RESUME(error);
930 
931 	pm_runtime_enable(dev);
932 	complete_all(&dev->power.completion);
933 
934 	if (error) {
935 		WRITE_ONCE(async_error, error);
936 		dpm_save_failed_dev(dev_name(dev));
937 		pm_dev_err(dev, state, async ? " async early" : " early", error);
938 	}
939 
940 	dpm_async_resume_subordinate(dev, async_resume_early);
941 }
942 
943 static void async_resume_early(void *data, async_cookie_t cookie)
944 {
945 	struct device *dev = data;
946 
947 	device_resume_early(dev, pm_transition, true);
948 	put_device(dev);
949 }
950 
951 /**
952  * dpm_resume_early - Execute "early resume" callbacks for all devices.
953  * @state: PM transition of the system being carried out.
954  */
955 void dpm_resume_early(pm_message_t state)
956 {
957 	struct device *dev;
958 	ktime_t starttime = ktime_get();
959 
960 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
961 
962 	async_error = 0;
963 	pm_transition = state;
964 
965 	mutex_lock(&dpm_list_mtx);
966 
967 	/*
968 	 * Start processing "async" root devices upfront so they don't wait for
969 	 * the "sync" devices they don't depend on.
970 	 */
971 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
972 		dpm_clear_async_state(dev);
973 		if (dpm_root_device(dev))
974 			dpm_async_with_cleanup(dev, async_resume_early);
975 	}
976 
977 	while (!list_empty(&dpm_late_early_list)) {
978 		dev = to_device(dpm_late_early_list.next);
979 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
980 
981 		if (!dpm_async_fn(dev, async_resume_early)) {
982 			get_device(dev);
983 
984 			mutex_unlock(&dpm_list_mtx);
985 
986 			device_resume_early(dev, state, false);
987 
988 			put_device(dev);
989 
990 			mutex_lock(&dpm_list_mtx);
991 		}
992 	}
993 	mutex_unlock(&dpm_list_mtx);
994 	async_synchronize_full();
995 	dpm_show_time(starttime, state, 0, "early");
996 	if (READ_ONCE(async_error))
997 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
998 
999 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
1000 }
1001 
1002 /**
1003  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
1004  * @state: PM transition of the system being carried out.
1005  */
1006 void dpm_resume_start(pm_message_t state)
1007 {
1008 	dpm_resume_noirq(state);
1009 	dpm_resume_early(state);
1010 }
1011 EXPORT_SYMBOL_GPL(dpm_resume_start);
1012 
1013 static void async_resume(void *data, async_cookie_t cookie);
1014 
1015 /**
1016  * device_resume - Execute "resume" callbacks for given device.
1017  * @dev: Device to handle.
1018  * @state: PM transition of the system being carried out.
1019  * @async: If true, the device is being resumed asynchronously.
1020  */
1021 static void device_resume(struct device *dev, pm_message_t state, bool async)
1022 {
1023 	pm_callback_t callback = NULL;
1024 	const char *info = NULL;
1025 	int error = 0;
1026 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1027 
1028 	TRACE_DEVICE(dev);
1029 	TRACE_RESUME(0);
1030 
1031 	if (dev->power.syscore)
1032 		goto Complete;
1033 
1034 	if (!dev->power.is_suspended)
1035 		goto Complete;
1036 
1037 	dev->power.is_suspended = false;
1038 
1039 	if (dev->power.direct_complete) {
1040 		/*
1041 		 * Allow new children to be added under the device after this
1042 		 * point if it has no PM callbacks.
1043 		 */
1044 		if (dev->power.no_pm_callbacks)
1045 			dev->power.is_prepared = false;
1046 
1047 		/* Match the pm_runtime_disable() in device_suspend(). */
1048 		pm_runtime_enable(dev);
1049 		goto Complete;
1050 	}
1051 
1052 	if (!dpm_wait_for_superior(dev, async))
1053 		goto Complete;
1054 
1055 	dpm_watchdog_set(&wd, dev);
1056 	device_lock(dev);
1057 
1058 	/*
1059 	 * This is a fib.  But we'll allow new children to be added below
1060 	 * a resumed device, even if the device hasn't been completed yet.
1061 	 */
1062 	dev->power.is_prepared = false;
1063 
1064 	if (dev->pm_domain) {
1065 		info = "power domain ";
1066 		callback = pm_op(&dev->pm_domain->ops, state);
1067 		goto Driver;
1068 	}
1069 
1070 	if (dev->type && dev->type->pm) {
1071 		info = "type ";
1072 		callback = pm_op(dev->type->pm, state);
1073 		goto Driver;
1074 	}
1075 
1076 	if (dev->class && dev->class->pm) {
1077 		info = "class ";
1078 		callback = pm_op(dev->class->pm, state);
1079 		goto Driver;
1080 	}
1081 
1082 	if (dev->bus) {
1083 		if (dev->bus->pm) {
1084 			info = "bus ";
1085 			callback = pm_op(dev->bus->pm, state);
1086 		} else if (dev->bus->resume) {
1087 			info = "legacy bus ";
1088 			callback = dev->bus->resume;
1089 			goto End;
1090 		}
1091 	}
1092 
1093  Driver:
1094 	if (!callback && dev->driver && dev->driver->pm) {
1095 		info = "driver ";
1096 		callback = pm_op(dev->driver->pm, state);
1097 	}
1098 
1099  End:
1100 	error = dpm_run_callback(callback, dev, state, info);
1101 
1102 	device_unlock(dev);
1103 	dpm_watchdog_clear(&wd);
1104 
1105  Complete:
1106 	complete_all(&dev->power.completion);
1107 
1108 	TRACE_RESUME(error);
1109 
1110 	if (error) {
1111 		WRITE_ONCE(async_error, error);
1112 		dpm_save_failed_dev(dev_name(dev));
1113 		pm_dev_err(dev, state, async ? " async" : "", error);
1114 	}
1115 
1116 	dpm_async_resume_subordinate(dev, async_resume);
1117 }
1118 
1119 static void async_resume(void *data, async_cookie_t cookie)
1120 {
1121 	struct device *dev = data;
1122 
1123 	device_resume(dev, pm_transition, true);
1124 	put_device(dev);
1125 }
1126 
1127 /**
1128  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1129  * @state: PM transition of the system being carried out.
1130  *
1131  * Execute the appropriate "resume" callback for all devices whose status
1132  * indicates that they are suspended.
1133  */
1134 void dpm_resume(pm_message_t state)
1135 {
1136 	struct device *dev;
1137 	ktime_t starttime = ktime_get();
1138 
1139 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1140 
1141 	pm_transition = state;
1142 	async_error = 0;
1143 
1144 	mutex_lock(&dpm_list_mtx);
1145 
1146 	/*
1147 	 * Start processing "async" root devices upfront so they don't wait for
1148 	 * the "sync" devices they don't depend on.
1149 	 */
1150 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1151 		dpm_clear_async_state(dev);
1152 		if (dpm_root_device(dev))
1153 			dpm_async_with_cleanup(dev, async_resume);
1154 	}
1155 
1156 	while (!list_empty(&dpm_suspended_list)) {
1157 		dev = to_device(dpm_suspended_list.next);
1158 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1159 
1160 		if (!dpm_async_fn(dev, async_resume)) {
1161 			get_device(dev);
1162 
1163 			mutex_unlock(&dpm_list_mtx);
1164 
1165 			device_resume(dev, state, false);
1166 
1167 			put_device(dev);
1168 
1169 			mutex_lock(&dpm_list_mtx);
1170 		}
1171 	}
1172 	mutex_unlock(&dpm_list_mtx);
1173 	async_synchronize_full();
1174 	dpm_show_time(starttime, state, 0, NULL);
1175 	if (READ_ONCE(async_error))
1176 		dpm_save_failed_step(SUSPEND_RESUME);
1177 
1178 	cpufreq_resume();
1179 	devfreq_resume();
1180 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1181 }
1182 
1183 /**
1184  * device_complete - Complete a PM transition for given device.
1185  * @dev: Device to handle.
1186  * @state: PM transition of the system being carried out.
1187  */
1188 static void device_complete(struct device *dev, pm_message_t state)
1189 {
1190 	void (*callback)(struct device *) = NULL;
1191 	const char *info = NULL;
1192 
1193 	if (dev->power.syscore)
1194 		goto out;
1195 
1196 	device_lock(dev);
1197 
1198 	if (dev->pm_domain) {
1199 		info = "completing power domain ";
1200 		callback = dev->pm_domain->ops.complete;
1201 	} else if (dev->type && dev->type->pm) {
1202 		info = "completing type ";
1203 		callback = dev->type->pm->complete;
1204 	} else if (dev->class && dev->class->pm) {
1205 		info = "completing class ";
1206 		callback = dev->class->pm->complete;
1207 	} else if (dev->bus && dev->bus->pm) {
1208 		info = "completing bus ";
1209 		callback = dev->bus->pm->complete;
1210 	}
1211 
1212 	if (!callback && dev->driver && dev->driver->pm) {
1213 		info = "completing driver ";
1214 		callback = dev->driver->pm->complete;
1215 	}
1216 
1217 	if (callback) {
1218 		pm_dev_dbg(dev, state, info);
1219 		callback(dev);
1220 	}
1221 
1222 	device_unlock(dev);
1223 
1224 out:
1225 	/* If enabling runtime PM for the device is blocked, unblock it. */
1226 	pm_runtime_unblock(dev);
1227 	pm_runtime_put(dev);
1228 }
1229 
1230 /**
1231  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1232  * @state: PM transition of the system being carried out.
1233  *
1234  * Execute the ->complete() callbacks for all devices whose PM status is not
1235  * DPM_ON (this allows new devices to be registered).
1236  */
1237 void dpm_complete(pm_message_t state)
1238 {
1239 	struct list_head list;
1240 
1241 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1242 
1243 	INIT_LIST_HEAD(&list);
1244 	mutex_lock(&dpm_list_mtx);
1245 	while (!list_empty(&dpm_prepared_list)) {
1246 		struct device *dev = to_device(dpm_prepared_list.prev);
1247 
1248 		get_device(dev);
1249 		dev->power.is_prepared = false;
1250 		list_move(&dev->power.entry, &list);
1251 
1252 		mutex_unlock(&dpm_list_mtx);
1253 
1254 		trace_device_pm_callback_start(dev, "", state.event);
1255 		device_complete(dev, state);
1256 		trace_device_pm_callback_end(dev, 0);
1257 
1258 		put_device(dev);
1259 
1260 		mutex_lock(&dpm_list_mtx);
1261 	}
1262 	list_splice(&list, &dpm_list);
1263 	mutex_unlock(&dpm_list_mtx);
1264 
1265 	/* Allow device probing and trigger re-probing of deferred devices */
1266 	device_unblock_probing();
1267 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1268 }
1269 
1270 /**
1271  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1272  * @state: PM transition of the system being carried out.
1273  *
1274  * Execute "resume" callbacks for all devices and complete the PM transition of
1275  * the system.
1276  */
1277 void dpm_resume_end(pm_message_t state)
1278 {
1279 	dpm_resume(state);
1280 	pm_restore_gfp_mask();
1281 	dpm_complete(state);
1282 }
1283 EXPORT_SYMBOL_GPL(dpm_resume_end);
1284 
1285 
1286 /*------------------------- Suspend routines -------------------------*/
1287 
1288 static bool dpm_leaf_device(struct device *dev)
1289 {
1290 	struct device *child;
1291 
1292 	lockdep_assert_held(&dpm_list_mtx);
1293 
1294 	child = device_find_any_child(dev);
1295 	if (child) {
1296 		put_device(child);
1297 
1298 		return false;
1299 	}
1300 
1301 	/*
1302 	 * Since this function is required to run under dpm_list_mtx, the
1303 	 * list_empty() below will only return true if the device's list of
1304 	 * consumers is actually empty before calling it.
1305 	 */
1306 	return list_empty(&dev->links.consumers);
1307 }
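
/*
 * Note (summarizing the list walks in this file): the suspend-side phases
 * seed the async machinery with "leaf" devices (no children, no consumers)
 * and then walk towards their superiors, while the resume-side phases seed it
 * with "root" devices (no parent, no suppliers) and walk towards their
 * subordinates, so dependencies are always processed in a safe order.
 */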
1308 
1309 static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
1310 {
1311 	guard(mutex)(&dpm_list_mtx);
1312 
1313 	/*
1314 	 * If the device is suspended asynchronously and the parent's callback
1315 	 * deletes both the device and the parent itself, the parent object may
1316 	 * be freed while this function is running, so avoid that by checking
1317 	 * if the device has been deleted already as the parent cannot be
1318 	 * deleted before it.
1319 	 */
1320 	if (!device_pm_initialized(dev))
1321 		return false;
1322 
1323 	/* Start processing the device's parent if it is "async". */
1324 	if (dev->parent)
1325 		dpm_async_with_cleanup(dev->parent, func);
1326 
1327 	return true;
1328 }
1329 
1330 static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
1331 {
1332 	struct device_link *link;
1333 	int idx;
1334 
1335 	if (!dpm_async_suspend_parent(dev, func))
1336 		return;
1337 
1338 	idx = device_links_read_lock();
1339 
1340 	/* Start processing the device's "async" suppliers. */
1341 	dev_for_each_link_to_supplier(link, dev)
1342 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
1343 			dpm_async_with_cleanup(link->supplier, func);
1344 
1345 	device_links_read_unlock(idx);
1346 }
1347 
1348 static void dpm_async_suspend_complete_all(struct list_head *device_list)
1349 {
1350 	struct device *dev;
1351 
1352 	guard(mutex)(&async_wip_mtx);
1353 
1354 	list_for_each_entry_reverse(dev, device_list, power.entry) {
1355 		/*
1356 		 * In case the device is being waited for and async processing
1357 		 * has not started for it yet, let the waiters make progress.
1358 		 */
1359 		if (!dev->power.work_in_progress)
1360 			complete_all(&dev->power.completion);
1361 	}
1362 }
1363 
1364 /**
1365  * resume_event - Return a "resume" message for given "suspend" sleep state.
1366  * @sleep_state: PM message representing a sleep state.
1367  *
1368  * Return a PM message representing the resume event corresponding to given
1369  * sleep state.
1370  */
1371 static pm_message_t resume_event(pm_message_t sleep_state)
1372 {
1373 	switch (sleep_state.event) {
1374 	case PM_EVENT_SUSPEND:
1375 		return PMSG_RESUME;
1376 	case PM_EVENT_FREEZE:
1377 	case PM_EVENT_QUIESCE:
1378 		return PMSG_RECOVER;
1379 	case PM_EVENT_HIBERNATE:
1380 		return PMSG_RESTORE;
1381 	}
1382 	return PMSG_ON;
1383 }
1384 
1385 static void dpm_superior_set_must_resume(struct device *dev)
1386 {
1387 	struct device_link *link;
1388 	int idx;
1389 
1390 	if (dev->parent)
1391 		dev->parent->power.must_resume = true;
1392 
1393 	idx = device_links_read_lock();
1394 
1395 	dev_for_each_link_to_supplier(link, dev)
1396 		link->supplier->power.must_resume = true;
1397 
1398 	device_links_read_unlock(idx);
1399 }
1400 
1401 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1402 
1403 /**
1404  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1405  * @dev: Device to handle.
1406  * @state: PM transition of the system being carried out.
1407  * @async: If true, the device is being suspended asynchronously.
1408  *
1409  * The driver of @dev will not receive interrupts while this function is being
1410  * executed.
1411  */
1412 static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1413 {
1414 	pm_callback_t callback = NULL;
1415 	const char *info = NULL;
1416 	int error = 0;
1417 
1418 	TRACE_DEVICE(dev);
1419 	TRACE_SUSPEND(0);
1420 
1421 	dpm_wait_for_subordinate(dev, async);
1422 
1423 	if (READ_ONCE(async_error))
1424 		goto Complete;
1425 
1426 	if (dev->power.syscore || dev->power.direct_complete)
1427 		goto Complete;
1428 
1429 	if (dev->pm_domain) {
1430 		info = "noirq power domain ";
1431 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1432 	} else if (dev->type && dev->type->pm) {
1433 		info = "noirq type ";
1434 		callback = pm_noirq_op(dev->type->pm, state);
1435 	} else if (dev->class && dev->class->pm) {
1436 		info = "noirq class ";
1437 		callback = pm_noirq_op(dev->class->pm, state);
1438 	} else if (dev->bus && dev->bus->pm) {
1439 		info = "noirq bus ";
1440 		callback = pm_noirq_op(dev->bus->pm, state);
1441 	}
1442 	if (callback)
1443 		goto Run;
1444 
1445 	if (dev_pm_skip_suspend(dev))
1446 		goto Skip;
1447 
1448 	if (dev->driver && dev->driver->pm) {
1449 		info = "noirq driver ";
1450 		callback = pm_noirq_op(dev->driver->pm, state);
1451 	}
1452 
1453 Run:
1454 	error = dpm_run_callback(callback, dev, state, info);
1455 	if (error) {
1456 		WRITE_ONCE(async_error, error);
1457 		dpm_save_failed_dev(dev_name(dev));
1458 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1459 		goto Complete;
1460 	}
1461 
1462 Skip:
1463 	dev->power.is_noirq_suspended = true;
1464 
1465 	/*
1466 	 * Devices must be resumed unless they are explicitly allowed to be left
1467 	 * in suspend, but even in that case skipping the resume of devices that
1468 	 * were in use right before the system suspend (as indicated by their
1469 	 * runtime PM usage counters and child counters) would be suboptimal.
1470 	 */
1471 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1472 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1473 		dev->power.must_resume = true;
1474 
1475 	if (dev->power.must_resume)
1476 		dpm_superior_set_must_resume(dev);
1477 
1478 Complete:
1479 	complete_all(&dev->power.completion);
1480 	TRACE_SUSPEND(error);
1481 
1482 	if (error || READ_ONCE(async_error))
1483 		return;
1484 
1485 	dpm_async_suspend_superior(dev, async_suspend_noirq);
1486 }
1487 
1488 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1489 {
1490 	struct device *dev = data;
1491 
1492 	device_suspend_noirq(dev, pm_transition, true);
1493 	put_device(dev);
1494 }
1495 
1496 static int dpm_noirq_suspend_devices(pm_message_t state)
1497 {
1498 	ktime_t starttime = ktime_get();
1499 	struct device *dev;
1500 	int error;
1501 
1502 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1503 
1504 	pm_transition = state;
1505 	async_error = 0;
1506 
1507 	mutex_lock(&dpm_list_mtx);
1508 
1509 	/*
1510 	 * Start processing "async" leaf devices upfront so they don't need to
1511 	 * wait for the "sync" devices they don't depend on.
1512 	 */
1513 	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1514 		dpm_clear_async_state(dev);
1515 		if (dpm_leaf_device(dev))
1516 			dpm_async_with_cleanup(dev, async_suspend_noirq);
1517 	}
1518 
1519 	while (!list_empty(&dpm_late_early_list)) {
1520 		dev = to_device(dpm_late_early_list.prev);
1521 
1522 		list_move(&dev->power.entry, &dpm_noirq_list);
1523 
1524 		if (dpm_async_fn(dev, async_suspend_noirq))
1525 			continue;
1526 
1527 		get_device(dev);
1528 
1529 		mutex_unlock(&dpm_list_mtx);
1530 
1531 		device_suspend_noirq(dev, state, false);
1532 
1533 		put_device(dev);
1534 
1535 		mutex_lock(&dpm_list_mtx);
1536 
1537 		if (READ_ONCE(async_error)) {
1538 			dpm_async_suspend_complete_all(&dpm_late_early_list);
1539 			/*
1540 			 * Move all devices to the target list to resume them
1541 			 * properly.
1542 			 */
1543 			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1544 			break;
1545 		}
1546 	}
1547 
1548 	mutex_unlock(&dpm_list_mtx);
1549 
1550 	async_synchronize_full();
1551 
1552 	error = READ_ONCE(async_error);
1553 	if (error)
1554 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1555 
1556 	dpm_show_time(starttime, state, error, "noirq");
1557 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1558 	return error;
1559 }
1560 
1561 /**
1562  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1563  * @state: PM transition of the system being carried out.
1564  *
1565  * Prevent device drivers' interrupt handlers from being called and invoke
1566  * "noirq" suspend callbacks for all non-sysdev devices.
1567  */
1568 int dpm_suspend_noirq(pm_message_t state)
1569 {
1570 	int ret;
1571 
1572 	device_wakeup_arm_wake_irqs();
1573 	suspend_device_irqs();
1574 
1575 	ret = dpm_noirq_suspend_devices(state);
1576 	if (ret)
1577 		dpm_resume_noirq(resume_event(state));
1578 
1579 	return ret;
1580 }
1581 
1582 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1583 {
1584 	struct device *parent = dev->parent;
1585 
1586 	if (!parent)
1587 		return;
1588 
1589 	spin_lock_irq(&parent->power.lock);
1590 
1591 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1592 		parent->power.wakeup_path = true;
1593 
1594 	spin_unlock_irq(&parent->power.lock);
1595 }
1596 
1597 static void async_suspend_late(void *data, async_cookie_t cookie);
1598 
1599 /**
1600  * device_suspend_late - Execute a "late suspend" callback for given device.
1601  * @dev: Device to handle.
1602  * @state: PM transition of the system being carried out.
1603  * @async: If true, the device is being suspended asynchronously.
1604  *
1605  * Runtime PM is disabled for @dev while this function is being executed.
1606  */
1607 static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
1608 {
1609 	pm_callback_t callback = NULL;
1610 	const char *info = NULL;
1611 	int error = 0;
1612 
1613 	TRACE_DEVICE(dev);
1614 	TRACE_SUSPEND(0);
1615 
1616 	/*
1617 	 * Disable runtime PM for the device without checking if there is a
1618 	 * pending resume request for it.
1619 	 */
1620 	__pm_runtime_disable(dev, false);
1621 
1622 	dpm_wait_for_subordinate(dev, async);
1623 
1624 	if (READ_ONCE(async_error))
1625 		goto Complete;
1626 
1627 	if (pm_wakeup_pending()) {
1628 		WRITE_ONCE(async_error, -EBUSY);
1629 		goto Complete;
1630 	}
1631 
1632 	if (dev->power.syscore || dev->power.direct_complete)
1633 		goto Complete;
1634 
1635 	if (dev->pm_domain) {
1636 		info = "late power domain ";
1637 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1638 	} else if (dev->type && dev->type->pm) {
1639 		info = "late type ";
1640 		callback = pm_late_early_op(dev->type->pm, state);
1641 	} else if (dev->class && dev->class->pm) {
1642 		info = "late class ";
1643 		callback = pm_late_early_op(dev->class->pm, state);
1644 	} else if (dev->bus && dev->bus->pm) {
1645 		info = "late bus ";
1646 		callback = pm_late_early_op(dev->bus->pm, state);
1647 	}
1648 	if (callback)
1649 		goto Run;
1650 
1651 	if (dev_pm_skip_suspend(dev))
1652 		goto Skip;
1653 
1654 	if (dev->driver && dev->driver->pm) {
1655 		info = "late driver ";
1656 		callback = pm_late_early_op(dev->driver->pm, state);
1657 	}
1658 
1659 Run:
1660 	error = dpm_run_callback(callback, dev, state, info);
1661 	if (error) {
1662 		WRITE_ONCE(async_error, error);
1663 		dpm_save_failed_dev(dev_name(dev));
1664 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1665 		goto Complete;
1666 	}
1667 	dpm_propagate_wakeup_to_parent(dev);
1668 
1669 Skip:
1670 	dev->power.is_late_suspended = true;
1671 
1672 Complete:
1673 	TRACE_SUSPEND(error);
1674 	complete_all(&dev->power.completion);
1675 
1676 	if (error || READ_ONCE(async_error))
1677 		return;
1678 
1679 	dpm_async_suspend_superior(dev, async_suspend_late);
1680 }
1681 
1682 static void async_suspend_late(void *data, async_cookie_t cookie)
1683 {
1684 	struct device *dev = data;
1685 
1686 	device_suspend_late(dev, pm_transition, true);
1687 	put_device(dev);
1688 }
1689 
1690 /**
1691  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1692  * @state: PM transition of the system being carried out.
1693  */
1694 int dpm_suspend_late(pm_message_t state)
1695 {
1696 	ktime_t starttime = ktime_get();
1697 	struct device *dev;
1698 	int error;
1699 
1700 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1701 
1702 	pm_transition = state;
1703 	async_error = 0;
1704 
1705 	wake_up_all_idle_cpus();
1706 
1707 	mutex_lock(&dpm_list_mtx);
1708 
1709 	/*
1710 	 * Start processing "async" leaf devices upfront so they don't need to
1711 	 * wait for the "sync" devices they don't depend on.
1712 	 */
1713 	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1714 		dpm_clear_async_state(dev);
1715 		if (dpm_leaf_device(dev))
1716 			dpm_async_with_cleanup(dev, async_suspend_late);
1717 	}
1718 
1719 	while (!list_empty(&dpm_suspended_list)) {
1720 		dev = to_device(dpm_suspended_list.prev);
1721 
1722 		list_move(&dev->power.entry, &dpm_late_early_list);
1723 
1724 		if (dpm_async_fn(dev, async_suspend_late))
1725 			continue;
1726 
1727 		get_device(dev);
1728 
1729 		mutex_unlock(&dpm_list_mtx);
1730 
1731 		device_suspend_late(dev, state, false);
1732 
1733 		put_device(dev);
1734 
1735 		mutex_lock(&dpm_list_mtx);
1736 
1737 		if (READ_ONCE(async_error)) {
1738 			dpm_async_suspend_complete_all(&dpm_suspended_list);
1739 			/*
1740 			 * Move all devices to the target list to resume them
1741 			 * properly.
1742 			 */
1743 			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1744 			break;
1745 		}
1746 	}
1747 
1748 	mutex_unlock(&dpm_list_mtx);
1749 
1750 	async_synchronize_full();
1751 
1752 	error = READ_ONCE(async_error);
1753 	if (error) {
1754 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1755 		dpm_resume_early(resume_event(state));
1756 	}
1757 	dpm_show_time(starttime, state, error, "late");
1758 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1759 	return error;
1760 }
1761 
1762 /**
1763  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1764  * @state: PM transition of the system being carried out.
1765  */
1766 int dpm_suspend_end(pm_message_t state)
1767 {
1768 	ktime_t starttime = ktime_get();
1769 	int error;
1770 
1771 	error = dpm_suspend_late(state);
1772 	if (error)
1773 		goto out;
1774 
1775 	error = dpm_suspend_noirq(state);
1776 	if (error)
1777 		dpm_resume_early(resume_event(state));
1778 
1779 out:
1780 	dpm_show_time(starttime, state, error, "end");
1781 	return error;
1782 }
1783 EXPORT_SYMBOL_GPL(dpm_suspend_end);
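
/*
 * Illustrative summary (derived from the functions in this file): for a
 * system-wide transition the PM core walks the device phases roughly as
 *
 *	dpm_suspend(PMSG_SUSPEND)	->suspend() callbacks
 *	dpm_suspend_end(PMSG_SUSPEND)	->suspend_late(), then ->suspend_noirq()
 *	... system sleep ...
 *	dpm_resume_start(PMSG_RESUME)	->resume_noirq(), then ->resume_early()
 *	dpm_resume_end(PMSG_RESUME)	->resume(), then ->complete()
 *
 * with each phase waiting for the async work it scheduled before returning.
 */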
1784 
1785 /**
1786  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1787  * @dev: Device to suspend.
1788  * @state: PM transition of the system being carried out.
1789  * @cb: Suspend callback to execute.
1790  * @info: string description of caller.
1791  */
1792 static int legacy_suspend(struct device *dev, pm_message_t state,
1793 			  int (*cb)(struct device *dev, pm_message_t state),
1794 			  const char *info)
1795 {
1796 	int error;
1797 	ktime_t calltime;
1798 
1799 	calltime = initcall_debug_start(dev, cb);
1800 
1801 	trace_device_pm_callback_start(dev, info, state.event);
1802 	error = cb(dev, state);
1803 	trace_device_pm_callback_end(dev, error);
1804 	suspend_report_result(dev, cb, error);
1805 
1806 	initcall_debug_report(dev, calltime, cb, error);
1807 
1808 	return error;
1809 }
1810 
1811 static void dpm_clear_superiors_direct_complete(struct device *dev)
1812 {
1813 	struct device_link *link;
1814 	int idx;
1815 
1816 	if (dev->parent) {
1817 		spin_lock_irq(&dev->parent->power.lock);
1818 		dev->parent->power.direct_complete = false;
1819 		spin_unlock_irq(&dev->parent->power.lock);
1820 	}
1821 
1822 	idx = device_links_read_lock();
1823 
1824 	dev_for_each_link_to_supplier(link, dev) {
1825 		spin_lock_irq(&link->supplier->power.lock);
1826 		link->supplier->power.direct_complete = false;
1827 		spin_unlock_irq(&link->supplier->power.lock);
1828 	}
1829 
1830 	device_links_read_unlock(idx);
1831 }
1832 
1833 static void async_suspend(void *data, async_cookie_t cookie);
1834 
1835 /**
1836  * device_suspend - Execute "suspend" callbacks for given device.
1837  * @dev: Device to handle.
1838  * @state: PM transition of the system being carried out.
1839  * @async: If true, the device is being suspended asynchronously.
1840  */
1841 static void device_suspend(struct device *dev, pm_message_t state, bool async)
1842 {
1843 	pm_callback_t callback = NULL;
1844 	const char *info = NULL;
1845 	int error = 0;
1846 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1847 
1848 	TRACE_DEVICE(dev);
1849 	TRACE_SUSPEND(0);
1850 
1851 	dpm_wait_for_subordinate(dev, async);
1852 
1853 	if (READ_ONCE(async_error)) {
1854 		dev->power.direct_complete = false;
1855 		goto Complete;
1856 	}
1857 
1858 	/*
1859 	 * Wait for possible runtime PM transitions of the device in progress
1860 	 * to complete and if there's a runtime resume request pending for it,
1861 	 * resume it before proceeding with invoking the system-wide suspend
1862 	 * callbacks for it.
1863 	 *
1864 	 * If the system-wide suspend callbacks below change the configuration
1865 	 * of the device, they must disable runtime PM for it or otherwise
1866 	 * ensure that its runtime-resume callbacks will not be confused by that
1867 	 * change in case they are invoked going forward.
1868 	 */
1869 	pm_runtime_barrier(dev);
1870 
1871 	if (pm_wakeup_pending()) {
1872 		dev->power.direct_complete = false;
1873 		WRITE_ONCE(async_error, -EBUSY);
1874 		goto Complete;
1875 	}
1876 
1877 	if (dev->power.syscore)
1878 		goto Complete;
1879 
1880 	/* Avoid direct_complete to let wakeup_path propagate. */
1881 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1882 		dev->power.direct_complete = false;
1883 
1884 	if (dev->power.direct_complete) {
1885 		if (pm_runtime_status_suspended(dev)) {
1886 			pm_runtime_disable(dev);
1887 			if (pm_runtime_status_suspended(dev)) {
1888 				pm_dev_dbg(dev, state, "direct-complete ");
1889 				dev->power.is_suspended = true;
1890 				goto Complete;
1891 			}
1892 
1893 			pm_runtime_enable(dev);
1894 		}
1895 		dev->power.direct_complete = false;
1896 	}
1897 
1898 	dev->power.may_skip_resume = true;
1899 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1900 
1901 	dpm_watchdog_set(&wd, dev);
1902 	device_lock(dev);
1903 
1904 	if (dev->pm_domain) {
1905 		info = "power domain ";
1906 		callback = pm_op(&dev->pm_domain->ops, state);
1907 		goto Run;
1908 	}
1909 
1910 	if (dev->type && dev->type->pm) {
1911 		info = "type ";
1912 		callback = pm_op(dev->type->pm, state);
1913 		goto Run;
1914 	}
1915 
1916 	if (dev->class && dev->class->pm) {
1917 		info = "class ";
1918 		callback = pm_op(dev->class->pm, state);
1919 		goto Run;
1920 	}
1921 
1922 	if (dev->bus) {
1923 		if (dev->bus->pm) {
1924 			info = "bus ";
1925 			callback = pm_op(dev->bus->pm, state);
1926 		} else if (dev->bus->suspend) {
1927 			pm_dev_dbg(dev, state, "legacy bus ");
1928 			error = legacy_suspend(dev, state, dev->bus->suspend,
1929 						"legacy bus ");
1930 			goto End;
1931 		}
1932 	}
1933 
1934  Run:
1935 	if (!callback && dev->driver && dev->driver->pm) {
1936 		info = "driver ";
1937 		callback = pm_op(dev->driver->pm, state);
1938 	}
1939 
1940 	error = dpm_run_callback(callback, dev, state, info);
1941 
1942  End:
1943 	if (!error) {
1944 		dev->power.is_suspended = true;
1945 		if (device_may_wakeup(dev))
1946 			dev->power.wakeup_path = true;
1947 
1948 		dpm_propagate_wakeup_to_parent(dev);
1949 		dpm_clear_superiors_direct_complete(dev);
1950 	}
1951 
1952 	device_unlock(dev);
1953 	dpm_watchdog_clear(&wd);
1954 
1955  Complete:
1956 	if (error) {
1957 		WRITE_ONCE(async_error, error);
1958 		dpm_save_failed_dev(dev_name(dev));
1959 		pm_dev_err(dev, state, async ? " async" : "", error);
1960 	}
1961 
1962 	complete_all(&dev->power.completion);
1963 	TRACE_SUSPEND(error);
1964 
1965 	if (error || READ_ONCE(async_error))
1966 		return;
1967 
1968 	dpm_async_suspend_superior(dev, async_suspend);
1969 }
1970 
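/*
 * Illustrative sketch (editorial example, not part of the original file):
 * for a hypothetical driver with no power domain, type, class or bus
 * callbacks, the "driver" branch above would pick its system sleep
 * callbacks out of struct dev_pm_ops via pm_op(), e.g.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * with &foo_pm_ops assigned (typically via pm_sleep_ptr()) to the driver's
 * .pm field.  The foo_* names are made up for the example.
 */
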
1971 static void async_suspend(void *data, async_cookie_t cookie)
1972 {
1973 	struct device *dev = data;
1974 
1975 	device_suspend(dev, pm_transition, true);
1976 	put_device(dev);
1977 }
1978 
1979 /**
1980  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1981  * @state: PM transition of the system being carried out.
1982  */
1983 int dpm_suspend(pm_message_t state)
1984 {
1985 	ktime_t starttime = ktime_get();
1986 	struct device *dev;
1987 	int error;
1988 
1989 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1990 	might_sleep();
1991 
1992 	devfreq_suspend();
1993 	cpufreq_suspend();
1994 
1995 	pm_transition = state;
1996 	async_error = 0;
1997 
1998 	mutex_lock(&dpm_list_mtx);
1999 
2000 	/*
2001 	 * Start processing "async" leaf devices upfront so they don't need to
2002 	 * wait for the "sync" devices they don't depend on.
2003 	 */
2004 	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
2005 		dpm_clear_async_state(dev);
2006 		if (dpm_leaf_device(dev))
2007 			dpm_async_with_cleanup(dev, async_suspend);
2008 	}
2009 
2010 	while (!list_empty(&dpm_prepared_list)) {
2011 		dev = to_device(dpm_prepared_list.prev);
2012 
2013 		list_move(&dev->power.entry, &dpm_suspended_list);
2014 
2015 		if (dpm_async_fn(dev, async_suspend))
2016 			continue;
2017 
2018 		get_device(dev);
2019 
2020 		mutex_unlock(&dpm_list_mtx);
2021 
2022 		device_suspend(dev, state, false);
2023 
2024 		put_device(dev);
2025 
2026 		mutex_lock(&dpm_list_mtx);
2027 
2028 		if (READ_ONCE(async_error)) {
2029 			dpm_async_suspend_complete_all(&dpm_prepared_list);
2030 			/*
2031 			 * Move all devices to the target list to resume them
2032 			 * properly.
2033 			 */
2034 			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
2035 			break;
2036 		}
2037 	}
2038 
2039 	mutex_unlock(&dpm_list_mtx);
2040 
2041 	async_synchronize_full();
2042 
2043 	error = READ_ONCE(async_error);
2044 	if (error)
2045 		dpm_save_failed_step(SUSPEND_SUSPEND);
2046 
2047 	dpm_show_time(starttime, state, error, NULL);
2048 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
2049 	return error;
2050 }
2051 
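/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a device is handled by the asynchronous path above only if async suspend
 * has been enabled for it (and asynchronous PM is enabled globally), which
 * a hypothetical driver or subsystem could do when the device is
 * registered, e.g.
 *
 *	device_enable_async_suspend(dev);
 *
 * dpm_suspend() then kicks off such devices via dpm_async_with_cleanup()
 * or dpm_async_fn() instead of suspending them synchronously in list order.
 */
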
2052 static bool device_prepare_smart_suspend(struct device *dev)
2053 {
2054 	struct device_link *link;
2055 	bool ret = true;
2056 	int idx;
2057 
2058 	/*
2059 	 * The "smart suspend" feature is enabled for devices whose drivers ask
2060 	 * for it and for devices without PM callbacks.
2061 	 *
2062 	 * However, if "smart suspend" is not enabled for the device's parent
2063 	 * or any of its suppliers that take runtime PM into account, it cannot
2064 	 * be enabled for the device either.
2065 	 */
2066 	if (!dev->power.no_pm_callbacks &&
2067 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2068 		return false;
2069 
2070 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2071 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2072 		return false;
2073 
2074 	idx = device_links_read_lock();
2075 
2076 	dev_for_each_link_to_supplier(link, dev) {
2077 		if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
2078 			continue;
2079 
2080 		if (!dev_pm_smart_suspend(link->supplier) &&
2081 		    !pm_runtime_blocked(link->supplier)) {
2082 			ret = false;
2083 			break;
2084 		}
2085 	}
2086 
2087 	device_links_read_unlock(idx);
2088 
2089 	return ret;
2090 }
2091 
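/*
 * Illustrative sketch (editorial example, not part of the original file):
 * drivers opt in to "smart suspend" by setting DPM_FLAG_SMART_SUSPEND,
 * typically from their probe path, e.g.
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
 *
 * device_prepare_smart_suspend() above then only confirms the feature for
 * the device if neither its parent nor any of its runtime-PM-managed
 * suppliers rule it out.
 */
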
2092 /**
2093  * device_prepare - Prepare a device for system power transition.
2094  * @dev: Device to handle.
2095  * @state: PM transition of the system being carried out.
2096  *
2097  * Execute the ->prepare() callback(s) for the given device.  No new children of the
2098  * device may be registered after this function has returned.
2099  */
2100 static int device_prepare(struct device *dev, pm_message_t state)
2101 {
2102 	int (*callback)(struct device *) = NULL;
2103 	bool smart_suspend;
2104 	int ret = 0;
2105 
2106 	/*
2107 	 * If a device's parent goes into runtime suspend at the wrong time,
2108 	 * it won't be possible to resume the device.  To prevent this we
2109 	 * block runtime suspend here, during the prepare phase, and allow
2110 	 * it again during the complete phase.
2111 	 */
2112 	pm_runtime_get_noresume(dev);
2113 	/*
2114 	 * If runtime PM is disabled for the device at this point and it has
2115 	 * never been enabled so far, it should not be enabled until this system
2116 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
2117 	 * subsequent attempts to enable it.
2118 	 */
2119 	smart_suspend = !pm_runtime_block_if_disabled(dev);
2120 
2121 	if (dev->power.syscore)
2122 		return 0;
2123 
2124 	device_lock(dev);
2125 
2126 	dev->power.wakeup_path = false;
2127 
2128 	if (dev->power.no_pm_callbacks)
2129 		goto unlock;
2130 
2131 	if (dev->pm_domain)
2132 		callback = dev->pm_domain->ops.prepare;
2133 	else if (dev->type && dev->type->pm)
2134 		callback = dev->type->pm->prepare;
2135 	else if (dev->class && dev->class->pm)
2136 		callback = dev->class->pm->prepare;
2137 	else if (dev->bus && dev->bus->pm)
2138 		callback = dev->bus->pm->prepare;
2139 
2140 	if (!callback && dev->driver && dev->driver->pm)
2141 		callback = dev->driver->pm->prepare;
2142 
2143 	if (callback)
2144 		ret = callback(dev);
2145 
2146 unlock:
2147 	device_unlock(dev);
2148 
2149 	if (ret < 0) {
2150 		suspend_report_result(dev, callback, ret);
2151 		pm_runtime_put(dev);
2152 		return ret;
2153 	}
2154 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
2155 	if (smart_suspend)
2156 		smart_suspend = device_prepare_smart_suspend(dev);
2157 
2158 	spin_lock_irq(&dev->power.lock);
2159 
2160 	dev->power.smart_suspend = smart_suspend;
2161 	/*
2162 	 * A positive return value from ->prepare() means "this device appears
2163 	 * to be runtime-suspended and its state is fine, so if it really is
2164 	 * runtime-suspended, you can leave it in that state provided that you
2165 	 * will do the same thing with all of its descendants".  This only
2166 	 * applies to suspend transitions, however.
2167 	 */
2168 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2169 		(ret > 0 || dev->power.no_pm_callbacks) &&
2170 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2171 
2172 	spin_unlock_irq(&dev->power.lock);
2173 
2174 	return 0;
2175 }
2176 
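/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical ->prepare() callback opting in to the direct-complete
 * optimization described above could simply report whether the device is
 * already runtime-suspended, e.g.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * A positive return value, combined with the absence of
 * DPM_FLAG_NO_DIRECT_COMPLETE, lets device_prepare() set
 * power.direct_complete for suspend transitions.  The foo_prepare() name
 * is made up for the example.
 */
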
2177 /**
2178  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2179  * @state: PM transition of the system being carried out.
2180  *
2181  * Execute the ->prepare() callback(s) for all devices.
2182  */
2183 int dpm_prepare(pm_message_t state)
2184 {
2185 	int error = 0;
2186 
2187 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2188 
2189 	/*
2190 	 * Give the known devices a chance to complete their probes before
2191 	 * device probing is disabled.  This synchronization point is important
2192 	 * at least during boot and hibernation restore.
2193 	 */
2194 	wait_for_device_probe();
2195 	/*
2196 	 * Probing devices during suspend or hibernation is unsafe and would
2197 	 * make system behavior unpredictable, so prohibit device probing here
2198 	 * and defer any probes until later instead.  The normal behavior will
2199 	 * be restored in dpm_complete().
2200 	 */
2201 	device_block_probing();
2202 
2203 	mutex_lock(&dpm_list_mtx);
2204 	while (!list_empty(&dpm_list) && !error) {
2205 		struct device *dev = to_device(dpm_list.next);
2206 
2207 		get_device(dev);
2208 
2209 		mutex_unlock(&dpm_list_mtx);
2210 
2211 		trace_device_pm_callback_start(dev, "", state.event);
2212 		error = device_prepare(dev, state);
2213 		trace_device_pm_callback_end(dev, error);
2214 
2215 		mutex_lock(&dpm_list_mtx);
2216 
2217 		if (!error) {
2218 			dev->power.is_prepared = true;
2219 			if (!list_empty(&dev->power.entry))
2220 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
2221 		} else if (error == -EAGAIN) {
2222 			error = 0;
2223 		} else {
2224 			dev_info(dev, "not prepared for power transition: code %d\n",
2225 				 error);
2226 		}
2227 
2228 		mutex_unlock(&dpm_list_mtx);
2229 
2230 		put_device(dev);
2231 
2232 		mutex_lock(&dpm_list_mtx);
2233 	}
2234 	mutex_unlock(&dpm_list_mtx);
2235 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2236 	return error;
2237 }
2238 
2239 /**
2240  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2241  * @state: PM transition of the system being carried out.
2242  *
2243  * Prepare all non-sysdev devices for a system PM transition and execute "suspend"
2244  * callbacks for them.
2245  */
2246 int dpm_suspend_start(pm_message_t state)
2247 {
2248 	ktime_t starttime = ktime_get();
2249 	int error;
2250 
2251 	error = dpm_prepare(state);
2252 	if (error)
2253 		dpm_save_failed_step(SUSPEND_PREPARE);
2254 	else {
2255 		pm_restrict_gfp_mask();
2256 		error = dpm_suspend(state);
2257 	}
2258 
2259 	dpm_show_time(starttime, state, error, "start");
2260 	return error;
2261 }
2262 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2263 
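/*
 * Editorial note: dpm_suspend_start() is typically paired with
 * dpm_resume_end() by the system sleep core, roughly as in
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = enter_the_sleep_state();
 *		dpm_resume_end(PMSG_RESUME);
 *	}
 *
 * where enter_the_sleep_state() stands in for the platform-specific steps,
 * so that every device prepared and suspended here is resumed and completed
 * on the way out.
 */
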
2264 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2265 {
2266 	if (ret)
2267 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2268 }
2269 EXPORT_SYMBOL_GPL(__suspend_report_result);
2270 
2271 /**
2272  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2273  * @subordinate: Device that needs to wait for @dev.
2274  * @dev: Device to wait for.
2275  */
2276 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2277 {
2278 	dpm_wait(dev, subordinate->power.async_suspend);
2279 	return async_error;
2280 }
2281 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2282 
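/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical resume callback with an ordering dependency that is not
 * captured by the device hierarchy or by device links could use this
 * helper, e.g.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_data *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->companion);
 *	}
 *
 * where struct foo_data and its companion pointer are made up for the
 * example.
 */
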
2283 /**
2284  * dpm_for_each_dev - device iterator.
2285  * @data: data for the callback.
2286  * @fn: function to be called for each device.
2287  *
2288  * Iterate over devices in dpm_list, and call @fn for each device,
2289  * passing it @data.
2290  */
2291 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2292 {
2293 	struct device *dev;
2294 
2295 	if (!fn)
2296 		return;
2297 
2298 	device_pm_lock();
2299 	list_for_each_entry(dev, &dpm_list, power.entry)
2300 		fn(dev, data);
2301 	device_pm_unlock();
2302 }
2303 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2304 
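/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a caller could walk dpm_list with a callback of its own, e.g.
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 * and then, from some initialization or debug path:
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_dev);
 *
 * The foo_count_dev() name is made up for the example.  Note that @fn is
 * invoked with dpm_list_mtx held via device_pm_lock(), so it must not take
 * that lock again.
 */
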
2305 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2306 {
2307 	if (!ops)
2308 		return true;
2309 
2310 	return !ops->prepare &&
2311 	       !ops->suspend &&
2312 	       !ops->suspend_late &&
2313 	       !ops->suspend_noirq &&
2314 	       !ops->resume_noirq &&
2315 	       !ops->resume_early &&
2316 	       !ops->resume &&
2317 	       !ops->complete;
2318 }
2319 
2320 void device_pm_check_callbacks(struct device *dev)
2321 {
2322 	unsigned long flags;
2323 
2324 	spin_lock_irqsave(&dev->power.lock, flags);
2325 	dev->power.no_pm_callbacks =
2326 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2327 		 !dev->bus->suspend && !dev->bus->resume)) &&
2328 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2329 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2330 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2331 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2332 		 !dev->driver->suspend && !dev->driver->resume));
2333 	spin_unlock_irqrestore(&dev->power.lock, flags);
2334 }
2335 
2336 bool dev_pm_skip_suspend(struct device *dev)
2337 {
2338 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2339 }
2340