xref: /linux/drivers/base/power/main.c (revision 5c8d5e2619f7d2985adfe45608dc942ca8151aa3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
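/*
 * Wrapper around list_for_each_entry_rcu() for walking device-link lists: the
 * extra argument tells RCU (for lockdep purposes) that the device links read
 * lock is held by the caller.
 */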
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in dpm_list are in depth-first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65 
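/*
 * async_wip_mtx serializes updates of power.work_in_progress when scheduling
 * asynchronous callbacks, and async_error records the first error returned by
 * a device callback in the current phase so that the main loops can react to
 * it.
 */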
66 static DEFINE_MUTEX(async_wip_mtx);
67 static int async_error;
68 
69 /**
70  * pm_hibernate_is_recovering - Check if a hibernation error is being recovered from.
71  *
72  * Used to query whether dev_pm_ops.thaw() is being called as part of normal
73  * hibernation or while recovering from an error.
74  *
75  * Return: %true in the error-recovery case, %false in the normal case.
76  */
77 bool pm_hibernate_is_recovering(void)
78 {
79 	return pm_transition.event == PM_EVENT_RECOVER;
80 }
81 EXPORT_SYMBOL_GPL(pm_hibernate_is_recovering);
82 
83 static const char *pm_verb(int event)
84 {
85 	switch (event) {
86 	case PM_EVENT_SUSPEND:
87 		return "suspend";
88 	case PM_EVENT_RESUME:
89 		return "resume";
90 	case PM_EVENT_FREEZE:
91 		return "freeze";
92 	case PM_EVENT_QUIESCE:
93 		return "quiesce";
94 	case PM_EVENT_HIBERNATE:
95 		return "hibernate";
96 	case PM_EVENT_THAW:
97 		return "thaw";
98 	case PM_EVENT_RESTORE:
99 		return "restore";
100 	case PM_EVENT_RECOVER:
101 		return "recover";
102 	default:
103 		return "(unknown PM event)";
104 	}
105 }
106 
107 /**
108  * device_pm_sleep_init - Initialize system suspend-related device fields.
109  * @dev: Device object being initialized.
110  */
111 void device_pm_sleep_init(struct device *dev)
112 {
113 	dev->power.is_prepared = false;
114 	dev->power.is_suspended = false;
115 	dev->power.is_noirq_suspended = false;
116 	dev->power.is_late_suspended = false;
117 	init_completion(&dev->power.completion);
118 	complete_all(&dev->power.completion);
119 	dev->power.wakeup = NULL;
120 	INIT_LIST_HEAD(&dev->power.entry);
121 }
122 
123 /**
124  * device_pm_lock - Lock the list of active devices used by the PM core.
125  */
126 void device_pm_lock(void)
127 {
128 	mutex_lock(&dpm_list_mtx);
129 }
130 
131 /**
132  * device_pm_unlock - Unlock the list of active devices used by the PM core.
133  */
134 void device_pm_unlock(void)
135 {
136 	mutex_unlock(&dpm_list_mtx);
137 }
138 
139 /**
140  * device_pm_add - Add a device to the PM core's list of active devices.
141  * @dev: Device to add to the list.
142  */
143 void device_pm_add(struct device *dev)
144 {
145 	/* Skip PM setup/initialization. */
146 	if (device_pm_not_required(dev))
147 		return;
148 
149 	pr_debug("Adding info for %s:%s\n",
150 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
151 	device_pm_check_callbacks(dev);
152 	mutex_lock(&dpm_list_mtx);
153 	if (dev->parent && dev->parent->power.is_prepared)
154 		dev_warn(dev, "parent %s should not be sleeping\n",
155 			dev_name(dev->parent));
156 	list_add_tail(&dev->power.entry, &dpm_list);
157 	dev->power.in_dpm_list = true;
158 	mutex_unlock(&dpm_list_mtx);
159 }
160 
161 /**
162  * device_pm_remove - Remove a device from the PM core's list of active devices.
163  * @dev: Device to be removed from the list.
164  */
165 void device_pm_remove(struct device *dev)
166 {
167 	if (device_pm_not_required(dev))
168 		return;
169 
170 	pr_debug("Removing info for %s:%s\n",
171 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
172 	complete_all(&dev->power.completion);
173 	mutex_lock(&dpm_list_mtx);
174 	list_del_init(&dev->power.entry);
175 	dev->power.in_dpm_list = false;
176 	mutex_unlock(&dpm_list_mtx);
177 	device_wakeup_disable(dev);
178 	pm_runtime_remove(dev);
179 	device_pm_check_callbacks(dev);
180 }
181 
182 /**
183  * device_pm_move_before - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come before.
186  */
187 void device_pm_move_before(struct device *deva, struct device *devb)
188 {
189 	pr_debug("Moving %s:%s before %s:%s\n",
190 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 	/* Delete deva from dpm_list and reinsert before devb. */
193 	list_move_tail(&deva->power.entry, &devb->power.entry);
194 }
195 
196 /**
197  * device_pm_move_after - Move device in the PM core's list of active devices.
198  * @deva: Device to move in dpm_list.
199  * @devb: Device @deva should come after.
200  */
201 void device_pm_move_after(struct device *deva, struct device *devb)
202 {
203 	pr_debug("Moving %s:%s after %s:%s\n",
204 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
205 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
206 	/* Delete deva from dpm_list and reinsert after devb. */
207 	list_move(&deva->power.entry, &devb->power.entry);
208 }
209 
210 /**
211  * device_pm_move_last - Move device to end of the PM core's list of devices.
212  * @dev: Device to move in dpm_list.
213  */
214 void device_pm_move_last(struct device *dev)
215 {
216 	pr_debug("Moving %s:%s to end of list\n",
217 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
218 	list_move_tail(&dev->power.entry, &dpm_list);
219 }
220 
221 static ktime_t initcall_debug_start(struct device *dev, void *cb)
222 {
223 	if (!pm_print_times_enabled)
224 		return 0;
225 
226 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
227 		 task_pid_nr(current),
228 		 dev->parent ? dev_name(dev->parent) : "none");
229 	return ktime_get();
230 }
231 
232 static void initcall_debug_report(struct device *dev, ktime_t calltime,
233 				  void *cb, int error)
234 {
235 	ktime_t rettime;
236 
237 	if (!pm_print_times_enabled)
238 		return;
239 
240 	rettime = ktime_get();
241 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
242 		 (unsigned long long)ktime_us_delta(rettime, calltime));
243 }
244 
245 /**
246  * dpm_wait - Wait for a PM operation to complete.
247  * @dev: Device to wait for.
248  * @async: If unset, wait only if pm_async_enabled and power.async_suspend are both set.
249  */
250 static void dpm_wait(struct device *dev, bool async)
251 {
252 	if (!dev)
253 		return;
254 
255 	if (async || (pm_async_enabled && dev->power.async_suspend))
256 		wait_for_completion(&dev->power.completion);
257 }
258 
259 static int dpm_wait_fn(struct device *dev, void *async_ptr)
260 {
261 	dpm_wait(dev, *((bool *)async_ptr));
262 	return 0;
263 }
264 
265 static void dpm_wait_for_children(struct device *dev, bool async)
266 {
267 	device_for_each_child(dev, &async, dpm_wait_fn);
268 }
269 
270 static void dpm_wait_for_suppliers(struct device *dev, bool async)
271 {
272 	struct device_link *link;
273 	int idx;
274 
275 	idx = device_links_read_lock();
276 
277 	/*
278 	 * If the supplier goes away right after we've checked the link to it,
279 	 * we'll wait for its completion to change the state, but that's fine,
280 	 * because the only things that will block as a result are the SRCU
281 	 * callbacks freeing the link objects for the links in the list we're
282 	 * walking.
283 	 */
284 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
285 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
286 			dpm_wait(link->supplier, async);
287 
288 	device_links_read_unlock(idx);
289 }
290 
291 static bool dpm_wait_for_superior(struct device *dev, bool async)
292 {
293 	struct device *parent;
294 
295 	/*
296 	 * If the device is resumed asynchronously and the parent's callback
297 	 * deletes both the device and the parent itself, the parent object may
298 	 * be freed while this function is running, so avoid that by reference
299 	 * counting the parent once more unless the device has been deleted
300 	 * already (in which case return right away).
301 	 */
302 	mutex_lock(&dpm_list_mtx);
303 
304 	if (!device_pm_initialized(dev)) {
305 		mutex_unlock(&dpm_list_mtx);
306 		return false;
307 	}
308 
309 	parent = get_device(dev->parent);
310 
311 	mutex_unlock(&dpm_list_mtx);
312 
313 	dpm_wait(parent, async);
314 	put_device(parent);
315 
316 	dpm_wait_for_suppliers(dev, async);
317 
318 	/*
319 	 * If the parent's callback has deleted the device, attempting to resume
320 	 * it would be invalid, so avoid doing that then.
321 	 */
322 	return device_pm_initialized(dev);
323 }
324 
325 static void dpm_wait_for_consumers(struct device *dev, bool async)
326 {
327 	struct device_link *link;
328 	int idx;
329 
330 	idx = device_links_read_lock();
331 
332 	/*
333 	 * The status of a device link can only be changed from "dormant" by a
334 	 * probe, but that cannot happen during system suspend/resume.  In
335 	 * theory it can change to "dormant" at that time, but then it is
336 	 * reasonable to wait for the target device anyway (eg. if it goes
337 	 * away, it's better to wait for it to go away completely and then
338 	 * continue instead of trying to continue in parallel with its
339 	 * unregistration).
340 	 */
341 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
342 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
343 			dpm_wait(link->consumer, async);
344 
345 	device_links_read_unlock(idx);
346 }
347 
348 static void dpm_wait_for_subordinate(struct device *dev, bool async)
349 {
350 	dpm_wait_for_children(dev, async);
351 	dpm_wait_for_consumers(dev, async);
352 }
353 
354 /**
355  * pm_op - Return the PM operation appropriate for given PM event.
356  * @ops: PM operations to choose from.
357  * @state: PM transition of the system being carried out.
358  */
359 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
360 {
361 	switch (state.event) {
362 #ifdef CONFIG_SUSPEND
363 	case PM_EVENT_SUSPEND:
364 		return ops->suspend;
365 	case PM_EVENT_RESUME:
366 		return ops->resume;
367 #endif /* CONFIG_SUSPEND */
368 #ifdef CONFIG_HIBERNATE_CALLBACKS
369 	case PM_EVENT_FREEZE:
370 	case PM_EVENT_QUIESCE:
371 		return ops->freeze;
372 	case PM_EVENT_HIBERNATE:
373 		return ops->poweroff;
374 	case PM_EVENT_THAW:
375 	case PM_EVENT_RECOVER:
376 		return ops->thaw;
377 	case PM_EVENT_RESTORE:
378 		return ops->restore;
379 #endif /* CONFIG_HIBERNATE_CALLBACKS */
380 	}
381 
382 	return NULL;
383 }
384 
385 /**
386  * pm_late_early_op - Return the PM operation appropriate for given PM event.
387  * @ops: PM operations to choose from.
388  * @state: PM transition of the system being carried out.
389  *
390  * Runtime PM is disabled for @dev while this function is being executed.
391  */
392 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
393 				      pm_message_t state)
394 {
395 	switch (state.event) {
396 #ifdef CONFIG_SUSPEND
397 	case PM_EVENT_SUSPEND:
398 		return ops->suspend_late;
399 	case PM_EVENT_RESUME:
400 		return ops->resume_early;
401 #endif /* CONFIG_SUSPEND */
402 #ifdef CONFIG_HIBERNATE_CALLBACKS
403 	case PM_EVENT_FREEZE:
404 	case PM_EVENT_QUIESCE:
405 		return ops->freeze_late;
406 	case PM_EVENT_HIBERNATE:
407 		return ops->poweroff_late;
408 	case PM_EVENT_THAW:
409 	case PM_EVENT_RECOVER:
410 		return ops->thaw_early;
411 	case PM_EVENT_RESTORE:
412 		return ops->restore_early;
413 #endif /* CONFIG_HIBERNATE_CALLBACKS */
414 	}
415 
416 	return NULL;
417 }
418 
419 /**
420  * pm_noirq_op - Return the PM operation appropriate for given PM event.
421  * @ops: PM operations to choose from.
422  * @state: PM transition of the system being carried out.
423  *
424  * The driver of the target device will not receive interrupts while the
425  * returned callback is being executed.
426  */
427 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
428 {
429 	switch (state.event) {
430 #ifdef CONFIG_SUSPEND
431 	case PM_EVENT_SUSPEND:
432 		return ops->suspend_noirq;
433 	case PM_EVENT_RESUME:
434 		return ops->resume_noirq;
435 #endif /* CONFIG_SUSPEND */
436 #ifdef CONFIG_HIBERNATE_CALLBACKS
437 	case PM_EVENT_FREEZE:
438 	case PM_EVENT_QUIESCE:
439 		return ops->freeze_noirq;
440 	case PM_EVENT_HIBERNATE:
441 		return ops->poweroff_noirq;
442 	case PM_EVENT_THAW:
443 	case PM_EVENT_RECOVER:
444 		return ops->thaw_noirq;
445 	case PM_EVENT_RESTORE:
446 		return ops->restore_noirq;
447 #endif /* CONFIG_HIBERNATE_CALLBACKS */
448 	}
449 
450 	return NULL;
451 }
452 
453 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
454 {
455 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
456 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
457 		", may wakeup" : "", dev->power.driver_flags);
458 }
459 
460 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
461 			int error)
462 {
463 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
464 		error);
465 }
466 
467 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
468 			  const char *info)
469 {
470 	ktime_t calltime;
471 	u64 usecs64;
472 	int usecs;
473 
474 	calltime = ktime_get();
475 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
476 	do_div(usecs64, NSEC_PER_USEC);
477 	usecs = usecs64;
478 	if (usecs == 0)
479 		usecs = 1;
480 
481 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
482 		  info ?: "", info ? " " : "", pm_verb(state.event),
483 		  error ? "aborted" : "complete",
484 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
485 }
486 
487 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
488 			    pm_message_t state, const char *info)
489 {
490 	ktime_t calltime;
491 	int error;
492 
493 	if (!cb)
494 		return 0;
495 
496 	calltime = initcall_debug_start(dev, cb);
497 
498 	pm_dev_dbg(dev, state, info);
499 	trace_device_pm_callback_start(dev, info, state.event);
500 	error = cb(dev);
501 	trace_device_pm_callback_end(dev, error);
502 	suspend_report_result(dev, cb, error);
503 
504 	initcall_debug_report(dev, calltime, cb, error);
505 
506 	return error;
507 }
508 
509 #ifdef CONFIG_DPM_WATCHDOG
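/*
 * Per-device watchdog armed on the stack around suspend and resume callbacks:
 * it warns after CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT seconds and panics after
 * CONFIG_DPM_WATCHDOG_TIMEOUT seconds so that a crash dump of the stuck
 * callback can be captured.
 */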
510 struct dpm_watchdog {
511 	struct device		*dev;
512 	struct task_struct	*tsk;
513 	struct timer_list	timer;
514 	bool			fatal;
515 };
516 
517 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
518 	struct dpm_watchdog wd
519 
520 /**
521  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
522  * @t: The watchdog timer that has expired.
523  *
524  * Called when a driver has timed out suspending or resuming.
525  * There's not much we can do here to recover so panic() to
526  * capture a crash-dump in pstore.
527  */
528 static void dpm_watchdog_handler(struct timer_list *t)
529 {
530 	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
531 	struct timer_list *timer = &wd->timer;
532 	unsigned int time_left;
533 
534 	if (wd->fatal) {
535 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
536 		show_stack(wd->tsk, NULL, KERN_EMERG);
537 		panic("%s %s: unrecoverable failure\n",
538 			dev_driver_string(wd->dev), dev_name(wd->dev));
539 	}
540 
541 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
542 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
543 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
544 	show_stack(wd->tsk, NULL, KERN_WARNING);
545 
546 	wd->fatal = true;
547 	mod_timer(timer, jiffies + HZ * time_left);
548 }
549 
550 /**
551  * dpm_watchdog_set - Enable pm watchdog for given device.
552  * @wd: Watchdog. Must be allocated on the stack.
553  * @dev: Device to handle.
554  */
555 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
556 {
557 	struct timer_list *timer = &wd->timer;
558 
559 	wd->dev = dev;
560 	wd->tsk = current;
561 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
562 
563 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
564 	/* use same timeout value for both suspend and resume */
565 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
566 	add_timer(timer);
567 }
568 
569 /**
570  * dpm_watchdog_clear - Disable suspend/resume watchdog.
571  * @wd: Watchdog to disable.
572  */
573 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
574 {
575 	struct timer_list *timer = &wd->timer;
576 
577 	timer_delete_sync(timer);
578 	timer_destroy_on_stack(timer);
579 }
580 #else
581 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
582 #define dpm_watchdog_set(x, y)
583 #define dpm_watchdog_clear(x)
584 #endif
585 
586 /*------------------------- Resume routines -------------------------*/
587 
588 /**
589  * dev_pm_skip_resume - System-wide device resume optimization check.
590  * @dev: Target device.
591  *
592  * Return:
593  * - %false if the transition under way is RESTORE.
594  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
595  * - The logical negation of %power.must_resume otherwise (that is, when the
596  *   transition under way is RESUME).
597  */
598 bool dev_pm_skip_resume(struct device *dev)
599 {
600 	if (pm_transition.event == PM_EVENT_RESTORE)
601 		return false;
602 
603 	if (pm_transition.event == PM_EVENT_THAW)
604 		return dev_pm_skip_suspend(dev);
605 
606 	return !dev->power.must_resume;
607 }
608 
609 static bool is_async(struct device *dev)
610 {
611 	return dev->power.async_suspend && pm_async_enabled
612 		&& !pm_trace_is_enabled();
613 }
614 
615 static bool __dpm_async(struct device *dev, async_func_t func)
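/*
 * Schedule @func to run asynchronously for @dev if the device is subject to
 * asynchronous handling.  Return true if processing is already in progress or
 * has been scheduled successfully, false otherwise.  Called with async_wip_mtx
 * held.
 */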
616 {
617 	if (dev->power.work_in_progress)
618 		return true;
619 
620 	if (!is_async(dev))
621 		return false;
622 
623 	dev->power.work_in_progress = true;
624 
625 	get_device(dev);
626 
627 	if (async_schedule_dev_nocall(func, dev))
628 		return true;
629 
630 	put_device(dev);
631 
632 	return false;
633 }
634 
635 static bool dpm_async_fn(struct device *dev, async_func_t func)
636 {
637 	guard(mutex)(&async_wip_mtx);
638 
639 	return __dpm_async(dev, func);
640 }
641 
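/*
 * Like dpm_async_fn(), but if asynchronous processing has not been scheduled
 * for @dev, clear its work_in_progress flag again so that the device can be
 * handled synchronously later.
 */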
642 static int dpm_async_with_cleanup(struct device *dev, void *fn)
643 {
644 	guard(mutex)(&async_wip_mtx);
645 
646 	if (!__dpm_async(dev, fn))
647 		dev->power.work_in_progress = false;
648 
649 	return 0;
650 }
651 
652 static void dpm_async_resume_children(struct device *dev, async_func_t func)
653 {
654 	/*
655 	 * Prevent racing with dpm_clear_async_state() during initial list
656 	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
657 	 * dpm_resume().
658 	 */
659 	guard(mutex)(&dpm_list_mtx);
660 
661 	/*
662 	 * Start processing "async" children of the device unless it's been
663 	 * started already for them.
664 	 *
665 	 * This could have been done for the device's "async" consumers too, but
666 	 * they either need to wait for their parents or the processing has
667 	 * already started for them after their parents were processed.
668 	 */
669 	device_for_each_child(dev, func, dpm_async_with_cleanup);
670 }
671 
672 static void dpm_clear_async_state(struct device *dev)
673 {
674 	reinit_completion(&dev->power.completion);
675 	dev->power.work_in_progress = false;
676 }
677 
678 static bool dpm_root_device(struct device *dev)
679 {
680 	return !dev->parent;
681 }
682 
683 static void async_resume_noirq(void *data, async_cookie_t cookie);
684 
685 /**
686  * device_resume_noirq - Execute a "noirq resume" callback for given device.
687  * @dev: Device to handle.
688  * @state: PM transition of the system being carried out.
689  * @async: If true, the device is being resumed asynchronously.
690  *
691  * The driver of @dev will not receive interrupts while this function is being
692  * executed.
693  */
694 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
695 {
696 	pm_callback_t callback = NULL;
697 	const char *info = NULL;
698 	bool skip_resume;
699 	int error = 0;
700 
701 	TRACE_DEVICE(dev);
702 	TRACE_RESUME(0);
703 
704 	if (dev->power.syscore || dev->power.direct_complete)
705 		goto Out;
706 
707 	if (!dev->power.is_noirq_suspended)
708 		goto Out;
709 
710 	if (!dpm_wait_for_superior(dev, async))
711 		goto Out;
712 
713 	skip_resume = dev_pm_skip_resume(dev);
714 	/*
715 	 * If the driver callback is skipped below or by the middle layer
716 	 * callback and device_resume_early() also skips the driver callback for
717 	 * this device later, it needs to appear as "suspended" to PM-runtime,
718 	 * so change its status accordingly.
719 	 *
720 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
721 	 * status to "active" unless its power.smart_suspend flag is clear, in
722 	 * which case it is not necessary to update its PM-runtime status.
723 	 */
724 	if (skip_resume)
725 		pm_runtime_set_suspended(dev);
726 	else if (dev_pm_smart_suspend(dev))
727 		pm_runtime_set_active(dev);
728 
729 	if (dev->pm_domain) {
730 		info = "noirq power domain ";
731 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
732 	} else if (dev->type && dev->type->pm) {
733 		info = "noirq type ";
734 		callback = pm_noirq_op(dev->type->pm, state);
735 	} else if (dev->class && dev->class->pm) {
736 		info = "noirq class ";
737 		callback = pm_noirq_op(dev->class->pm, state);
738 	} else if (dev->bus && dev->bus->pm) {
739 		info = "noirq bus ";
740 		callback = pm_noirq_op(dev->bus->pm, state);
741 	}
742 	if (callback)
743 		goto Run;
744 
745 	if (skip_resume)
746 		goto Skip;
747 
748 	if (dev->driver && dev->driver->pm) {
749 		info = "noirq driver ";
750 		callback = pm_noirq_op(dev->driver->pm, state);
751 	}
752 
753 Run:
754 	error = dpm_run_callback(callback, dev, state, info);
755 
756 Skip:
757 	dev->power.is_noirq_suspended = false;
758 
759 Out:
760 	complete_all(&dev->power.completion);
761 	TRACE_RESUME(error);
762 
763 	if (error) {
764 		async_error = error;
765 		dpm_save_failed_dev(dev_name(dev));
766 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
767 	}
768 
769 	dpm_async_resume_children(dev, async_resume_noirq);
770 }
771 
772 static void async_resume_noirq(void *data, async_cookie_t cookie)
773 {
774 	struct device *dev = data;
775 
776 	device_resume_noirq(dev, pm_transition, true);
777 	put_device(dev);
778 }
779 
780 static void dpm_noirq_resume_devices(pm_message_t state)
781 {
782 	struct device *dev;
783 	ktime_t starttime = ktime_get();
784 
785 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
786 
787 	async_error = 0;
788 	pm_transition = state;
789 
790 	mutex_lock(&dpm_list_mtx);
791 
792 	/*
793 	 * Start processing "async" root devices upfront so they don't wait for
794 	 * the "sync" devices they don't depend on.
795 	 */
796 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
797 		dpm_clear_async_state(dev);
798 		if (dpm_root_device(dev))
799 			dpm_async_with_cleanup(dev, async_resume_noirq);
800 	}
801 
802 	while (!list_empty(&dpm_noirq_list)) {
803 		dev = to_device(dpm_noirq_list.next);
804 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
805 
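		/*
		 * Resume the device synchronously if async processing was not
		 * scheduled for it.
		 */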
806 		if (!dpm_async_fn(dev, async_resume_noirq)) {
807 			get_device(dev);
808 
809 			mutex_unlock(&dpm_list_mtx);
810 
811 			device_resume_noirq(dev, state, false);
812 
813 			put_device(dev);
814 
815 			mutex_lock(&dpm_list_mtx);
816 		}
817 	}
818 	mutex_unlock(&dpm_list_mtx);
819 	async_synchronize_full();
820 	dpm_show_time(starttime, state, 0, "noirq");
821 	if (async_error)
822 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
823 
824 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
825 }
826 
827 /**
828  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
829  * @state: PM transition of the system being carried out.
830  *
831  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
832  * allow device drivers' interrupt handlers to be called.
833  */
834 void dpm_resume_noirq(pm_message_t state)
835 {
836 	dpm_noirq_resume_devices(state);
837 
838 	resume_device_irqs();
839 	device_wakeup_disarm_wake_irqs();
840 }
841 
842 static void async_resume_early(void *data, async_cookie_t cookie);
843 
844 /**
845  * device_resume_early - Execute an "early resume" callback for given device.
846  * @dev: Device to handle.
847  * @state: PM transition of the system being carried out.
848  * @async: If true, the device is being resumed asynchronously.
849  *
850  * Runtime PM is disabled for @dev while this function is being executed.
851  */
852 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
853 {
854 	pm_callback_t callback = NULL;
855 	const char *info = NULL;
856 	int error = 0;
857 
858 	TRACE_DEVICE(dev);
859 	TRACE_RESUME(0);
860 
861 	if (dev->power.syscore || dev->power.direct_complete)
862 		goto Out;
863 
864 	if (!dev->power.is_late_suspended)
865 		goto Out;
866 
867 	if (!dpm_wait_for_superior(dev, async))
868 		goto Out;
869 
870 	if (dev->pm_domain) {
871 		info = "early power domain ";
872 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
873 	} else if (dev->type && dev->type->pm) {
874 		info = "early type ";
875 		callback = pm_late_early_op(dev->type->pm, state);
876 	} else if (dev->class && dev->class->pm) {
877 		info = "early class ";
878 		callback = pm_late_early_op(dev->class->pm, state);
879 	} else if (dev->bus && dev->bus->pm) {
880 		info = "early bus ";
881 		callback = pm_late_early_op(dev->bus->pm, state);
882 	}
883 	if (callback)
884 		goto Run;
885 
886 	if (dev_pm_skip_resume(dev))
887 		goto Skip;
888 
889 	if (dev->driver && dev->driver->pm) {
890 		info = "early driver ";
891 		callback = pm_late_early_op(dev->driver->pm, state);
892 	}
893 
894 Run:
895 	error = dpm_run_callback(callback, dev, state, info);
896 
897 Skip:
898 	dev->power.is_late_suspended = false;
899 
900 Out:
901 	TRACE_RESUME(error);
902 
903 	pm_runtime_enable(dev);
904 	complete_all(&dev->power.completion);
905 
906 	if (error) {
907 		async_error = error;
908 		dpm_save_failed_dev(dev_name(dev));
909 		pm_dev_err(dev, state, async ? " async early" : " early", error);
910 	}
911 
912 	dpm_async_resume_children(dev, async_resume_early);
913 }
914 
915 static void async_resume_early(void *data, async_cookie_t cookie)
916 {
917 	struct device *dev = data;
918 
919 	device_resume_early(dev, pm_transition, true);
920 	put_device(dev);
921 }
922 
923 /**
924  * dpm_resume_early - Execute "early resume" callbacks for all devices.
925  * @state: PM transition of the system being carried out.
926  */
927 void dpm_resume_early(pm_message_t state)
928 {
929 	struct device *dev;
930 	ktime_t starttime = ktime_get();
931 
932 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
933 
934 	async_error = 0;
935 	pm_transition = state;
936 
937 	mutex_lock(&dpm_list_mtx);
938 
939 	/*
940 	 * Start processing "async" root devices upfront so they don't wait for
941 	 * the "sync" devices they don't depend on.
942 	 */
943 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
944 		dpm_clear_async_state(dev);
945 		if (dpm_root_device(dev))
946 			dpm_async_with_cleanup(dev, async_resume_early);
947 	}
948 
949 	while (!list_empty(&dpm_late_early_list)) {
950 		dev = to_device(dpm_late_early_list.next);
951 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
952 
953 		if (!dpm_async_fn(dev, async_resume_early)) {
954 			get_device(dev);
955 
956 			mutex_unlock(&dpm_list_mtx);
957 
958 			device_resume_early(dev, state, false);
959 
960 			put_device(dev);
961 
962 			mutex_lock(&dpm_list_mtx);
963 		}
964 	}
965 	mutex_unlock(&dpm_list_mtx);
966 	async_synchronize_full();
967 	dpm_show_time(starttime, state, 0, "early");
968 	if (async_error)
969 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
970 
971 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
972 }
973 
974 /**
975  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
976  * @state: PM transition of the system being carried out.
977  */
978 void dpm_resume_start(pm_message_t state)
979 {
980 	dpm_resume_noirq(state);
981 	dpm_resume_early(state);
982 }
983 EXPORT_SYMBOL_GPL(dpm_resume_start);
984 
985 static void async_resume(void *data, async_cookie_t cookie);
986 
987 /**
988  * device_resume - Execute "resume" callbacks for given device.
989  * @dev: Device to handle.
990  * @state: PM transition of the system being carried out.
991  * @async: If true, the device is being resumed asynchronously.
992  */
993 static void device_resume(struct device *dev, pm_message_t state, bool async)
994 {
995 	pm_callback_t callback = NULL;
996 	const char *info = NULL;
997 	int error = 0;
998 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
999 
1000 	TRACE_DEVICE(dev);
1001 	TRACE_RESUME(0);
1002 
1003 	if (dev->power.syscore)
1004 		goto Complete;
1005 
1006 	if (!dev->power.is_suspended)
1007 		goto Complete;
1008 
1009 	dev->power.is_suspended = false;
1010 
1011 	if (dev->power.direct_complete) {
1012 		/*
1013 		 * Allow new children to be added under the device after this
1014 		 * point if it has no PM callbacks.
1015 		 */
1016 		if (dev->power.no_pm_callbacks)
1017 			dev->power.is_prepared = false;
1018 
1019 		/* Match the pm_runtime_disable() in device_suspend(). */
1020 		pm_runtime_enable(dev);
1021 		goto Complete;
1022 	}
1023 
1024 	if (!dpm_wait_for_superior(dev, async))
1025 		goto Complete;
1026 
1027 	dpm_watchdog_set(&wd, dev);
1028 	device_lock(dev);
1029 
1030 	/*
1031 	 * This is a fib.  But we'll allow new children to be added below
1032 	 * a resumed device, even if the device hasn't been completed yet.
1033 	 */
1034 	dev->power.is_prepared = false;
1035 
1036 	if (dev->pm_domain) {
1037 		info = "power domain ";
1038 		callback = pm_op(&dev->pm_domain->ops, state);
1039 		goto Driver;
1040 	}
1041 
1042 	if (dev->type && dev->type->pm) {
1043 		info = "type ";
1044 		callback = pm_op(dev->type->pm, state);
1045 		goto Driver;
1046 	}
1047 
1048 	if (dev->class && dev->class->pm) {
1049 		info = "class ";
1050 		callback = pm_op(dev->class->pm, state);
1051 		goto Driver;
1052 	}
1053 
1054 	if (dev->bus) {
1055 		if (dev->bus->pm) {
1056 			info = "bus ";
1057 			callback = pm_op(dev->bus->pm, state);
1058 		} else if (dev->bus->resume) {
1059 			info = "legacy bus ";
1060 			callback = dev->bus->resume;
1061 			goto End;
1062 		}
1063 	}
1064 
1065  Driver:
1066 	if (!callback && dev->driver && dev->driver->pm) {
1067 		info = "driver ";
1068 		callback = pm_op(dev->driver->pm, state);
1069 	}
1070 
1071  End:
1072 	error = dpm_run_callback(callback, dev, state, info);
1073 
1074 	device_unlock(dev);
1075 	dpm_watchdog_clear(&wd);
1076 
1077  Complete:
1078 	complete_all(&dev->power.completion);
1079 
1080 	TRACE_RESUME(error);
1081 
1082 	if (error) {
1083 		async_error = error;
1084 		dpm_save_failed_dev(dev_name(dev));
1085 		pm_dev_err(dev, state, async ? " async" : "", error);
1086 	}
1087 
1088 	dpm_async_resume_children(dev, async_resume);
1089 }
1090 
1091 static void async_resume(void *data, async_cookie_t cookie)
1092 {
1093 	struct device *dev = data;
1094 
1095 	device_resume(dev, pm_transition, true);
1096 	put_device(dev);
1097 }
1098 
1099 /**
1100  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1101  * @state: PM transition of the system being carried out.
1102  *
1103  * Execute the appropriate "resume" callback for all devices whose status
1104  * indicates that they are suspended.
1105  */
1106 void dpm_resume(pm_message_t state)
1107 {
1108 	struct device *dev;
1109 	ktime_t starttime = ktime_get();
1110 
1111 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1112 	might_sleep();
1113 
1114 	pm_transition = state;
1115 	async_error = 0;
1116 
1117 	mutex_lock(&dpm_list_mtx);
1118 
1119 	/*
1120 	 * Start processing "async" root devices upfront so they don't wait for
1121 	 * the "sync" devices they don't depend on.
1122 	 */
1123 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1124 		dpm_clear_async_state(dev);
1125 		if (dpm_root_device(dev))
1126 			dpm_async_with_cleanup(dev, async_resume);
1127 	}
1128 
1129 	while (!list_empty(&dpm_suspended_list)) {
1130 		dev = to_device(dpm_suspended_list.next);
1131 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1132 
1133 		if (!dpm_async_fn(dev, async_resume)) {
1134 			get_device(dev);
1135 
1136 			mutex_unlock(&dpm_list_mtx);
1137 
1138 			device_resume(dev, state, false);
1139 
1140 			put_device(dev);
1141 
1142 			mutex_lock(&dpm_list_mtx);
1143 		}
1144 	}
1145 	mutex_unlock(&dpm_list_mtx);
1146 	async_synchronize_full();
1147 	dpm_show_time(starttime, state, 0, NULL);
1148 	if (async_error)
1149 		dpm_save_failed_step(SUSPEND_RESUME);
1150 
1151 	cpufreq_resume();
1152 	devfreq_resume();
1153 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1154 }
1155 
1156 /**
1157  * device_complete - Complete a PM transition for given device.
1158  * @dev: Device to handle.
1159  * @state: PM transition of the system being carried out.
1160  */
1161 static void device_complete(struct device *dev, pm_message_t state)
1162 {
1163 	void (*callback)(struct device *) = NULL;
1164 	const char *info = NULL;
1165 
1166 	if (dev->power.syscore)
1167 		goto out;
1168 
1169 	device_lock(dev);
1170 
1171 	if (dev->pm_domain) {
1172 		info = "completing power domain ";
1173 		callback = dev->pm_domain->ops.complete;
1174 	} else if (dev->type && dev->type->pm) {
1175 		info = "completing type ";
1176 		callback = dev->type->pm->complete;
1177 	} else if (dev->class && dev->class->pm) {
1178 		info = "completing class ";
1179 		callback = dev->class->pm->complete;
1180 	} else if (dev->bus && dev->bus->pm) {
1181 		info = "completing bus ";
1182 		callback = dev->bus->pm->complete;
1183 	}
1184 
1185 	if (!callback && dev->driver && dev->driver->pm) {
1186 		info = "completing driver ";
1187 		callback = dev->driver->pm->complete;
1188 	}
1189 
1190 	if (callback) {
1191 		pm_dev_dbg(dev, state, info);
1192 		callback(dev);
1193 	}
1194 
1195 	device_unlock(dev);
1196 
1197 out:
1198 	/* If enabling runtime PM for the device is blocked, unblock it. */
1199 	pm_runtime_unblock(dev);
1200 	pm_runtime_put(dev);
1201 }
1202 
1203 /**
1204  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1205  * @state: PM transition of the system being carried out.
1206  *
1207  * Execute the ->complete() callbacks for all devices whose PM status is not
1208  * DPM_ON (this allows new devices to be registered).
1209  */
1210 void dpm_complete(pm_message_t state)
1211 {
1212 	struct list_head list;
1213 
1214 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1215 	might_sleep();
1216 
1217 	INIT_LIST_HEAD(&list);
1218 	mutex_lock(&dpm_list_mtx);
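	/*
	 * Move each device to a local list so that dpm_list_mtx can be dropped
	 * while its ->complete() callback is running; the list is spliced back
	 * into dpm_list afterwards.
	 */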
1219 	while (!list_empty(&dpm_prepared_list)) {
1220 		struct device *dev = to_device(dpm_prepared_list.prev);
1221 
1222 		get_device(dev);
1223 		dev->power.is_prepared = false;
1224 		list_move(&dev->power.entry, &list);
1225 
1226 		mutex_unlock(&dpm_list_mtx);
1227 
1228 		trace_device_pm_callback_start(dev, "", state.event);
1229 		device_complete(dev, state);
1230 		trace_device_pm_callback_end(dev, 0);
1231 
1232 		put_device(dev);
1233 
1234 		mutex_lock(&dpm_list_mtx);
1235 	}
1236 	list_splice(&list, &dpm_list);
1237 	mutex_unlock(&dpm_list_mtx);
1238 
1239 	/* Allow device probing and trigger re-probing of deferred devices */
1240 	device_unblock_probing();
1241 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1242 }
1243 
1244 /**
1245  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1246  * @state: PM transition of the system being carried out.
1247  *
1248  * Execute "resume" callbacks for all devices and complete the PM transition of
1249  * the system.
1250  */
1251 void dpm_resume_end(pm_message_t state)
1252 {
1253 	dpm_resume(state);
1254 	dpm_complete(state);
1255 }
1256 EXPORT_SYMBOL_GPL(dpm_resume_end);
1257 
1258 
1259 /*------------------------- Suspend routines -------------------------*/
1260 
1261 static bool dpm_leaf_device(struct device *dev)
1262 {
1263 	struct device *child;
1264 
1265 	lockdep_assert_held(&dpm_list_mtx);
1266 
1267 	child = device_find_any_child(dev);
1268 	if (child) {
1269 		put_device(child);
1270 
1271 		return false;
1272 	}
1273 
1274 	return true;
1275 }
1276 
1277 static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
1278 {
1279 	guard(mutex)(&dpm_list_mtx);
1280 
1281 	/*
1282 	 * If the device is suspended asynchronously and the parent's callback
1283 	 * deletes both the device and the parent itself, the parent object may
1284 	 * be freed while this function is running, so avoid that by checking
1285 	 * if the device has been deleted already as the parent cannot be
1286 	 * deleted before it.
1287 	 */
1288 	if (!device_pm_initialized(dev))
1289 		return;
1290 
1291 	/* Start processing the device's parent if it is "async". */
1292 	if (dev->parent)
1293 		dpm_async_with_cleanup(dev->parent, func);
1294 }
1295 
1296 /**
1297  * resume_event - Return a "resume" message for given "suspend" sleep state.
1298  * @sleep_state: PM message representing a sleep state.
1299  *
1300  * Return a PM message representing the resume event corresponding to given
1301  * sleep state.
1302  */
1303 static pm_message_t resume_event(pm_message_t sleep_state)
1304 {
1305 	switch (sleep_state.event) {
1306 	case PM_EVENT_SUSPEND:
1307 		return PMSG_RESUME;
1308 	case PM_EVENT_FREEZE:
1309 	case PM_EVENT_QUIESCE:
1310 		return PMSG_RECOVER;
1311 	case PM_EVENT_HIBERNATE:
1312 		return PMSG_RESTORE;
1313 	}
1314 	return PMSG_ON;
1315 }
1316 
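/*
 * Mark the device's parent and all of its suppliers as having to be resumed,
 * so dev_pm_skip_resume() will not leave them suspended.
 */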
1317 static void dpm_superior_set_must_resume(struct device *dev)
1318 {
1319 	struct device_link *link;
1320 	int idx;
1321 
1322 	if (dev->parent)
1323 		dev->parent->power.must_resume = true;
1324 
1325 	idx = device_links_read_lock();
1326 
1327 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1328 		link->supplier->power.must_resume = true;
1329 
1330 	device_links_read_unlock(idx);
1331 }
1332 
1333 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1334 
1335 /**
1336  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1337  * @dev: Device to handle.
1338  * @state: PM transition of the system being carried out.
1339  * @async: If true, the device is being suspended asynchronously.
1340  *
1341  * The driver of @dev will not receive interrupts while this function is being
1342  * executed.
1343  */
1344 static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1345 {
1346 	pm_callback_t callback = NULL;
1347 	const char *info = NULL;
1348 	int error = 0;
1349 
1350 	TRACE_DEVICE(dev);
1351 	TRACE_SUSPEND(0);
1352 
1353 	dpm_wait_for_subordinate(dev, async);
1354 
1355 	if (async_error)
1356 		goto Complete;
1357 
1358 	if (dev->power.syscore || dev->power.direct_complete)
1359 		goto Complete;
1360 
1361 	if (dev->pm_domain) {
1362 		info = "noirq power domain ";
1363 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1364 	} else if (dev->type && dev->type->pm) {
1365 		info = "noirq type ";
1366 		callback = pm_noirq_op(dev->type->pm, state);
1367 	} else if (dev->class && dev->class->pm) {
1368 		info = "noirq class ";
1369 		callback = pm_noirq_op(dev->class->pm, state);
1370 	} else if (dev->bus && dev->bus->pm) {
1371 		info = "noirq bus ";
1372 		callback = pm_noirq_op(dev->bus->pm, state);
1373 	}
1374 	if (callback)
1375 		goto Run;
1376 
1377 	if (dev_pm_skip_suspend(dev))
1378 		goto Skip;
1379 
1380 	if (dev->driver && dev->driver->pm) {
1381 		info = "noirq driver ";
1382 		callback = pm_noirq_op(dev->driver->pm, state);
1383 	}
1384 
1385 Run:
1386 	error = dpm_run_callback(callback, dev, state, info);
1387 	if (error) {
1388 		async_error = error;
1389 		dpm_save_failed_dev(dev_name(dev));
1390 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1391 		goto Complete;
1392 	}
1393 
1394 Skip:
1395 	dev->power.is_noirq_suspended = true;
1396 
1397 	/*
1398 	 * Devices must be resumed unless they are explicitly allowed to be left
1399 	 * in suspend, but even in that case skipping the resume of devices that
1400 	 * were in use right before the system suspend (as indicated by their
1401 	 * runtime PM usage counters and child counters) would be suboptimal.
1402 	 */
1403 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1404 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1405 		dev->power.must_resume = true;
1406 
1407 	if (dev->power.must_resume)
1408 		dpm_superior_set_must_resume(dev);
1409 
1410 Complete:
1411 	complete_all(&dev->power.completion);
1412 	TRACE_SUSPEND(error);
1413 
1414 	if (error || async_error)
1415 		return error;
1416 
1417 	dpm_async_suspend_parent(dev, async_suspend_noirq);
1418 
1419 	return 0;
1420 }
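/*
 * Illustrative sketch (not part of this file): a hypothetical driver whose
 * device may be left suspended across system resume would advertise that with
 * something like
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * which the code above combines with power.may_skip_resume and the runtime PM
 * counters to decide whether power.must_resume needs to be set.
 */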
1421 
1422 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1423 {
1424 	struct device *dev = data;
1425 
1426 	device_suspend_noirq(dev, pm_transition, true);
1427 	put_device(dev);
1428 }
1429 
1430 static int dpm_noirq_suspend_devices(pm_message_t state)
1431 {
1432 	ktime_t starttime = ktime_get();
1433 	struct device *dev;
1434 	int error = 0;
1435 
1436 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1437 
1438 	pm_transition = state;
1439 	async_error = 0;
1440 
1441 	mutex_lock(&dpm_list_mtx);
1442 
1443 	/*
1444 	 * Start processing "async" leaf devices upfront so they don't need to
1445 	 * wait for the "sync" devices they don't depend on.
1446 	 */
1447 	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1448 		dpm_clear_async_state(dev);
1449 		if (dpm_leaf_device(dev))
1450 			dpm_async_with_cleanup(dev, async_suspend_noirq);
1451 	}
1452 
1453 	while (!list_empty(&dpm_late_early_list)) {
1454 		dev = to_device(dpm_late_early_list.prev);
1455 
1456 		list_move(&dev->power.entry, &dpm_noirq_list);
1457 
1458 		if (dpm_async_fn(dev, async_suspend_noirq))
1459 			continue;
1460 
1461 		get_device(dev);
1462 
1463 		mutex_unlock(&dpm_list_mtx);
1464 
1465 		error = device_suspend_noirq(dev, state, false);
1466 
1467 		put_device(dev);
1468 
1469 		mutex_lock(&dpm_list_mtx);
1470 
1471 		if (error || async_error) {
1472 			/*
1473 			 * Move all devices to the target list to resume them
1474 			 * properly.
1475 			 */
1476 			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1477 			break;
1478 		}
1479 	}
1480 
1481 	mutex_unlock(&dpm_list_mtx);
1482 
1483 	async_synchronize_full();
1484 	if (!error)
1485 		error = async_error;
1486 
1487 	if (error)
1488 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1489 
1490 	dpm_show_time(starttime, state, error, "noirq");
1491 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1492 	return error;
1493 }
1494 
1495 /**
1496  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1497  * @state: PM transition of the system being carried out.
1498  *
1499  * Prevent device drivers' interrupt handlers from being called and invoke
1500  * "noirq" suspend callbacks for all non-sysdev devices.
1501  */
1502 int dpm_suspend_noirq(pm_message_t state)
1503 {
1504 	int ret;
1505 
1506 	device_wakeup_arm_wake_irqs();
1507 	suspend_device_irqs();
1508 
1509 	ret = dpm_noirq_suspend_devices(state);
1510 	if (ret)
1511 		dpm_resume_noirq(resume_event(state));
1512 
1513 	return ret;
1514 }
1515 
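/*
 * If the device is in the wakeup path, propagate that to its parent unless the
 * parent ignores its children for power management purposes.
 */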
1516 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1517 {
1518 	struct device *parent = dev->parent;
1519 
1520 	if (!parent)
1521 		return;
1522 
1523 	spin_lock_irq(&parent->power.lock);
1524 
1525 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1526 		parent->power.wakeup_path = true;
1527 
1528 	spin_unlock_irq(&parent->power.lock);
1529 }
1530 
1531 static void async_suspend_late(void *data, async_cookie_t cookie);
1532 
1533 /**
1534  * device_suspend_late - Execute a "late suspend" callback for given device.
1535  * @dev: Device to handle.
1536  * @state: PM transition of the system being carried out.
1537  * @async: If true, the device is being suspended asynchronously.
1538  *
1539  * Runtime PM is disabled for @dev while this function is being executed.
1540  */
1541 static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
1542 {
1543 	pm_callback_t callback = NULL;
1544 	const char *info = NULL;
1545 	int error = 0;
1546 
1547 	TRACE_DEVICE(dev);
1548 	TRACE_SUSPEND(0);
1549 
1550 	/*
1551 	 * Disable runtime PM for the device without checking if there is a
1552 	 * pending resume request for it.
1553 	 */
1554 	__pm_runtime_disable(dev, false);
1555 
1556 	dpm_wait_for_subordinate(dev, async);
1557 
1558 	if (async_error)
1559 		goto Complete;
1560 
1561 	if (pm_wakeup_pending()) {
1562 		async_error = -EBUSY;
1563 		goto Complete;
1564 	}
1565 
1566 	if (dev->power.syscore || dev->power.direct_complete)
1567 		goto Complete;
1568 
1569 	if (dev->pm_domain) {
1570 		info = "late power domain ";
1571 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1572 	} else if (dev->type && dev->type->pm) {
1573 		info = "late type ";
1574 		callback = pm_late_early_op(dev->type->pm, state);
1575 	} else if (dev->class && dev->class->pm) {
1576 		info = "late class ";
1577 		callback = pm_late_early_op(dev->class->pm, state);
1578 	} else if (dev->bus && dev->bus->pm) {
1579 		info = "late bus ";
1580 		callback = pm_late_early_op(dev->bus->pm, state);
1581 	}
1582 	if (callback)
1583 		goto Run;
1584 
1585 	if (dev_pm_skip_suspend(dev))
1586 		goto Skip;
1587 
1588 	if (dev->driver && dev->driver->pm) {
1589 		info = "late driver ";
1590 		callback = pm_late_early_op(dev->driver->pm, state);
1591 	}
1592 
1593 Run:
1594 	error = dpm_run_callback(callback, dev, state, info);
1595 	if (error) {
1596 		async_error = error;
1597 		dpm_save_failed_dev(dev_name(dev));
1598 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1599 		goto Complete;
1600 	}
1601 	dpm_propagate_wakeup_to_parent(dev);
1602 
1603 Skip:
1604 	dev->power.is_late_suspended = true;
1605 
1606 Complete:
1607 	TRACE_SUSPEND(error);
1608 	complete_all(&dev->power.completion);
1609 
1610 	if (error || async_error)
1611 		return error;
1612 
1613 	dpm_async_suspend_parent(dev, async_suspend_late);
1614 
1615 	return 0;
1616 }
1617 
1618 static void async_suspend_late(void *data, async_cookie_t cookie)
1619 {
1620 	struct device *dev = data;
1621 
1622 	device_suspend_late(dev, pm_transition, true);
1623 	put_device(dev);
1624 }
1625 
1626 /**
1627  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1628  * @state: PM transition of the system being carried out.
1629  */
1630 int dpm_suspend_late(pm_message_t state)
1631 {
1632 	ktime_t starttime = ktime_get();
1633 	struct device *dev;
1634 	int error = 0;
1635 
1636 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1637 
1638 	pm_transition = state;
1639 	async_error = 0;
1640 
1641 	wake_up_all_idle_cpus();
1642 
1643 	mutex_lock(&dpm_list_mtx);
1644 
1645 	/*
1646 	 * Start processing "async" leaf devices upfront so they don't need to
1647 	 * wait for the "sync" devices they don't depend on.
1648 	 */
1649 	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1650 		dpm_clear_async_state(dev);
1651 		if (dpm_leaf_device(dev))
1652 			dpm_async_with_cleanup(dev, async_suspend_late);
1653 	}
1654 
1655 	while (!list_empty(&dpm_suspended_list)) {
1656 		dev = to_device(dpm_suspended_list.prev);
1657 
1658 		list_move(&dev->power.entry, &dpm_late_early_list);
1659 
1660 		if (dpm_async_fn(dev, async_suspend_late))
1661 			continue;
1662 
1663 		get_device(dev);
1664 
1665 		mutex_unlock(&dpm_list_mtx);
1666 
1667 		error = device_suspend_late(dev, state, false);
1668 
1669 		put_device(dev);
1670 
1671 		mutex_lock(&dpm_list_mtx);
1672 
1673 		if (error || async_error) {
1674 			/*
1675 			 * Move all devices to the target list to resume them
1676 			 * properly.
1677 			 */
1678 			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1679 			break;
1680 		}
1681 	}
1682 
1683 	mutex_unlock(&dpm_list_mtx);
1684 
1685 	async_synchronize_full();
1686 	if (!error)
1687 		error = async_error;
1688 
1689 	if (error) {
1690 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1691 		dpm_resume_early(resume_event(state));
1692 	}
1693 	dpm_show_time(starttime, state, error, "late");
1694 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1695 	return error;
1696 }
1697 
1698 /**
1699  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1700  * @state: PM transition of the system being carried out.
1701  */
1702 int dpm_suspend_end(pm_message_t state)
1703 {
1704 	ktime_t starttime = ktime_get();
1705 	int error;
1706 
1707 	error = dpm_suspend_late(state);
1708 	if (error)
1709 		goto out;
1710 
1711 	error = dpm_suspend_noirq(state);
1712 	if (error)
1713 		dpm_resume_early(resume_event(state));
1714 
1715 out:
1716 	dpm_show_time(starttime, state, error, "end");
1717 	return error;
1718 }
1719 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1720 
1721 /**
1722  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1723  * @dev: Device to suspend.
1724  * @state: PM transition of the system being carried out.
1725  * @cb: Suspend callback to execute.
1726  * @info: string description of caller.
1727  */
1728 static int legacy_suspend(struct device *dev, pm_message_t state,
1729 			  int (*cb)(struct device *dev, pm_message_t state),
1730 			  const char *info)
1731 {
1732 	int error;
1733 	ktime_t calltime;
1734 
1735 	calltime = initcall_debug_start(dev, cb);
1736 
1737 	trace_device_pm_callback_start(dev, info, state.event);
1738 	error = cb(dev, state);
1739 	trace_device_pm_callback_end(dev, error);
1740 	suspend_report_result(dev, cb, error);
1741 
1742 	initcall_debug_report(dev, calltime, cb, error);
1743 
1744 	return error;
1745 }
1746 
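/*
 * Clear the direct_complete flag of the device's parent and of all its
 * suppliers, so that none of them will skip its own suspend and resume
 * callbacks via the direct-complete optimization.
 */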
1747 static void dpm_clear_superiors_direct_complete(struct device *dev)
1748 {
1749 	struct device_link *link;
1750 	int idx;
1751 
1752 	if (dev->parent) {
1753 		spin_lock_irq(&dev->parent->power.lock);
1754 		dev->parent->power.direct_complete = false;
1755 		spin_unlock_irq(&dev->parent->power.lock);
1756 	}
1757 
1758 	idx = device_links_read_lock();
1759 
1760 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1761 		spin_lock_irq(&link->supplier->power.lock);
1762 		link->supplier->power.direct_complete = false;
1763 		spin_unlock_irq(&link->supplier->power.lock);
1764 	}
1765 
1766 	device_links_read_unlock(idx);
1767 }
1768 
1769 static void async_suspend(void *data, async_cookie_t cookie);
1770 
1771 /**
1772  * device_suspend - Execute "suspend" callbacks for given device.
1773  * @dev: Device to handle.
1774  * @state: PM transition of the system being carried out.
1775  * @async: If true, the device is being suspended asynchronously.
1776  */
1777 static int device_suspend(struct device *dev, pm_message_t state, bool async)
1778 {
1779 	pm_callback_t callback = NULL;
1780 	const char *info = NULL;
1781 	int error = 0;
1782 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1783 
1784 	TRACE_DEVICE(dev);
1785 	TRACE_SUSPEND(0);
1786 
1787 	dpm_wait_for_subordinate(dev, async);
1788 
1789 	if (async_error) {
1790 		dev->power.direct_complete = false;
1791 		goto Complete;
1792 	}
1793 
1794 	/*
1795 	 * Wait for possible runtime PM transitions of the device in progress
1796 	 * to complete and if there's a runtime resume request pending for it,
1797 	 * resume it before proceeding with invoking the system-wide suspend
1798 	 * callbacks for it.
1799 	 *
1800 	 * If the system-wide suspend callbacks below change the configuration
1801 	 * of the device, they must disable runtime PM for it or otherwise
1802 	 * ensure that its runtime-resume callbacks will not be confused by that
1803 	 * change in case they are invoked going forward.
1804 	 */
1805 	pm_runtime_barrier(dev);
1806 
1807 	if (pm_wakeup_pending()) {
1808 		dev->power.direct_complete = false;
1809 		async_error = -EBUSY;
1810 		goto Complete;
1811 	}
1812 
1813 	if (dev->power.syscore)
1814 		goto Complete;
1815 
1816 	/* Avoid direct_complete to let wakeup_path propagate. */
1817 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1818 		dev->power.direct_complete = false;
1819 
1820 	if (dev->power.direct_complete) {
1821 		if (pm_runtime_status_suspended(dev)) {
1822 			pm_runtime_disable(dev);
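			/*
			 * Recheck the status now that runtime PM is disabled:
			 * a concurrent runtime resume may have run before it
			 * was disabled.
			 */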
1823 			if (pm_runtime_status_suspended(dev)) {
1824 				pm_dev_dbg(dev, state, "direct-complete ");
1825 				dev->power.is_suspended = true;
1826 				goto Complete;
1827 			}
1828 
1829 			pm_runtime_enable(dev);
1830 		}
1831 		dev->power.direct_complete = false;
1832 	}
1833 
1834 	dev->power.may_skip_resume = true;
1835 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1836 
1837 	dpm_watchdog_set(&wd, dev);
1838 	device_lock(dev);
1839 
1840 	if (dev->pm_domain) {
1841 		info = "power domain ";
1842 		callback = pm_op(&dev->pm_domain->ops, state);
1843 		goto Run;
1844 	}
1845 
1846 	if (dev->type && dev->type->pm) {
1847 		info = "type ";
1848 		callback = pm_op(dev->type->pm, state);
1849 		goto Run;
1850 	}
1851 
1852 	if (dev->class && dev->class->pm) {
1853 		info = "class ";
1854 		callback = pm_op(dev->class->pm, state);
1855 		goto Run;
1856 	}
1857 
1858 	if (dev->bus) {
1859 		if (dev->bus->pm) {
1860 			info = "bus ";
1861 			callback = pm_op(dev->bus->pm, state);
1862 		} else if (dev->bus->suspend) {
1863 			pm_dev_dbg(dev, state, "legacy bus ");
1864 			error = legacy_suspend(dev, state, dev->bus->suspend,
1865 						"legacy bus ");
1866 			goto End;
1867 		}
1868 	}
1869 
1870  Run:
1871 	if (!callback && dev->driver && dev->driver->pm) {
1872 		info = "driver ";
1873 		callback = pm_op(dev->driver->pm, state);
1874 	}
1875 
1876 	error = dpm_run_callback(callback, dev, state, info);
1877 
1878  End:
1879 	if (!error) {
1880 		dev->power.is_suspended = true;
1881 		if (device_may_wakeup(dev))
1882 			dev->power.wakeup_path = true;
1883 
1884 		dpm_propagate_wakeup_to_parent(dev);
1885 		dpm_clear_superiors_direct_complete(dev);
1886 	}
1887 
1888 	device_unlock(dev);
1889 	dpm_watchdog_clear(&wd);
1890 
1891  Complete:
1892 	if (error) {
1893 		async_error = error;
1894 		dpm_save_failed_dev(dev_name(dev));
1895 		pm_dev_err(dev, state, async ? " async" : "", error);
1896 	}
1897 
1898 	complete_all(&dev->power.completion);
1899 	TRACE_SUSPEND(error);
1900 
1901 	if (error || async_error)
1902 		return error;
1903 
1904 	dpm_async_suspend_parent(dev, async_suspend);
1905 
1906 	return 0;
1907 }
1908 
1909 static void async_suspend(void *data, async_cookie_t cookie)
1910 {
1911 	struct device *dev = data;
1912 
1913 	device_suspend(dev, pm_transition, true);
1914 	put_device(dev);
1915 }
1916 
1917 /**
1918  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1919  * @state: PM transition of the system being carried out.
1920  */
1921 int dpm_suspend(pm_message_t state)
1922 {
1923 	ktime_t starttime = ktime_get();
1924 	struct device *dev;
1925 	int error = 0;
1926 
1927 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1928 	might_sleep();
1929 
1930 	devfreq_suspend();
1931 	cpufreq_suspend();
1932 
1933 	pm_transition = state;
1934 	async_error = 0;
1935 
1936 	mutex_lock(&dpm_list_mtx);
1937 
1938 	/*
1939 	 * Start processing "async" leaf devices upfront so they don't need to
1940 	 * wait for the "sync" devices they don't depend on.
1941 	 */
1942 	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
1943 		dpm_clear_async_state(dev);
1944 		if (dpm_leaf_device(dev))
1945 			dpm_async_with_cleanup(dev, async_suspend);
1946 	}
1947 
1948 	while (!list_empty(&dpm_prepared_list)) {
1949 		dev = to_device(dpm_prepared_list.prev);
1950 
1951 		list_move(&dev->power.entry, &dpm_suspended_list);
1952 
1953 		if (dpm_async_fn(dev, async_suspend))
1954 			continue;
1955 
1956 		get_device(dev);
1957 
1958 		mutex_unlock(&dpm_list_mtx);
1959 
1960 		error = device_suspend(dev, state, false);
1961 
1962 		put_device(dev);
1963 
1964 		mutex_lock(&dpm_list_mtx);
1965 
1966 		if (error || async_error) {
1967 			/*
1968 			 * Move all devices to the target list to resume them
1969 			 * properly.
1970 			 */
1971 			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
1972 			break;
1973 		}
1974 	}
1975 
1976 	mutex_unlock(&dpm_list_mtx);
1977 
1978 	async_synchronize_full();
1979 	if (!error)
1980 		error = async_error;
1981 
1982 	if (error)
1983 		dpm_save_failed_step(SUSPEND_SUSPEND);
1984 
1985 	dpm_show_time(starttime, state, error, NULL);
1986 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1987 	return error;
1988 }
1989 
1990 static bool device_prepare_smart_suspend(struct device *dev)
1991 {
1992 	struct device_link *link;
1993 	bool ret = true;
1994 	int idx;
1995 
1996 	/*
1997 	 * The "smart suspend" feature is enabled for devices whose drivers ask
1998 	 * for it and for devices without PM callbacks.
1999 	 *
2000 	 * However, if "smart suspend" is not enabled for the device's parent
2001 	 * or any of its suppliers connected to it by device links with
2002 	 * DL_FLAG_PM_RUNTIME set, it cannot be enabled for the device either.
2003 	 */
2004 	if (!dev->power.no_pm_callbacks &&
2005 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2006 		return false;
2007 
2008 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2009 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2010 		return false;
2011 
2012 	idx = device_links_read_lock();
2013 
2014 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
2015 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
2016 			continue;
2017 
2018 		if (!dev_pm_smart_suspend(link->supplier) &&
2019 		    !pm_runtime_blocked(link->supplier)) {
2020 			ret = false;
2021 			break;
2022 		}
2023 	}
2024 
2025 	device_links_read_unlock(idx);
2026 
2027 	return ret;
2028 }
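/*
 * Illustrative sketch (not part of the original file): drivers typically opt
 * into "smart suspend" by setting DPM_FLAG_SMART_SUSPEND (often together with
 * DPM_FLAG_MAY_SKIP_RESUME) from their probe path.  The foo_* names below are
 * hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
 *						    DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * device_prepare_smart_suspend() above then checks whether the parent and the
 * DL_FLAG_PM_RUNTIME suppliers allow the flag to take effect for the device.
 */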
2029 
2030 /**
2031  * device_prepare - Prepare a device for system power transition.
2032  * @dev: Device to handle.
2033  * @state: PM transition of the system being carried out.
2034  *
2035  * Execute the ->prepare() callback(s) for the given device.  No new children
2036  * of the device may be registered after this function has returned.
2037  */
2038 static int device_prepare(struct device *dev, pm_message_t state)
2039 {
2040 	int (*callback)(struct device *) = NULL;
2041 	bool smart_suspend;
2042 	int ret = 0;
2043 
2044 	/*
2045 	 * If a device's parent goes into runtime suspend at the wrong time,
2046 	 * it won't be possible to resume the device.  To prevent this we
2047 	 * block runtime suspend here, during the prepare phase, and allow
2048 	 * it again during the complete phase.
2049 	 */
2050 	pm_runtime_get_noresume(dev);
2051 	/*
2052 	 * If runtime PM is disabled for the device at this point and it has
2053 	 * never been enabled so far, it should not be enabled until this system
2054 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
2055 	 * subsequent attempts to enable it.
2056 	 */
2057 	smart_suspend = !pm_runtime_block_if_disabled(dev);
2058 
2059 	if (dev->power.syscore)
2060 		return 0;
2061 
2062 	device_lock(dev);
2063 
2064 	dev->power.wakeup_path = false;
2065 
2066 	if (dev->power.no_pm_callbacks)
2067 		goto unlock;
2068 
2069 	if (dev->pm_domain)
2070 		callback = dev->pm_domain->ops.prepare;
2071 	else if (dev->type && dev->type->pm)
2072 		callback = dev->type->pm->prepare;
2073 	else if (dev->class && dev->class->pm)
2074 		callback = dev->class->pm->prepare;
2075 	else if (dev->bus && dev->bus->pm)
2076 		callback = dev->bus->pm->prepare;
2077 
2078 	if (!callback && dev->driver && dev->driver->pm)
2079 		callback = dev->driver->pm->prepare;
2080 
2081 	if (callback)
2082 		ret = callback(dev);
2083 
2084 unlock:
2085 	device_unlock(dev);
2086 
2087 	if (ret < 0) {
2088 		suspend_report_result(dev, callback, ret);
2089 		pm_runtime_put(dev);
2090 		return ret;
2091 	}
2092 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
2093 	if (smart_suspend)
2094 		smart_suspend = device_prepare_smart_suspend(dev);
2095 
2096 	spin_lock_irq(&dev->power.lock);
2097 
2098 	dev->power.smart_suspend = smart_suspend;
2099 	/*
2100 	 * A positive return value from ->prepare() means "this device appears
2101 	 * to be runtime-suspended and its state is fine, so if it really is
2102 	 * runtime-suspended, you can leave it in that state provided that you
2103 	 * will do the same thing with all of its descendants".  This only
2104 	 * applies to suspend transitions, however.
2105 	 */
2106 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2107 		(ret > 0 || dev->power.no_pm_callbacks) &&
2108 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2109 
2110 	spin_unlock_irq(&dev->power.lock);
2111 
2112 	return 0;
2113 }
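/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * ->prepare() callback that requests the direct-complete optimization by
 * returning a positive value when its device is runtime-suspended and that
 * state is also adequate for the system transition:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * Drivers whose devices must never be handled this way can set
 * DPM_FLAG_NO_DIRECT_COMPLETE instead.
 */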
2114 
2115 /**
2116  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2117  * @state: PM transition of the system being carried out.
2118  *
2119  * Execute the ->prepare() callback(s) for all devices.
2120  */
2121 int dpm_prepare(pm_message_t state)
2122 {
2123 	int error = 0;
2124 
2125 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2126 	might_sleep();
2127 
2128 	/*
2129 	 * Give the known devices a chance to complete their probes before
2130 	 * probing is disabled below.  This synchronization point is important
2131 	 * at least at boot time and during hibernation restore.
2132 	 */
2133 	wait_for_device_probe();
2134 	/*
2135 	 * Probing devices during suspend or hibernation is unsafe and would
2136 	 * make system behavior unpredictable, so prohibit device probing here
2137 	 * and defer any probes instead.  The normal behavior will be restored
2138 	 * in dpm_complete().
2139 	 */
2140 	device_block_probing();
2141 
2142 	mutex_lock(&dpm_list_mtx);
2143 	while (!list_empty(&dpm_list) && !error) {
2144 		struct device *dev = to_device(dpm_list.next);
2145 
2146 		get_device(dev);
2147 
2148 		mutex_unlock(&dpm_list_mtx);
2149 
2150 		trace_device_pm_callback_start(dev, "", state.event);
2151 		error = device_prepare(dev, state);
2152 		trace_device_pm_callback_end(dev, error);
2153 
2154 		mutex_lock(&dpm_list_mtx);
2155 
2156 		if (!error) {
2157 			dev->power.is_prepared = true;
2158 			if (!list_empty(&dev->power.entry))
2159 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
2160 		} else if (error == -EAGAIN) {
2161 			error = 0;
2162 		} else {
2163 			dev_info(dev, "not prepared for power transition: code %d\n",
2164 				 error);
2165 		}
2166 
2167 		mutex_unlock(&dpm_list_mtx);
2168 
2169 		put_device(dev);
2170 
2171 		mutex_lock(&dpm_list_mtx);
2172 	}
2173 	mutex_unlock(&dpm_list_mtx);
2174 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2175 	return error;
2176 }
2177 
2178 /**
2179  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2180  * @state: PM transition of the system being carried out.
2181  *
2182  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2183  * callbacks for them.
2184  */
2185 int dpm_suspend_start(pm_message_t state)
2186 {
2187 	ktime_t starttime = ktime_get();
2188 	int error;
2189 
2190 	error = dpm_prepare(state);
2191 	if (error)
2192 		dpm_save_failed_step(SUSPEND_PREPARE);
2193 	else
2194 		error = dpm_suspend(state);
2195 
2196 	dpm_show_time(starttime, state, error, "start");
2197 	return error;
2198 }
2199 EXPORT_SYMBOL_GPL(dpm_suspend_start);
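/*
 * Illustrative sketch (not part of the original file): the system sleep core
 * (for example, suspend_devices_and_enter() in kernel/power/suspend.c) pairs
 * this function with dpm_resume_end(), roughly as follows (simplified):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error)
 *		goto Resume_devices;
 *
 *	... run the late/noirq suspend phases, enter the sleep state, resume ...
 *
 * Resume_devices:
 *	dpm_resume_end(PMSG_RESUME);
 *
 * so that every device prepared or suspended here is resumed and completed
 * even if dpm_suspend_start() fails part of the way through.
 */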
2200 
2201 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2202 {
2203 	if (ret)
2204 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2205 }
2206 EXPORT_SYMBOL_GPL(__suspend_report_result);
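/*
 * Illustrative sketch (not part of the original file): callers normally use
 * the suspend_report_result() wrapper from <linux/pm.h>, which supplies
 * __func__ automatically.  A legacy-style suspend path (similar to
 * legacy_suspend() in this file) reports a failing callback like this:
 *
 *	if (drv->suspend) {
 *		error = drv->suspend(dev, state);
 *		suspend_report_result(dev, drv->suspend, error);
 *	}
 */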
2207 
2208 /**
2209  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2210  * @subordinate: Device that needs to wait for @dev.
2211  * @dev: Device to wait for.
2212  */
2213 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2214 {
2215 	dpm_wait(dev, subordinate->power.async_suspend);
2216 	return async_error;
2217 }
2218 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
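/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * whose suspend ordering depends on a device that is not one of its ancestors
 * could wait for that device explicitly (the foo_* names are made up):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_quiesce(foo);
 *	}
 *
 * Device links created with device_link_add() are usually a better way to
 * express such dependencies, since they also affect the suspend/resume order.
 */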
2219 
2220 /**
2221  * dpm_for_each_dev - device iterator.
2222  * @data: data for the callback.
2223  * @fn: function to be called for each device.
2224  *
2225  * Iterate over devices in dpm_list, and call @fn for each device,
2226  * passing it @data.
2227  */
2228 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2229 {
2230 	struct device *dev;
2231 
2232 	if (!fn)
2233 		return;
2234 
2235 	device_pm_lock();
2236 	list_for_each_entry(dev, &dpm_list, power.entry)
2237 		fn(dev, data);
2238 	device_pm_unlock();
2239 }
2240 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
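/*
 * Illustrative sketch (not part of the original file): counting the devices
 * currently on dpm_list with a hypothetical helper:
 *
 *	static void count_one_device(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one_device);
 *
 * The callback runs with dpm_list_mtx held, so it must not register or
 * unregister devices.
 */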
2241 
2242 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2243 {
2244 	if (!ops)
2245 		return true;
2246 
2247 	return !ops->prepare &&
2248 	       !ops->suspend &&
2249 	       !ops->suspend_late &&
2250 	       !ops->suspend_noirq &&
2251 	       !ops->resume_noirq &&
2252 	       !ops->resume_early &&
2253 	       !ops->resume &&
2254 	       !ops->complete;
2255 }
2256 
2257 void device_pm_check_callbacks(struct device *dev)
2258 {
2259 	unsigned long flags;
2260 
2261 	spin_lock_irqsave(&dev->power.lock, flags);
2262 	dev->power.no_pm_callbacks =
2263 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2264 		 !dev->bus->suspend && !dev->bus->resume)) &&
2265 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2266 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2267 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2268 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2269 		 !dev->driver->suspend && !dev->driver->resume));
2270 	spin_unlock_irqrestore(&dev->power.lock, flags);
2271 }
2272 
2273 bool dev_pm_skip_suspend(struct device *dev)
2274 {
2275 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2276 }
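/*
 * Illustrative sketch (not part of the original file): bus types use this
 * helper in their late/noirq suspend callbacks to skip redundant work for
 * devices left runtime-suspended by "smart suspend", along the lines of what
 * the PCI bus type does:
 *
 *	static int foo_bus_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_skip_suspend(dev))
 *			return 0;
 *
 *		return pm_generic_suspend_late(dev);
 *	}
 */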
2277