// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#define pr_fmt(fmt) "PM: " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)ktime_us_delta(rettime, calltime));
}
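
/*
 * Illustration (editor's note, not part of the original file): the two
 * helpers above only print when pm_print_times_enabled is set, which
 * userspace can toggle at run time, e.g.:
 *
 *	# echo 1 > /sys/power/pm_print_times
 *
 * After that, every device callback invocation and its duration show up in
 * the kernel log, which helps to find slow suspend/resume paths.
 */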

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (e.g. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
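
/*
 * Illustration (editor's note, not part of the original file): pm_op() and
 * its _late/_noirq variants below look callbacks up in a struct dev_pm_ops
 * provided by a driver or subsystem. A minimal sketch of how a driver might
 * populate one, assuming hypothetical foo_suspend()/foo_resume() callbacks:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() fills in .suspend/.resume as well as the
 * freeze/thaw/poweroff/restore hibernation callbacks with the same pair, so
 * the switch statements in these helpers resolve to them for every sleep
 * transition.
 */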

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback runs.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "", dev->power.driver_flags);
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
	bool			fatal;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);
	struct timer_list *timer = &wd->timer;
	unsigned int time_left;

	if (wd->fatal) {
		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
		show_stack(wd->tsk, NULL, KERN_EMERG);
		panic("%s %s: unrecoverable failure\n",
			dev_driver_string(wd->dev), dev_name(wd->dev));
	}

	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
	show_stack(wd->tsk, NULL, KERN_WARNING);

	wd->fatal = true;
	mod_timer(timer, jiffies + HZ * time_left);
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;
	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
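
/*
 * Worked example (editor's note, not part of the original file): with, say,
 * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=60 and CONFIG_DPM_WATCHDOG_TIMEOUT=120,
 * dpm_watchdog_set() arms the timer for 60 s with wd->fatal == false. If the
 * callback is still running when it fires, dpm_watchdog_handler() prints a
 * warning with the stuck task's stack, sets wd->fatal, and re-arms the timer
 * for the remaining 120 - 60 = 60 s; if that second expiry is reached, it
 * panics so a crash dump can be captured. When the two Kconfig values are
 * equal, the first expiry is already fatal.
 */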

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_skip_suspend(dev);

	return !dev->power.must_resume;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		dev->power.async_in_progress = true;

		get_device(dev);

		if (async_schedule_dev_nocall(func, dev))
			return true;

		put_device(dev);
	}
	/*
	 * Because async_schedule_dev_nocall() above has returned false or it
	 * has not been called at all, func() is not running and it is safe to
	 * update the async_in_progress flag without extra synchronization.
	 */
	dev->power.async_in_progress = false;
	return false;
}
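
/*
 * Illustration (editor's note, not part of the original file): is_async()
 * only returns true for devices that have opted in to asynchronous
 * suspend/resume. A driver typically does that once at probe time:
 *
 *	device_enable_async_suspend(dev);
 *
 * and userspace can disable the whole mechanism via /sys/power/pm_async.
 * Devices left synchronous are processed in dpm_list order, which preserves
 * parent/child and supplier/consumer ordering by construction.
 */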

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_skip_resume(dev);
	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_skip_suspend(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
	}
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_noirq(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static void device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async early" : " early", error);
	}
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);

	async_error = 0;
	pm_transition = state;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume_early(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME_EARLY);

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static void device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_resume(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	/*
	 * Trigger the resume of "async" devices upfront so they don't have to
	 * wait for the "non-async" ones they don't depend on.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		list_move_tail(&dev->power.entry, &dpm_prepared_list);

		if (!dev->power.async_in_progress) {
			get_device(dev);

			mutex_unlock(&dpm_list_mtx);

			device_resume(dev, state, false);

			put_device(dev);

			mutex_lock(&dpm_list_mtx);
		}
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);
	if (async_error)
		dpm_save_failed_step(SUSPEND_RESUME);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
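
/*
 * Illustration (editor's note, not part of the original file): a platform's
 * sleep core is expected to pair these entry points symmetrically. A rough
 * sketch of a full system suspend/resume cycle, error handling omitted:
 *
 *	dpm_prepare() + dpm_suspend()			<- dpm_suspend_start()
 *	dpm_suspend_late() + dpm_suspend_noirq()	<- dpm_suspend_end()
 *	... the platform enters the sleep state and wakes up ...
 *	dpm_resume_noirq() + dpm_resume_early()		<- dpm_resume_start()
 *	dpm_resume() + dpm_complete()			<- dpm_resume_end()
 *
 * Each resume phase undoes the matching suspend phase in reverse device
 * order, which is why the per-phase lists above are consumed the way they
 * are.
 */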


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume devices whose resume is not
	 * allowed to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_noirq(dev, pm_transition, true);
	put_device(dev);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		list_move(&dev->power.entry, &dpm_noirq_list);

		if (dpm_async_fn(dev, async_suspend_noirq))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);

	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (device_wakeup_path(dev) && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_skip_suspend(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async late" : " late", error);
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend_late(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);

	pm_transition = state;
	async_error = 0;

	wake_up_all_idle_cpus();

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		list_move(&dev->power.entry, &dpm_late_early_list);

		if (dpm_async_fn(dev, async_suspend_late))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(dev, cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = true;
	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error) {
		async_error = error;
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, state, async ? " async" : "", error);
	}

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	device_suspend(dev, pm_transition, true);
	put_device(dev);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	pm_transition = state;
	async_error = 0;

	mutex_lock(&dpm_list_mtx);

	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		list_move(&dev->power.entry, &dpm_suspended_list);

		if (dpm_async_fn(dev, async_suspend))
			continue;

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state, false);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);

		if (error || async_error)
			break;
	}

	mutex_unlock(&dpm_list_mtx);

	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error)
		dpm_save_failed_step(SUSPEND_SUSPEND);

	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	if (dev->power.syscore)
		return 0;

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(dev, callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
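
/*
 * Illustration (editor's note, not part of the original file): the "direct
 * complete" path above can be driven from a driver's ->prepare() callback.
 * A hypothetical sketch:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// Positive return: the device looks runtime-suspended and
 *		// may stay that way across the system transition.
 *		return pm_runtime_status_suspended(dev) ? 1 : 0;
 *	}
 *
 * If every descendant agrees, device_suspend() leaves the device alone and
 * device_resume() only re-enables runtime PM for it.
 */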

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * disabling further probing. This synchronization point is important
	 * at least at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer the probes instead. The normal behavior will be restored
	 * in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);

		if (!error) {
			dev->power.is_prepared = true;
			if (!list_empty(&dev->power.entry))
				list_move_tail(&dev->power.entry, &dpm_prepared_list);
		} else if (error == -EAGAIN) {
			error = 0;
		} else {
			dev_info(dev, "not prepared for power transition: code %d\n",
				 error);
		}

		mutex_unlock(&dpm_list_mtx);

		put_device(dev);

		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error)
		dpm_save_failed_step(SUSPEND_PREPARE);
	else
		error = dpm_suspend(state);

	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
	if (ret)
		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
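
/*
 * Illustration (editor's note, not part of the original file):
 * device_pm_wait_for_dev() lets a driver with an ordering dependency that
 * the device hierarchy does not capture wait for another device explicitly.
 * A hypothetical sketch, inside a system resume callback (the names
 * foo_resume and priv->controller_dev are invented for this example):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int error;
 *
 *		// Do not touch the controller before it has resumed.
 *		error = device_pm_wait_for_dev(dev, priv->controller_dev);
 *		if (error)
 *			return error;
 *		...
 *	}
 *
 * Device links are generally preferred for such dependencies, since the PM
 * core then orders the two devices automatically (see the dpm_wait_for_*
 * helpers above).
 */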

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
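
/*
 * Illustration (editor's note, not part of the original file): a minimal
 * dpm_for_each_dev() user, counting the devices currently on dpm_list
 * (foo_count_dev is an invented name):
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, foo_count_dev);
 *
 * Note that @fn runs with dpm_list_mtx held, so it must not try to acquire
 * a device lock (see the lock-ordering comment above dpm_list).
 */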

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

bool dev_pm_skip_suspend(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}
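
/*
 * Illustration (editor's note, not part of the original file):
 * dev_pm_skip_suspend() and dev_pm_skip_resume() are driven by driver flags
 * that are usually set once at probe time, e.g.:
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * With DPM_FLAG_SMART_SUSPEND set, a device that is already runtime-suspended
 * has its "late" and "noirq" suspend callbacks skipped, and with
 * DPM_FLAG_MAY_SKIP_RESUME it may be left suspended on resume as long as
 * nothing set power.must_resume during the suspend phase.
 */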
2018