xref: /linux/drivers/base/power/main.c (revision 0d5ec7919f3747193f051036b2301734a4b5e1d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37 
38 #include "../base.h"
39 #include "power.h"
40 
41 typedef int (*pm_callback_t)(struct device *);
42 
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44 	list_for_each_entry_rcu(pos, head, member, \
45 			device_links_read_lock_held())
46 
47 /*
48  * The entries in dpm_list are in depth-first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56 
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62 
63 static DEFINE_MUTEX(dpm_list_mtx);
64 static pm_message_t pm_transition;
65 
66 static DEFINE_MUTEX(async_wip_mtx);
67 static int async_error;
68 
69 static const char *pm_verb(int event)
70 {
71 	switch (event) {
72 	case PM_EVENT_SUSPEND:
73 		return "suspend";
74 	case PM_EVENT_RESUME:
75 		return "resume";
76 	case PM_EVENT_FREEZE:
77 		return "freeze";
78 	case PM_EVENT_QUIESCE:
79 		return "quiesce";
80 	case PM_EVENT_HIBERNATE:
81 		return "hibernate";
82 	case PM_EVENT_THAW:
83 		return "thaw";
84 	case PM_EVENT_RESTORE:
85 		return "restore";
86 	case PM_EVENT_RECOVER:
87 		return "recover";
88 	default:
89 		return "(unknown PM event)";
90 	}
91 }
92 
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99 	dev->power.is_prepared = false;
100 	dev->power.is_suspended = false;
101 	dev->power.is_noirq_suspended = false;
102 	dev->power.is_late_suspended = false;
103 	init_completion(&dev->power.completion);
104 	complete_all(&dev->power.completion);
105 	dev->power.wakeup = NULL;
106 	INIT_LIST_HEAD(&dev->power.entry);
107 }
108 
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114 	mutex_lock(&dpm_list_mtx);
115 }
116 
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122 	mutex_unlock(&dpm_list_mtx);
123 }
124 
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131 	/* Skip PM setup/initialization. */
132 	if (device_pm_not_required(dev))
133 		return;
134 
135 	pr_debug("Adding info for %s:%s\n",
136 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 	device_pm_check_callbacks(dev);
138 	mutex_lock(&dpm_list_mtx);
139 	if (dev->parent && dev->parent->power.is_prepared)
140 		dev_warn(dev, "parent %s should not be sleeping\n",
141 			dev_name(dev->parent));
142 	list_add_tail(&dev->power.entry, &dpm_list);
143 	dev->power.in_dpm_list = true;
144 	mutex_unlock(&dpm_list_mtx);
145 }
146 
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153 	if (device_pm_not_required(dev))
154 		return;
155 
156 	pr_debug("Removing info for %s:%s\n",
157 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 	complete_all(&dev->power.completion);
159 	mutex_lock(&dpm_list_mtx);
160 	list_del_init(&dev->power.entry);
161 	dev->power.in_dpm_list = false;
162 	mutex_unlock(&dpm_list_mtx);
163 	device_wakeup_disable(dev);
164 	pm_runtime_remove(dev);
165 	device_pm_check_callbacks(dev);
166 }
167 
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175 	pr_debug("Moving %s:%s before %s:%s\n",
176 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178 	/* Delete deva from dpm_list and reinsert before devb. */
179 	list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181 
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189 	pr_debug("Moving %s:%s after %s:%s\n",
190 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192 	/* Delete deva from dpm_list and reinsert after devb. */
193 	list_move(&deva->power.entry, &devb->power.entry);
194 }
195 
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202 	pr_debug("Moving %s:%s to end of list\n",
203 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 	list_move_tail(&dev->power.entry, &dpm_list);
205 }
206 
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209 	if (!pm_print_times_enabled)
210 		return 0;
211 
212 	dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
213 		 task_pid_nr(current),
214 		 dev->parent ? dev_name(dev->parent) : "none");
215 	return ktime_get();
216 }
217 
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219 				  void *cb, int error)
220 {
221 	ktime_t rettime;
222 
223 	if (!pm_print_times_enabled)
224 		return;
225 
226 	rettime = ktime_get();
227 	dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
228 		 (unsigned long long)ktime_us_delta(rettime, calltime));
229 }
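
/*
 * Note: the two helpers above only print anything when pm_print_times_enabled
 * is set, which is normally controlled via /sys/power/pm_print_times and is
 * switched on by default when the kernel is booted with initcall_debug.
 */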
230 
231 /**
232  * dpm_wait - Wait for a PM operation to complete.
233  * @dev: Device to wait for.
234  * @async: If unset, wait only if the device's power.async_suspend flag is set.
235  */
236 static void dpm_wait(struct device *dev, bool async)
237 {
238 	if (!dev)
239 		return;
240 
241 	if (async || (pm_async_enabled && dev->power.async_suspend))
242 		wait_for_completion(&dev->power.completion);
243 }
244 
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
246 {
247 	dpm_wait(dev, *((bool *)async_ptr));
248 	return 0;
249 }
250 
251 static void dpm_wait_for_children(struct device *dev, bool async)
252 {
253 	device_for_each_child(dev, &async, dpm_wait_fn);
254 }
255 
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
257 {
258 	struct device_link *link;
259 	int idx;
260 
261 	idx = device_links_read_lock();
262 
263 	/*
264 	 * If the supplier goes away right after we've checked the link to it,
265 	 * we'll wait for its completion to change the state, but that's fine,
266 	 * because the only things that will block as a result are the SRCU
267 	 * callbacks freeing the link objects for the links in the list we're
268 	 * walking.
269 	 */
270 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
271 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
272 			dpm_wait(link->supplier, async);
273 
274 	device_links_read_unlock(idx);
275 }
276 
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
278 {
279 	struct device *parent;
280 
281 	/*
282 	 * If the device is resumed asynchronously and the parent's callback
283 	 * deletes both the device and the parent itself, the parent object may
284 	 * be freed while this function is running, so avoid that by reference
285 	 * counting the parent once more unless the device has been deleted
286 	 * already (in which case return right away).
287 	 */
288 	mutex_lock(&dpm_list_mtx);
289 
290 	if (!device_pm_initialized(dev)) {
291 		mutex_unlock(&dpm_list_mtx);
292 		return false;
293 	}
294 
295 	parent = get_device(dev->parent);
296 
297 	mutex_unlock(&dpm_list_mtx);
298 
299 	dpm_wait(parent, async);
300 	put_device(parent);
301 
302 	dpm_wait_for_suppliers(dev, async);
303 
304 	/*
305 	 * If the parent's callback has deleted the device, attempting to resume
306 	 * it would be invalid, so avoid doing that then.
307 	 */
308 	return device_pm_initialized(dev);
309 }
310 
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
312 {
313 	struct device_link *link;
314 	int idx;
315 
316 	idx = device_links_read_lock();
317 
318 	/*
319 	 * The status of a device link can only be changed from "dormant" by a
320 	 * probe, but that cannot happen during system suspend/resume.  In
321 	 * theory it can change to "dormant" at that time, but then it is
322 	 * reasonable to wait for the target device anyway (e.g. if it goes
323 	 * away, it's better to wait for it to go away completely and then
324 	 * continue instead of trying to continue in parallel with its
325 	 * unregistration).
326 	 */
327 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
328 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
329 			dpm_wait(link->consumer, async);
330 
331 	device_links_read_unlock(idx);
332 }
333 
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
335 {
336 	dpm_wait_for_children(dev, async);
337 	dpm_wait_for_consumers(dev, async);
338 }
339 
340 /**
341  * pm_op - Return the PM operation appropriate for given PM event.
342  * @ops: PM operations to choose from.
343  * @state: PM transition of the system being carried out.
344  */
345 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
346 {
347 	switch (state.event) {
348 #ifdef CONFIG_SUSPEND
349 	case PM_EVENT_SUSPEND:
350 		return ops->suspend;
351 	case PM_EVENT_RESUME:
352 		return ops->resume;
353 #endif /* CONFIG_SUSPEND */
354 #ifdef CONFIG_HIBERNATE_CALLBACKS
355 	case PM_EVENT_FREEZE:
356 	case PM_EVENT_QUIESCE:
357 		return ops->freeze;
358 	case PM_EVENT_HIBERNATE:
359 		return ops->poweroff;
360 	case PM_EVENT_THAW:
361 	case PM_EVENT_RECOVER:
362 		return ops->thaw;
363 	case PM_EVENT_RESTORE:
364 		return ops->restore;
365 #endif /* CONFIG_HIBERNATE_CALLBACKS */
366 	}
367 
368 	return NULL;
369 }
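
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of the PM core):
 * with a dev_pm_ops like the one below, pm_op() above would return
 * foo_example_suspend for PM_EVENT_SUSPEND and foo_example_resume for
 * PM_EVENT_RESUME, and SET_SYSTEM_SLEEP_PM_OPS() reuses the same pair for the
 * hibernation events as well.
 */
static int __maybe_unused foo_example_suspend(struct device *dev)
{
	/* Quiesce the (hypothetical) device here. */
	return 0;
}

static int __maybe_unused foo_example_resume(struct device *dev)
{
	/* Undo what foo_example_suspend() did. */
	return 0;
}

static const struct dev_pm_ops foo_example_pm_ops __maybe_unused = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_example_suspend, foo_example_resume)
};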
370 
371 /**
372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
373  * @ops: PM operations to choose from.
374  * @state: PM transition of the system being carried out.
375  *
376  * Runtime PM is disabled for @dev while this function is being executed.
377  */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379 				      pm_message_t state)
380 {
381 	switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383 	case PM_EVENT_SUSPEND:
384 		return ops->suspend_late;
385 	case PM_EVENT_RESUME:
386 		return ops->resume_early;
387 #endif /* CONFIG_SUSPEND */
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389 	case PM_EVENT_FREEZE:
390 	case PM_EVENT_QUIESCE:
391 		return ops->freeze_late;
392 	case PM_EVENT_HIBERNATE:
393 		return ops->poweroff_late;
394 	case PM_EVENT_THAW:
395 	case PM_EVENT_RECOVER:
396 		return ops->thaw_early;
397 	case PM_EVENT_RESTORE:
398 		return ops->restore_early;
399 #endif /* CONFIG_HIBERNATE_CALLBACKS */
400 	}
401 
402 	return NULL;
403 }
404 
405 /**
406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
407  * @ops: PM operations to choose from.
408  * @state: PM transition of the system being carried out.
409  *
410  * The driver of @dev will not receive interrupts while this function is being
411  * executed.
412  */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415 	switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417 	case PM_EVENT_SUSPEND:
418 		return ops->suspend_noirq;
419 	case PM_EVENT_RESUME:
420 		return ops->resume_noirq;
421 #endif /* CONFIG_SUSPEND */
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423 	case PM_EVENT_FREEZE:
424 	case PM_EVENT_QUIESCE:
425 		return ops->freeze_noirq;
426 	case PM_EVENT_HIBERNATE:
427 		return ops->poweroff_noirq;
428 	case PM_EVENT_THAW:
429 	case PM_EVENT_RECOVER:
430 		return ops->thaw_noirq;
431 	case PM_EVENT_RESTORE:
432 		return ops->restore_noirq;
433 #endif /* CONFIG_HIBERNATE_CALLBACKS */
434 	}
435 
436 	return NULL;
437 }
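
/*
 * Illustrative sketch (hypothetical driver code): the _noirq callback slots
 * that pm_noirq_op() looks up (and, via SET_LATE_SYSTEM_SLEEP_PM_OPS(), the
 * _late/_early slots used by pm_late_early_op()) are normally populated with
 * helper macros from <linux/pm.h>, for example:
 */
static int __maybe_unused foo_example_suspend_noirq(struct device *dev)
{
	/* Runs after the device's interrupt handlers have been disabled. */
	return 0;
}

static int __maybe_unused foo_example_resume_noirq(struct device *dev)
{
	/* Runs before the device's interrupt handlers are re-enabled. */
	return 0;
}

static const struct dev_pm_ops foo_example_noirq_pm_ops __maybe_unused = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_example_suspend_noirq,
				      foo_example_resume_noirq)
};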
438 
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441 	dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 		", may wakeup" : "", dev->power.driver_flags);
444 }
445 
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447 			int error)
448 {
449 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
450 		error);
451 }
452 
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454 			  const char *info)
455 {
456 	ktime_t calltime;
457 	u64 usecs64;
458 	int usecs;
459 
460 	calltime = ktime_get();
461 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462 	do_div(usecs64, NSEC_PER_USEC);
463 	usecs = usecs64;
464 	if (usecs == 0)
465 		usecs = 1;
466 
467 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468 		  info ?: "", info ? " " : "", pm_verb(state.event),
469 		  error ? "aborted" : "complete",
470 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472 
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 			    pm_message_t state, const char *info)
475 {
476 	ktime_t calltime;
477 	int error;
478 
479 	if (!cb)
480 		return 0;
481 
482 	calltime = initcall_debug_start(dev, cb);
483 
484 	pm_dev_dbg(dev, state, info);
485 	trace_device_pm_callback_start(dev, info, state.event);
486 	error = cb(dev);
487 	trace_device_pm_callback_end(dev, error);
488 	suspend_report_result(dev, cb, error);
489 
490 	initcall_debug_report(dev, calltime, cb, error);
491 
492 	return error;
493 }
494 
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497 	struct device		*dev;
498 	struct task_struct	*tsk;
499 	struct timer_list	timer;
500 	bool			fatal;
501 };
502 
503 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
504 	struct dpm_watchdog wd
505 
506 /**
507  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
508  * @t: The timer that PM watchdog depends on.
509  *
510  * Called when a driver has timed out suspending or resuming.
511  * There's not much we can do here to recover so panic() to
512  * capture a crash-dump in pstore.
513  */
514 static void dpm_watchdog_handler(struct timer_list *t)
515 {
516 	struct dpm_watchdog *wd = timer_container_of(wd, t, timer);
517 	struct timer_list *timer = &wd->timer;
518 	unsigned int time_left;
519 
520 	if (wd->fatal) {
521 		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522 		show_stack(wd->tsk, NULL, KERN_EMERG);
523 		panic("%s %s: unrecoverable failure\n",
524 			dev_driver_string(wd->dev), dev_name(wd->dev));
525 	}
526 
527 	time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
528 	dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
529 		 CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
530 	show_stack(wd->tsk, NULL, KERN_WARNING);
531 
532 	wd->fatal = true;
533 	mod_timer(timer, jiffies + HZ * time_left);
534 }
535 
536 /**
537  * dpm_watchdog_set - Enable pm watchdog for given device.
538  * @wd: Watchdog. Must be allocated on the stack.
539  * @dev: Device to handle.
540  */
541 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
542 {
543 	struct timer_list *timer = &wd->timer;
544 
545 	wd->dev = dev;
546 	wd->tsk = current;
547 	wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
548 
549 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
550 	/* use same timeout value for both suspend and resume */
551 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
552 	add_timer(timer);
553 }
554 
555 /**
556  * dpm_watchdog_clear - Disable suspend/resume watchdog.
557  * @wd: Watchdog to disable.
558  */
559 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
560 {
561 	struct timer_list *timer = &wd->timer;
562 
563 	timer_delete_sync(timer);
564 	timer_destroy_on_stack(timer);
565 }
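
/*
 * Timeline implied by the code above: a stuck suspend/resume callback first
 * triggers a warning after CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT seconds and,
 * if it still has not returned, a panic once a total of
 * CONFIG_DPM_WATCHDOG_TIMEOUT seconds has elapsed (when the two values are
 * equal, the first expiry panics right away).
 */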
566 #else
567 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
568 #define dpm_watchdog_set(x, y)
569 #define dpm_watchdog_clear(x)
570 #endif
571 
572 /*------------------------- Resume routines -------------------------*/
573 
574 /**
575  * dev_pm_skip_resume - System-wide device resume optimization check.
576  * @dev: Target device.
577  *
578  * Return:
579  * - %false if the transition under way is RESTORE.
580  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
581  * - The logical negation of %power.must_resume otherwise (that is, when the
582  *   transition under way is RESUME).
583  */
584 bool dev_pm_skip_resume(struct device *dev)
585 {
586 	if (pm_transition.event == PM_EVENT_RESTORE)
587 		return false;
588 
589 	if (pm_transition.event == PM_EVENT_THAW)
590 		return dev_pm_skip_suspend(dev);
591 
592 	return !dev->power.must_resume;
593 }
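
/*
 * Illustrative sketch (hypothetical driver code): a driver that wants its
 * device to be left in suspend across a system resume whenever possible can
 * set the flags below from its probe path, which feeds the
 * power.may_skip_resume and power.must_resume logic consumed by
 * dev_pm_skip_resume() above.
 */
static void __maybe_unused foo_example_opt_in_to_skip_resume(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_MAY_SKIP_RESUME);
}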
594 
595 static bool is_async(struct device *dev)
596 {
597 	return dev->power.async_suspend && pm_async_enabled
598 		&& !pm_trace_is_enabled();
599 }
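
/*
 * Illustrative sketch (hypothetical driver code): is_async() only returns
 * true for devices that have opted in to asynchronous suspend/resume (and
 * only while the /sys/power/pm_async knob is enabled), which a driver or bus
 * type usually does at probe/registration time.
 */
static void __maybe_unused foo_example_enable_async(struct device *dev)
{
	/* Let the PM core run this device's callbacks in an async thread. */
	device_enable_async_suspend(dev);
}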
600 
601 static bool __dpm_async(struct device *dev, async_func_t func)
602 {
603 	if (dev->power.work_in_progress)
604 		return true;
605 
606 	if (!is_async(dev))
607 		return false;
608 
609 	dev->power.work_in_progress = true;
610 
611 	get_device(dev);
612 
613 	if (async_schedule_dev_nocall(func, dev))
614 		return true;
615 
616 	put_device(dev);
617 
618 	return false;
619 }
620 
621 static bool dpm_async_fn(struct device *dev, async_func_t func)
622 {
623 	guard(mutex)(&async_wip_mtx);
624 
625 	return __dpm_async(dev, func);
626 }
627 
628 static int dpm_async_with_cleanup(struct device *dev, void *fn)
629 {
630 	guard(mutex)(&async_wip_mtx);
631 
632 	if (!__dpm_async(dev, fn))
633 		dev->power.work_in_progress = false;
634 
635 	return 0;
636 }
637 
638 static void dpm_async_resume_children(struct device *dev, async_func_t func)
639 {
640 	/*
641 	 * Prevent racing with dpm_clear_async_state() during initial list
642 	 * walks in dpm_noirq_resume_devices(), dpm_resume_early(), and
643 	 * dpm_resume().
644 	 */
645 	guard(mutex)(&dpm_list_mtx);
646 
647 	/*
648 	 * Start processing "async" children of the device unless it's been
649 	 * started already for them.
650 	 */
651 	device_for_each_child(dev, func, dpm_async_with_cleanup);
652 }
653 
654 static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
655 {
656 	struct device_link *link;
657 	int idx;
658 
659 	dpm_async_resume_children(dev, func);
660 
661 	idx = device_links_read_lock();
662 
663 	/* Start processing the device's "async" consumers. */
664 	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
665 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
666 			dpm_async_with_cleanup(link->consumer, func);
667 
668 	device_links_read_unlock(idx);
669 }
670 
671 static void dpm_clear_async_state(struct device *dev)
672 {
673 	reinit_completion(&dev->power.completion);
674 	dev->power.work_in_progress = false;
675 }
676 
677 static bool dpm_root_device(struct device *dev)
678 {
679 	lockdep_assert_held(&dpm_list_mtx);
680 
681 	/*
682 	 * Since this function is required to run under dpm_list_mtx, the
683 	 * list_empty() below will only return true if the device's list of
684 	 * suppliers is actually empty before calling it.
685 	 */
686 	return !dev->parent && list_empty(&dev->links.suppliers);
687 }
688 
689 static void async_resume_noirq(void *data, async_cookie_t cookie);
690 
691 /**
692  * device_resume_noirq - Execute a "noirq resume" callback for given device.
693  * @dev: Device to handle.
694  * @state: PM transition of the system being carried out.
695  * @async: If true, the device is being resumed asynchronously.
696  *
697  * The driver of @dev will not receive interrupts while this function is being
698  * executed.
699  */
700 static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
701 {
702 	pm_callback_t callback = NULL;
703 	const char *info = NULL;
704 	bool skip_resume;
705 	int error = 0;
706 
707 	TRACE_DEVICE(dev);
708 	TRACE_RESUME(0);
709 
710 	if (dev->power.syscore || dev->power.direct_complete)
711 		goto Out;
712 
713 	if (!dev->power.is_noirq_suspended)
714 		goto Out;
715 
716 	if (!dpm_wait_for_superior(dev, async))
717 		goto Out;
718 
719 	skip_resume = dev_pm_skip_resume(dev);
720 	/*
721 	 * If the driver callback is skipped below or by the middle layer
722 	 * callback and device_resume_early() also skips the driver callback for
723 	 * this device later, it needs to appear as "suspended" to PM-runtime,
724 	 * so change its status accordingly.
725 	 *
726 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
727 	 * status to "active" unless its power.smart_suspend flag is clear, in
728 	 * which case it is not necessary to update its PM-runtime status.
729 	 */
730 	if (skip_resume)
731 		pm_runtime_set_suspended(dev);
732 	else if (dev_pm_smart_suspend(dev))
733 		pm_runtime_set_active(dev);
734 
735 	if (dev->pm_domain) {
736 		info = "noirq power domain ";
737 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
738 	} else if (dev->type && dev->type->pm) {
739 		info = "noirq type ";
740 		callback = pm_noirq_op(dev->type->pm, state);
741 	} else if (dev->class && dev->class->pm) {
742 		info = "noirq class ";
743 		callback = pm_noirq_op(dev->class->pm, state);
744 	} else if (dev->bus && dev->bus->pm) {
745 		info = "noirq bus ";
746 		callback = pm_noirq_op(dev->bus->pm, state);
747 	}
748 	if (callback)
749 		goto Run;
750 
751 	if (skip_resume)
752 		goto Skip;
753 
754 	if (dev->driver && dev->driver->pm) {
755 		info = "noirq driver ";
756 		callback = pm_noirq_op(dev->driver->pm, state);
757 	}
758 
759 Run:
760 	error = dpm_run_callback(callback, dev, state, info);
761 
762 Skip:
763 	dev->power.is_noirq_suspended = false;
764 
765 Out:
766 	complete_all(&dev->power.completion);
767 	TRACE_RESUME(error);
768 
769 	if (error) {
770 		WRITE_ONCE(async_error, error);
771 		dpm_save_failed_dev(dev_name(dev));
772 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
773 	}
774 
775 	dpm_async_resume_subordinate(dev, async_resume_noirq);
776 }
777 
778 static void async_resume_noirq(void *data, async_cookie_t cookie)
779 {
780 	struct device *dev = data;
781 
782 	device_resume_noirq(dev, pm_transition, true);
783 	put_device(dev);
784 }
785 
786 static void dpm_noirq_resume_devices(pm_message_t state)
787 {
788 	struct device *dev;
789 	ktime_t starttime = ktime_get();
790 
791 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
792 
793 	async_error = 0;
794 	pm_transition = state;
795 
796 	mutex_lock(&dpm_list_mtx);
797 
798 	/*
799 	 * Start processing "async" root devices upfront so they don't wait for
800 	 * the "sync" devices they don't depend on.
801 	 */
802 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
803 		dpm_clear_async_state(dev);
804 		if (dpm_root_device(dev))
805 			dpm_async_with_cleanup(dev, async_resume_noirq);
806 	}
807 
808 	while (!list_empty(&dpm_noirq_list)) {
809 		dev = to_device(dpm_noirq_list.next);
810 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
811 
812 		if (!dpm_async_fn(dev, async_resume_noirq)) {
813 			get_device(dev);
814 
815 			mutex_unlock(&dpm_list_mtx);
816 
817 			device_resume_noirq(dev, state, false);
818 
819 			put_device(dev);
820 
821 			mutex_lock(&dpm_list_mtx);
822 		}
823 	}
824 	mutex_unlock(&dpm_list_mtx);
825 	async_synchronize_full();
826 	dpm_show_time(starttime, state, 0, "noirq");
827 	if (READ_ONCE(async_error))
828 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
829 
830 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
831 }
832 
833 /**
834  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
835  * @state: PM transition of the system being carried out.
836  *
837  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
838  * allow device drivers' interrupt handlers to be called.
839  */
840 void dpm_resume_noirq(pm_message_t state)
841 {
842 	dpm_noirq_resume_devices(state);
843 
844 	resume_device_irqs();
845 	device_wakeup_disarm_wake_irqs();
846 }
847 
848 static void async_resume_early(void *data, async_cookie_t cookie);
849 
850 /**
851  * device_resume_early - Execute an "early resume" callback for given device.
852  * @dev: Device to handle.
853  * @state: PM transition of the system being carried out.
854  * @async: If true, the device is being resumed asynchronously.
855  *
856  * Runtime PM is disabled for @dev while this function is being executed.
857  */
858 static void device_resume_early(struct device *dev, pm_message_t state, bool async)
859 {
860 	pm_callback_t callback = NULL;
861 	const char *info = NULL;
862 	int error = 0;
863 
864 	TRACE_DEVICE(dev);
865 	TRACE_RESUME(0);
866 
867 	if (dev->power.syscore || dev->power.direct_complete)
868 		goto Out;
869 
870 	if (!dev->power.is_late_suspended)
871 		goto Out;
872 
873 	if (!dpm_wait_for_superior(dev, async))
874 		goto Out;
875 
876 	if (dev->pm_domain) {
877 		info = "early power domain ";
878 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
879 	} else if (dev->type && dev->type->pm) {
880 		info = "early type ";
881 		callback = pm_late_early_op(dev->type->pm, state);
882 	} else if (dev->class && dev->class->pm) {
883 		info = "early class ";
884 		callback = pm_late_early_op(dev->class->pm, state);
885 	} else if (dev->bus && dev->bus->pm) {
886 		info = "early bus ";
887 		callback = pm_late_early_op(dev->bus->pm, state);
888 	}
889 	if (callback)
890 		goto Run;
891 
892 	if (dev_pm_skip_resume(dev))
893 		goto Skip;
894 
895 	if (dev->driver && dev->driver->pm) {
896 		info = "early driver ";
897 		callback = pm_late_early_op(dev->driver->pm, state);
898 	}
899 
900 Run:
901 	error = dpm_run_callback(callback, dev, state, info);
902 
903 Skip:
904 	dev->power.is_late_suspended = false;
905 
906 Out:
907 	TRACE_RESUME(error);
908 
909 	pm_runtime_enable(dev);
910 	complete_all(&dev->power.completion);
911 
912 	if (error) {
913 		WRITE_ONCE(async_error, error);
914 		dpm_save_failed_dev(dev_name(dev));
915 		pm_dev_err(dev, state, async ? " async early" : " early", error);
916 	}
917 
918 	dpm_async_resume_subordinate(dev, async_resume_early);
919 }
920 
921 static void async_resume_early(void *data, async_cookie_t cookie)
922 {
923 	struct device *dev = data;
924 
925 	device_resume_early(dev, pm_transition, true);
926 	put_device(dev);
927 }
928 
929 /**
930  * dpm_resume_early - Execute "early resume" callbacks for all devices.
931  * @state: PM transition of the system being carried out.
932  */
933 void dpm_resume_early(pm_message_t state)
934 {
935 	struct device *dev;
936 	ktime_t starttime = ktime_get();
937 
938 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
939 
940 	async_error = 0;
941 	pm_transition = state;
942 
943 	mutex_lock(&dpm_list_mtx);
944 
945 	/*
946 	 * Start processing "async" root devices upfront so they don't wait for
947 	 * the "sync" devices they don't depend on.
948 	 */
949 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
950 		dpm_clear_async_state(dev);
951 		if (dpm_root_device(dev))
952 			dpm_async_with_cleanup(dev, async_resume_early);
953 	}
954 
955 	while (!list_empty(&dpm_late_early_list)) {
956 		dev = to_device(dpm_late_early_list.next);
957 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
958 
959 		if (!dpm_async_fn(dev, async_resume_early)) {
960 			get_device(dev);
961 
962 			mutex_unlock(&dpm_list_mtx);
963 
964 			device_resume_early(dev, state, false);
965 
966 			put_device(dev);
967 
968 			mutex_lock(&dpm_list_mtx);
969 		}
970 	}
971 	mutex_unlock(&dpm_list_mtx);
972 	async_synchronize_full();
973 	dpm_show_time(starttime, state, 0, "early");
974 	if (READ_ONCE(async_error))
975 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
976 
977 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
978 }
979 
980 /**
981  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
982  * @state: PM transition of the system being carried out.
983  */
984 void dpm_resume_start(pm_message_t state)
985 {
986 	dpm_resume_noirq(state);
987 	dpm_resume_early(state);
988 }
989 EXPORT_SYMBOL_GPL(dpm_resume_start);
990 
991 static void async_resume(void *data, async_cookie_t cookie);
992 
993 /**
994  * device_resume - Execute "resume" callbacks for given device.
995  * @dev: Device to handle.
996  * @state: PM transition of the system being carried out.
997  * @async: If true, the device is being resumed asynchronously.
998  */
999 static void device_resume(struct device *dev, pm_message_t state, bool async)
1000 {
1001 	pm_callback_t callback = NULL;
1002 	const char *info = NULL;
1003 	int error = 0;
1004 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1005 
1006 	TRACE_DEVICE(dev);
1007 	TRACE_RESUME(0);
1008 
1009 	if (dev->power.syscore)
1010 		goto Complete;
1011 
1012 	if (!dev->power.is_suspended)
1013 		goto Complete;
1014 
1015 	dev->power.is_suspended = false;
1016 
1017 	if (dev->power.direct_complete) {
1018 		/*
1019 		 * Allow new children to be added under the device after this
1020 		 * point if it has no PM callbacks.
1021 		 */
1022 		if (dev->power.no_pm_callbacks)
1023 			dev->power.is_prepared = false;
1024 
1025 		/* Match the pm_runtime_disable() in device_suspend(). */
1026 		pm_runtime_enable(dev);
1027 		goto Complete;
1028 	}
1029 
1030 	if (!dpm_wait_for_superior(dev, async))
1031 		goto Complete;
1032 
1033 	dpm_watchdog_set(&wd, dev);
1034 	device_lock(dev);
1035 
1036 	/*
1037 	 * This is a fib.  But we'll allow new children to be added below
1038 	 * a resumed device, even if the device hasn't been completed yet.
1039 	 */
1040 	dev->power.is_prepared = false;
1041 
1042 	if (dev->pm_domain) {
1043 		info = "power domain ";
1044 		callback = pm_op(&dev->pm_domain->ops, state);
1045 		goto Driver;
1046 	}
1047 
1048 	if (dev->type && dev->type->pm) {
1049 		info = "type ";
1050 		callback = pm_op(dev->type->pm, state);
1051 		goto Driver;
1052 	}
1053 
1054 	if (dev->class && dev->class->pm) {
1055 		info = "class ";
1056 		callback = pm_op(dev->class->pm, state);
1057 		goto Driver;
1058 	}
1059 
1060 	if (dev->bus) {
1061 		if (dev->bus->pm) {
1062 			info = "bus ";
1063 			callback = pm_op(dev->bus->pm, state);
1064 		} else if (dev->bus->resume) {
1065 			info = "legacy bus ";
1066 			callback = dev->bus->resume;
1067 			goto End;
1068 		}
1069 	}
1070 
1071  Driver:
1072 	if (!callback && dev->driver && dev->driver->pm) {
1073 		info = "driver ";
1074 		callback = pm_op(dev->driver->pm, state);
1075 	}
1076 
1077  End:
1078 	error = dpm_run_callback(callback, dev, state, info);
1079 
1080 	device_unlock(dev);
1081 	dpm_watchdog_clear(&wd);
1082 
1083  Complete:
1084 	complete_all(&dev->power.completion);
1085 
1086 	TRACE_RESUME(error);
1087 
1088 	if (error) {
1089 		WRITE_ONCE(async_error, error);
1090 		dpm_save_failed_dev(dev_name(dev));
1091 		pm_dev_err(dev, state, async ? " async" : "", error);
1092 	}
1093 
1094 	dpm_async_resume_subordinate(dev, async_resume);
1095 }
1096 
1097 static void async_resume(void *data, async_cookie_t cookie)
1098 {
1099 	struct device *dev = data;
1100 
1101 	device_resume(dev, pm_transition, true);
1102 	put_device(dev);
1103 }
1104 
1105 /**
1106  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1107  * @state: PM transition of the system being carried out.
1108  *
1109  * Execute the appropriate "resume" callback for all devices whose status
1110  * indicates that they are suspended.
1111  */
1112 void dpm_resume(pm_message_t state)
1113 {
1114 	struct device *dev;
1115 	ktime_t starttime = ktime_get();
1116 
1117 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1118 
1119 	pm_transition = state;
1120 	async_error = 0;
1121 
1122 	mutex_lock(&dpm_list_mtx);
1123 
1124 	/*
1125 	 * Start processing "async" root devices upfront so they don't wait for
1126 	 * the "sync" devices they don't depend on.
1127 	 */
1128 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1129 		dpm_clear_async_state(dev);
1130 		if (dpm_root_device(dev))
1131 			dpm_async_with_cleanup(dev, async_resume);
1132 	}
1133 
1134 	while (!list_empty(&dpm_suspended_list)) {
1135 		dev = to_device(dpm_suspended_list.next);
1136 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
1137 
1138 		if (!dpm_async_fn(dev, async_resume)) {
1139 			get_device(dev);
1140 
1141 			mutex_unlock(&dpm_list_mtx);
1142 
1143 			device_resume(dev, state, false);
1144 
1145 			put_device(dev);
1146 
1147 			mutex_lock(&dpm_list_mtx);
1148 		}
1149 	}
1150 	mutex_unlock(&dpm_list_mtx);
1151 	async_synchronize_full();
1152 	dpm_show_time(starttime, state, 0, NULL);
1153 	if (READ_ONCE(async_error))
1154 		dpm_save_failed_step(SUSPEND_RESUME);
1155 
1156 	cpufreq_resume();
1157 	devfreq_resume();
1158 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1159 }
1160 
1161 /**
1162  * device_complete - Complete a PM transition for given device.
1163  * @dev: Device to handle.
1164  * @state: PM transition of the system being carried out.
1165  */
1166 static void device_complete(struct device *dev, pm_message_t state)
1167 {
1168 	void (*callback)(struct device *) = NULL;
1169 	const char *info = NULL;
1170 
1171 	if (dev->power.syscore)
1172 		goto out;
1173 
1174 	device_lock(dev);
1175 
1176 	if (dev->pm_domain) {
1177 		info = "completing power domain ";
1178 		callback = dev->pm_domain->ops.complete;
1179 	} else if (dev->type && dev->type->pm) {
1180 		info = "completing type ";
1181 		callback = dev->type->pm->complete;
1182 	} else if (dev->class && dev->class->pm) {
1183 		info = "completing class ";
1184 		callback = dev->class->pm->complete;
1185 	} else if (dev->bus && dev->bus->pm) {
1186 		info = "completing bus ";
1187 		callback = dev->bus->pm->complete;
1188 	}
1189 
1190 	if (!callback && dev->driver && dev->driver->pm) {
1191 		info = "completing driver ";
1192 		callback = dev->driver->pm->complete;
1193 	}
1194 
1195 	if (callback) {
1196 		pm_dev_dbg(dev, state, info);
1197 		callback(dev);
1198 	}
1199 
1200 	device_unlock(dev);
1201 
1202 out:
1203 	/* If enabling runtime PM for the device is blocked, unblock it. */
1204 	pm_runtime_unblock(dev);
1205 	pm_runtime_put(dev);
1206 }
1207 
1208 /**
1209  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1210  * @state: PM transition of the system being carried out.
1211  *
1212  * Execute the ->complete() callbacks for all devices whose PM status is not
1213  * DPM_ON (this allows new devices to be registered).
1214  */
1215 void dpm_complete(pm_message_t state)
1216 {
1217 	struct list_head list;
1218 
1219 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1220 
1221 	INIT_LIST_HEAD(&list);
1222 	mutex_lock(&dpm_list_mtx);
1223 	while (!list_empty(&dpm_prepared_list)) {
1224 		struct device *dev = to_device(dpm_prepared_list.prev);
1225 
1226 		get_device(dev);
1227 		dev->power.is_prepared = false;
1228 		list_move(&dev->power.entry, &list);
1229 
1230 		mutex_unlock(&dpm_list_mtx);
1231 
1232 		trace_device_pm_callback_start(dev, "", state.event);
1233 		device_complete(dev, state);
1234 		trace_device_pm_callback_end(dev, 0);
1235 
1236 		put_device(dev);
1237 
1238 		mutex_lock(&dpm_list_mtx);
1239 	}
1240 	list_splice(&list, &dpm_list);
1241 	mutex_unlock(&dpm_list_mtx);
1242 
1243 	/* Allow device probing and trigger re-probing of deferred devices */
1244 	device_unblock_probing();
1245 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1246 }
1247 
1248 /**
1249  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1250  * @state: PM transition of the system being carried out.
1251  *
1252  * Execute "resume" callbacks for all devices and complete the PM transition of
1253  * the system.
1254  */
1255 void dpm_resume_end(pm_message_t state)
1256 {
1257 	dpm_resume(state);
1258 	pm_restore_gfp_mask();
1259 	dpm_complete(state);
1260 }
1261 EXPORT_SYMBOL_GPL(dpm_resume_end);
1262 
1263 
1264 /*------------------------- Suspend routines -------------------------*/
1265 
1266 static bool dpm_leaf_device(struct device *dev)
1267 {
1268 	struct device *child;
1269 
1270 	lockdep_assert_held(&dpm_list_mtx);
1271 
1272 	child = device_find_any_child(dev);
1273 	if (child) {
1274 		put_device(child);
1275 
1276 		return false;
1277 	}
1278 
1279 	/*
1280 	 * Since this function is required to run under dpm_list_mtx, the
1281 	 * list_empty() below will only return true if the device's list of
1282 	 * consumers is actually empty before calling it.
1283 	 */
1284 	return list_empty(&dev->links.consumers);
1285 }
1286 
1287 static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
1288 {
1289 	guard(mutex)(&dpm_list_mtx);
1290 
1291 	/*
1292 	 * If the device is suspended asynchronously and the parent's callback
1293 	 * deletes both the device and the parent itself, the parent object may
1294 	 * be freed while this function is running, so avoid that by checking
1295 	 * if the device has been deleted already as the parent cannot be
1296 	 * deleted before it.
1297 	 */
1298 	if (!device_pm_initialized(dev))
1299 		return false;
1300 
1301 	/* Start processing the device's parent if it is "async". */
1302 	if (dev->parent)
1303 		dpm_async_with_cleanup(dev->parent, func);
1304 
1305 	return true;
1306 }
1307 
1308 static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
1309 {
1310 	struct device_link *link;
1311 	int idx;
1312 
1313 	if (!dpm_async_suspend_parent(dev, func))
1314 		return;
1315 
1316 	idx = device_links_read_lock();
1317 
1318 	/* Start processing the device's "async" suppliers. */
1319 	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1320 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
1321 			dpm_async_with_cleanup(link->supplier, func);
1322 
1323 	device_links_read_unlock(idx);
1324 }
1325 
1326 static void dpm_async_suspend_complete_all(struct list_head *device_list)
1327 {
1328 	struct device *dev;
1329 
1330 	guard(mutex)(&async_wip_mtx);
1331 
1332 	list_for_each_entry_reverse(dev, device_list, power.entry) {
1333 		/*
1334 		 * In case the device is being waited for and async processing
1335 		 * has not started for it yet, let the waiters make progress.
1336 		 */
1337 		if (!dev->power.work_in_progress)
1338 			complete_all(&dev->power.completion);
1339 	}
1340 }
1341 
1342 /**
1343  * resume_event - Return a "resume" message for given "suspend" sleep state.
1344  * @sleep_state: PM message representing a sleep state.
1345  *
1346  * Return a PM message representing the resume event corresponding to given
1347  * sleep state.
1348  */
1349 static pm_message_t resume_event(pm_message_t sleep_state)
1350 {
1351 	switch (sleep_state.event) {
1352 	case PM_EVENT_SUSPEND:
1353 		return PMSG_RESUME;
1354 	case PM_EVENT_FREEZE:
1355 	case PM_EVENT_QUIESCE:
1356 		return PMSG_RECOVER;
1357 	case PM_EVENT_HIBERNATE:
1358 		return PMSG_RESTORE;
1359 	}
1360 	return PMSG_ON;
1361 }
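
/*
 * For example, if a PM_EVENT_FREEZE transition (used while creating a
 * hibernation image) fails part way through, the devices that were already
 * suspended are brought back with PMSG_RECOVER rather than a plain
 * PMSG_RESUME.
 */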
1362 
1363 static void dpm_superior_set_must_resume(struct device *dev)
1364 {
1365 	struct device_link *link;
1366 	int idx;
1367 
1368 	if (dev->parent)
1369 		dev->parent->power.must_resume = true;
1370 
1371 	idx = device_links_read_lock();
1372 
1373 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1374 		link->supplier->power.must_resume = true;
1375 
1376 	device_links_read_unlock(idx);
1377 }
1378 
1379 static void async_suspend_noirq(void *data, async_cookie_t cookie);
1380 
1381 /**
1382  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1383  * @dev: Device to handle.
1384  * @state: PM transition of the system being carried out.
1385  * @async: If true, the device is being suspended asynchronously.
1386  *
1387  * The driver of @dev will not receive interrupts while this function is being
1388  * executed.
1389  */
1390 static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1391 {
1392 	pm_callback_t callback = NULL;
1393 	const char *info = NULL;
1394 	int error = 0;
1395 
1396 	TRACE_DEVICE(dev);
1397 	TRACE_SUSPEND(0);
1398 
1399 	dpm_wait_for_subordinate(dev, async);
1400 
1401 	if (READ_ONCE(async_error))
1402 		goto Complete;
1403 
1404 	if (dev->power.syscore || dev->power.direct_complete)
1405 		goto Complete;
1406 
1407 	if (dev->pm_domain) {
1408 		info = "noirq power domain ";
1409 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1410 	} else if (dev->type && dev->type->pm) {
1411 		info = "noirq type ";
1412 		callback = pm_noirq_op(dev->type->pm, state);
1413 	} else if (dev->class && dev->class->pm) {
1414 		info = "noirq class ";
1415 		callback = pm_noirq_op(dev->class->pm, state);
1416 	} else if (dev->bus && dev->bus->pm) {
1417 		info = "noirq bus ";
1418 		callback = pm_noirq_op(dev->bus->pm, state);
1419 	}
1420 	if (callback)
1421 		goto Run;
1422 
1423 	if (dev_pm_skip_suspend(dev))
1424 		goto Skip;
1425 
1426 	if (dev->driver && dev->driver->pm) {
1427 		info = "noirq driver ";
1428 		callback = pm_noirq_op(dev->driver->pm, state);
1429 	}
1430 
1431 Run:
1432 	error = dpm_run_callback(callback, dev, state, info);
1433 	if (error) {
1434 		WRITE_ONCE(async_error, error);
1435 		dpm_save_failed_dev(dev_name(dev));
1436 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
1437 		goto Complete;
1438 	}
1439 
1440 Skip:
1441 	dev->power.is_noirq_suspended = true;
1442 
1443 	/*
1444 	 * Devices must be resumed unless they are explicitly allowed to be left
1445 	 * in suspend, but even in that case skipping the resume of devices that
1446 	 * were in use right before the system suspend (as indicated by their
1447 	 * runtime PM usage counters and child counters) would be suboptimal.
1448 	 */
1449 	if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1450 	      dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
1451 		dev->power.must_resume = true;
1452 
1453 	if (dev->power.must_resume)
1454 		dpm_superior_set_must_resume(dev);
1455 
1456 Complete:
1457 	complete_all(&dev->power.completion);
1458 	TRACE_SUSPEND(error);
1459 
1460 	if (error || READ_ONCE(async_error))
1461 		return;
1462 
1463 	dpm_async_suspend_superior(dev, async_suspend_noirq);
1464 }
1465 
1466 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1467 {
1468 	struct device *dev = data;
1469 
1470 	device_suspend_noirq(dev, pm_transition, true);
1471 	put_device(dev);
1472 }
1473 
1474 static int dpm_noirq_suspend_devices(pm_message_t state)
1475 {
1476 	ktime_t starttime = ktime_get();
1477 	struct device *dev;
1478 	int error;
1479 
1480 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1481 
1482 	pm_transition = state;
1483 	async_error = 0;
1484 
1485 	mutex_lock(&dpm_list_mtx);
1486 
1487 	/*
1488 	 * Start processing "async" leaf devices upfront so they don't need to
1489 	 * wait for the "sync" devices they don't depend on.
1490 	 */
1491 	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
1492 		dpm_clear_async_state(dev);
1493 		if (dpm_leaf_device(dev))
1494 			dpm_async_with_cleanup(dev, async_suspend_noirq);
1495 	}
1496 
1497 	while (!list_empty(&dpm_late_early_list)) {
1498 		dev = to_device(dpm_late_early_list.prev);
1499 
1500 		list_move(&dev->power.entry, &dpm_noirq_list);
1501 
1502 		if (dpm_async_fn(dev, async_suspend_noirq))
1503 			continue;
1504 
1505 		get_device(dev);
1506 
1507 		mutex_unlock(&dpm_list_mtx);
1508 
1509 		device_suspend_noirq(dev, state, false);
1510 
1511 		put_device(dev);
1512 
1513 		mutex_lock(&dpm_list_mtx);
1514 
1515 		if (READ_ONCE(async_error)) {
1516 			dpm_async_suspend_complete_all(&dpm_late_early_list);
1517 			/*
1518 			 * Move all devices to the target list to resume them
1519 			 * properly.
1520 			 */
1521 			list_splice_init(&dpm_late_early_list, &dpm_noirq_list);
1522 			break;
1523 		}
1524 	}
1525 
1526 	mutex_unlock(&dpm_list_mtx);
1527 
1528 	async_synchronize_full();
1529 
1530 	error = READ_ONCE(async_error);
1531 	if (error)
1532 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1533 
1534 	dpm_show_time(starttime, state, error, "noirq");
1535 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1536 	return error;
1537 }
1538 
1539 /**
1540  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1541  * @state: PM transition of the system being carried out.
1542  *
1543  * Prevent device drivers' interrupt handlers from being called and invoke
1544  * "noirq" suspend callbacks for all non-sysdev devices.
1545  */
1546 int dpm_suspend_noirq(pm_message_t state)
1547 {
1548 	int ret;
1549 
1550 	device_wakeup_arm_wake_irqs();
1551 	suspend_device_irqs();
1552 
1553 	ret = dpm_noirq_suspend_devices(state);
1554 	if (ret)
1555 		dpm_resume_noirq(resume_event(state));
1556 
1557 	return ret;
1558 }
1559 
1560 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1561 {
1562 	struct device *parent = dev->parent;
1563 
1564 	if (!parent)
1565 		return;
1566 
1567 	spin_lock_irq(&parent->power.lock);
1568 
1569 	if (device_wakeup_path(dev) && !parent->power.ignore_children)
1570 		parent->power.wakeup_path = true;
1571 
1572 	spin_unlock_irq(&parent->power.lock);
1573 }
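
/*
 * Illustrative sketch (hypothetical driver code): the wakeup_path propagation
 * above only matters for devices that are wakeup-capable and have wakeup
 * enabled, which a driver typically requests as below.
 */
static void __maybe_unused foo_example_setup_wakeup(struct device *dev)
{
	/* Mark the device wakeup-capable and enable its wakeup source. */
	device_init_wakeup(dev, true);
}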
1574 
1575 static void async_suspend_late(void *data, async_cookie_t cookie);
1576 
1577 /**
1578  * device_suspend_late - Execute a "late suspend" callback for given device.
1579  * @dev: Device to handle.
1580  * @state: PM transition of the system being carried out.
1581  * @async: If true, the device is being suspended asynchronously.
1582  *
1583  * Runtime PM is disabled for @dev while this function is being executed.
1584  */
1585 static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
1586 {
1587 	pm_callback_t callback = NULL;
1588 	const char *info = NULL;
1589 	int error = 0;
1590 
1591 	TRACE_DEVICE(dev);
1592 	TRACE_SUSPEND(0);
1593 
1594 	/*
1595 	 * Disable runtime PM for the device without checking if there is a
1596 	 * pending resume request for it.
1597 	 */
1598 	__pm_runtime_disable(dev, false);
1599 
1600 	dpm_wait_for_subordinate(dev, async);
1601 
1602 	if (READ_ONCE(async_error))
1603 		goto Complete;
1604 
1605 	if (pm_wakeup_pending()) {
1606 		WRITE_ONCE(async_error, -EBUSY);
1607 		goto Complete;
1608 	}
1609 
1610 	if (dev->power.syscore || dev->power.direct_complete)
1611 		goto Complete;
1612 
1613 	if (dev->pm_domain) {
1614 		info = "late power domain ";
1615 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1616 	} else if (dev->type && dev->type->pm) {
1617 		info = "late type ";
1618 		callback = pm_late_early_op(dev->type->pm, state);
1619 	} else if (dev->class && dev->class->pm) {
1620 		info = "late class ";
1621 		callback = pm_late_early_op(dev->class->pm, state);
1622 	} else if (dev->bus && dev->bus->pm) {
1623 		info = "late bus ";
1624 		callback = pm_late_early_op(dev->bus->pm, state);
1625 	}
1626 	if (callback)
1627 		goto Run;
1628 
1629 	if (dev_pm_skip_suspend(dev))
1630 		goto Skip;
1631 
1632 	if (dev->driver && dev->driver->pm) {
1633 		info = "late driver ";
1634 		callback = pm_late_early_op(dev->driver->pm, state);
1635 	}
1636 
1637 Run:
1638 	error = dpm_run_callback(callback, dev, state, info);
1639 	if (error) {
1640 		WRITE_ONCE(async_error, error);
1641 		dpm_save_failed_dev(dev_name(dev));
1642 		pm_dev_err(dev, state, async ? " async late" : " late", error);
1643 		goto Complete;
1644 	}
1645 	dpm_propagate_wakeup_to_parent(dev);
1646 
1647 Skip:
1648 	dev->power.is_late_suspended = true;
1649 
1650 Complete:
1651 	TRACE_SUSPEND(error);
1652 	complete_all(&dev->power.completion);
1653 
1654 	if (error || READ_ONCE(async_error))
1655 		return;
1656 
1657 	dpm_async_suspend_superior(dev, async_suspend_late);
1658 }
1659 
1660 static void async_suspend_late(void *data, async_cookie_t cookie)
1661 {
1662 	struct device *dev = data;
1663 
1664 	device_suspend_late(dev, pm_transition, true);
1665 	put_device(dev);
1666 }
1667 
1668 /**
1669  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1670  * @state: PM transition of the system being carried out.
1671  */
1672 int dpm_suspend_late(pm_message_t state)
1673 {
1674 	ktime_t starttime = ktime_get();
1675 	struct device *dev;
1676 	int error;
1677 
1678 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1679 
1680 	pm_transition = state;
1681 	async_error = 0;
1682 
1683 	wake_up_all_idle_cpus();
1684 
1685 	mutex_lock(&dpm_list_mtx);
1686 
1687 	/*
1688 	 * Start processing "async" leaf devices upfront so they don't need to
1689 	 * wait for the "sync" devices they don't depend on.
1690 	 */
1691 	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
1692 		dpm_clear_async_state(dev);
1693 		if (dpm_leaf_device(dev))
1694 			dpm_async_with_cleanup(dev, async_suspend_late);
1695 	}
1696 
1697 	while (!list_empty(&dpm_suspended_list)) {
1698 		dev = to_device(dpm_suspended_list.prev);
1699 
1700 		list_move(&dev->power.entry, &dpm_late_early_list);
1701 
1702 		if (dpm_async_fn(dev, async_suspend_late))
1703 			continue;
1704 
1705 		get_device(dev);
1706 
1707 		mutex_unlock(&dpm_list_mtx);
1708 
1709 		device_suspend_late(dev, state, false);
1710 
1711 		put_device(dev);
1712 
1713 		mutex_lock(&dpm_list_mtx);
1714 
1715 		if (READ_ONCE(async_error)) {
1716 			dpm_async_suspend_complete_all(&dpm_suspended_list);
1717 			/*
1718 			 * Move all devices to the target list to resume them
1719 			 * properly.
1720 			 */
1721 			list_splice_init(&dpm_suspended_list, &dpm_late_early_list);
1722 			break;
1723 		}
1724 	}
1725 
1726 	mutex_unlock(&dpm_list_mtx);
1727 
1728 	async_synchronize_full();
1729 
1730 	error = READ_ONCE(async_error);
1731 	if (error) {
1732 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1733 		dpm_resume_early(resume_event(state));
1734 	}
1735 	dpm_show_time(starttime, state, error, "late");
1736 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1737 	return error;
1738 }
1739 
1740 /**
1741  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1742  * @state: PM transition of the system being carried out.
1743  */
1744 int dpm_suspend_end(pm_message_t state)
1745 {
1746 	ktime_t starttime = ktime_get();
1747 	int error;
1748 
1749 	error = dpm_suspend_late(state);
1750 	if (error)
1751 		goto out;
1752 
1753 	error = dpm_suspend_noirq(state);
1754 	if (error)
1755 		dpm_resume_early(resume_event(state));
1756 
1757 out:
1758 	dpm_show_time(starttime, state, error, "end");
1759 	return error;
1760 }
1761 EXPORT_SYMBOL_GPL(dpm_suspend_end);
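
/*
 * Note on ordering: together with dpm_prepare() and dpm_suspend(), the
 * suspend side of a transition runs as prepare -> suspend -> suspend_late ->
 * suspend_noirq, and dpm_resume_end() above unwinds it in reverse as
 * resume_noirq -> resume_early -> resume -> complete.
 */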
1762 
1763 /**
1764  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1765  * @dev: Device to suspend.
1766  * @state: PM transition of the system being carried out.
1767  * @cb: Suspend callback to execute.
1768  * @info: string description of caller.
1769  */
1770 static int legacy_suspend(struct device *dev, pm_message_t state,
1771 			  int (*cb)(struct device *dev, pm_message_t state),
1772 			  const char *info)
1773 {
1774 	int error;
1775 	ktime_t calltime;
1776 
1777 	calltime = initcall_debug_start(dev, cb);
1778 
1779 	trace_device_pm_callback_start(dev, info, state.event);
1780 	error = cb(dev, state);
1781 	trace_device_pm_callback_end(dev, error);
1782 	suspend_report_result(dev, cb, error);
1783 
1784 	initcall_debug_report(dev, calltime, cb, error);
1785 
1786 	return error;
1787 }
1788 
1789 static void dpm_clear_superiors_direct_complete(struct device *dev)
1790 {
1791 	struct device_link *link;
1792 	int idx;
1793 
1794 	if (dev->parent) {
1795 		spin_lock_irq(&dev->parent->power.lock);
1796 		dev->parent->power.direct_complete = false;
1797 		spin_unlock_irq(&dev->parent->power.lock);
1798 	}
1799 
1800 	idx = device_links_read_lock();
1801 
1802 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1803 		spin_lock_irq(&link->supplier->power.lock);
1804 		link->supplier->power.direct_complete = false;
1805 		spin_unlock_irq(&link->supplier->power.lock);
1806 	}
1807 
1808 	device_links_read_unlock(idx);
1809 }
1810 
1811 static void async_suspend(void *data, async_cookie_t cookie);
1812 
1813 /**
1814  * device_suspend - Execute "suspend" callbacks for given device.
1815  * @dev: Device to handle.
1816  * @state: PM transition of the system being carried out.
1817  * @async: If true, the device is being suspended asynchronously.
1818  */
1819 static void device_suspend(struct device *dev, pm_message_t state, bool async)
1820 {
1821 	pm_callback_t callback = NULL;
1822 	const char *info = NULL;
1823 	int error = 0;
1824 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1825 
1826 	TRACE_DEVICE(dev);
1827 	TRACE_SUSPEND(0);
1828 
1829 	dpm_wait_for_subordinate(dev, async);
1830 
1831 	if (READ_ONCE(async_error)) {
1832 		dev->power.direct_complete = false;
1833 		goto Complete;
1834 	}
1835 
1836 	/*
1837 	 * Wait for possible runtime PM transitions of the device in progress
1838 	 * to complete and if there's a runtime resume request pending for it,
1839 	 * resume it before proceeding with invoking the system-wide suspend
1840 	 * callbacks for it.
1841 	 *
1842 	 * If the system-wide suspend callbacks below change the configuration
1843 	 * of the device, they must disable runtime PM for it or otherwise
1844 	 * ensure that its runtime-resume callbacks will not be confused by that
1845 	 * change in case they are invoked going forward.
1846 	 */
1847 	pm_runtime_barrier(dev);
1848 
1849 	if (pm_wakeup_pending()) {
1850 		dev->power.direct_complete = false;
1851 		WRITE_ONCE(async_error, -EBUSY);
1852 		goto Complete;
1853 	}
1854 
1855 	if (dev->power.syscore)
1856 		goto Complete;
1857 
1858 	/* Avoid direct_complete to let wakeup_path propagate. */
1859 	if (device_may_wakeup(dev) || device_wakeup_path(dev))
1860 		dev->power.direct_complete = false;
1861 
1862 	if (dev->power.direct_complete) {
1863 		if (pm_runtime_status_suspended(dev)) {
1864 			pm_runtime_disable(dev);
1865 			if (pm_runtime_status_suspended(dev)) {
1866 				pm_dev_dbg(dev, state, "direct-complete ");
1867 				dev->power.is_suspended = true;
1868 				goto Complete;
1869 			}
1870 
1871 			pm_runtime_enable(dev);
1872 		}
1873 		dev->power.direct_complete = false;
1874 	}
1875 
1876 	dev->power.may_skip_resume = true;
1877 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1878 
1879 	dpm_watchdog_set(&wd, dev);
1880 	device_lock(dev);
1881 
1882 	if (dev->pm_domain) {
1883 		info = "power domain ";
1884 		callback = pm_op(&dev->pm_domain->ops, state);
1885 		goto Run;
1886 	}
1887 
1888 	if (dev->type && dev->type->pm) {
1889 		info = "type ";
1890 		callback = pm_op(dev->type->pm, state);
1891 		goto Run;
1892 	}
1893 
1894 	if (dev->class && dev->class->pm) {
1895 		info = "class ";
1896 		callback = pm_op(dev->class->pm, state);
1897 		goto Run;
1898 	}
1899 
1900 	if (dev->bus) {
1901 		if (dev->bus->pm) {
1902 			info = "bus ";
1903 			callback = pm_op(dev->bus->pm, state);
1904 		} else if (dev->bus->suspend) {
1905 			pm_dev_dbg(dev, state, "legacy bus ");
1906 			error = legacy_suspend(dev, state, dev->bus->suspend,
1907 						"legacy bus ");
1908 			goto End;
1909 		}
1910 	}
1911 
1912  Run:
1913 	if (!callback && dev->driver && dev->driver->pm) {
1914 		info = "driver ";
1915 		callback = pm_op(dev->driver->pm, state);
1916 	}
1917 
1918 	error = dpm_run_callback(callback, dev, state, info);
1919 
1920  End:
1921 	if (!error) {
1922 		dev->power.is_suspended = true;
1923 		if (device_may_wakeup(dev))
1924 			dev->power.wakeup_path = true;
1925 
1926 		dpm_propagate_wakeup_to_parent(dev);
1927 		dpm_clear_superiors_direct_complete(dev);
1928 	}
1929 
1930 	device_unlock(dev);
1931 	dpm_watchdog_clear(&wd);
1932 
1933  Complete:
1934 	if (error) {
1935 		WRITE_ONCE(async_error, error);
1936 		dpm_save_failed_dev(dev_name(dev));
1937 		pm_dev_err(dev, state, async ? " async" : "", error);
1938 	}
1939 
1940 	complete_all(&dev->power.completion);
1941 	TRACE_SUSPEND(error);
1942 
1943 	if (error || READ_ONCE(async_error))
1944 		return;
1945 
1946 	dpm_async_suspend_superior(dev, async_suspend);
1947 }
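
/*
 * Illustrative sketch, not taken from this file: the callback that
 * device_suspend() ends up invoking for a typical device is supplied by
 * its driver through a struct dev_pm_ops, unless a PM domain, device
 * type, class or bus callback takes precedence per the lookup order
 * above.  The "foo" names below are hypothetical.
 */
static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware and save any volatile state. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Undo whatever foo_suspend() did. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};
/* The driver would then point its .driver.pm field at &foo_pm_ops. */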
1948 
1949 static void async_suspend(void *data, async_cookie_t cookie)
1950 {
1951 	struct device *dev = data;
1952 
1953 	device_suspend(dev, pm_transition, true);
1954 	put_device(dev);
1955 }
1956 
1957 /**
1958  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1959  * @state: PM transition of the system being carried out.
1960  */
1961 int dpm_suspend(pm_message_t state)
1962 {
1963 	ktime_t starttime = ktime_get();
1964 	struct device *dev;
1965 	int error;
1966 
1967 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1968 	might_sleep();
1969 
1970 	devfreq_suspend();
1971 	cpufreq_suspend();
1972 
1973 	pm_transition = state;
1974 	async_error = 0;
1975 
1976 	mutex_lock(&dpm_list_mtx);
1977 
1978 	/*
1979 	 * Start processing "async" leaf devices upfront so they don't need to
1980 	 * wait for the "sync" devices they don't depend on.
1981 	 */
1982 	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
1983 		dpm_clear_async_state(dev);
1984 		if (dpm_leaf_device(dev))
1985 			dpm_async_with_cleanup(dev, async_suspend);
1986 	}
1987 
1988 	while (!list_empty(&dpm_prepared_list)) {
1989 		dev = to_device(dpm_prepared_list.prev);
1990 
1991 		list_move(&dev->power.entry, &dpm_suspended_list);
1992 
1993 		if (dpm_async_fn(dev, async_suspend))
1994 			continue;
1995 
1996 		get_device(dev);
1997 
1998 		mutex_unlock(&dpm_list_mtx);
1999 
2000 		device_suspend(dev, state, false);
2001 
2002 		put_device(dev);
2003 
2004 		mutex_lock(&dpm_list_mtx);
2005 
2006 		if (READ_ONCE(async_error)) {
2007 			dpm_async_suspend_complete_all(&dpm_prepared_list);
2008 			/*
2009 			 * Move all devices to the target list to resume them
2010 			 * properly.
2011 			 */
2012 			list_splice_init(&dpm_prepared_list, &dpm_suspended_list);
2013 			break;
2014 		}
2015 	}
2016 
2017 	mutex_unlock(&dpm_list_mtx);
2018 
2019 	async_synchronize_full();
2020 
2021 	error = READ_ONCE(async_error);
2022 	if (error)
2023 		dpm_save_failed_step(SUSPEND_SUSPEND);
2024 
2025 	dpm_show_time(starttime, state, error, NULL);
2026 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
2027 	return error;
2028 }
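
/*
 * Illustrative sketch, hypothetical driver code (assumes
 * <linux/platform_device.h>): a device is only picked up by the
 * asynchronous pass in dpm_suspend() above if it has been marked for
 * async suspend/resume, typically at probe time, and the global
 * /sys/power/pm_async knob is enabled.
 */
static int foo_async_probe(struct platform_device *pdev)
{
	/* Let the PM core handle this device in the async suspend path. */
	device_enable_async_suspend(&pdev->dev);

	return 0;
}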
2029 
2030 static bool device_prepare_smart_suspend(struct device *dev)
2031 {
2032 	struct device_link *link;
2033 	bool ret = true;
2034 	int idx;
2035 
2036 	/*
2037 	 * The "smart suspend" feature is enabled for devices whose drivers ask
2038 	 * for it and for devices without PM callbacks.
2039 	 *
2040 	 * However, if "smart suspend" is not enabled for the device's parent
2041 	 * or for any of its suppliers whose device links take runtime PM into
2042 	 * account, it cannot be enabled for the device either.
2043 	 */
2044 	if (!dev->power.no_pm_callbacks &&
2045 	    !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
2046 		return false;
2047 
2048 	if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
2049 	    !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
2050 		return false;
2051 
2052 	idx = device_links_read_lock();
2053 
2054 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
2055 		if (!(link->flags & DL_FLAG_PM_RUNTIME))
2056 			continue;
2057 
2058 		if (!dev_pm_smart_suspend(link->supplier) &&
2059 		    !pm_runtime_blocked(link->supplier)) {
2060 			ret = false;
2061 			break;
2062 		}
2063 	}
2064 
2065 	device_links_read_unlock(idx);
2066 
2067 	return ret;
2068 }
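
/*
 * Illustrative sketch, hypothetical driver code (assumes
 * <linux/platform_device.h>): the checks above only apply to devices
 * whose drivers have opted into "smart suspend", which is normally done
 * once at probe time.
 */
static int foo_smart_probe(struct platform_device *pdev)
{
	/*
	 * Ask the PM core to skip system-wide suspend callbacks when the
	 * device is already runtime-suspended, subject to the parent and
	 * supplier constraints checked in device_prepare_smart_suspend().
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);

	return 0;
}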
2069 
2070 /**
2071  * device_prepare - Prepare a device for system power transition.
2072  * @dev: Device to handle.
2073  * @state: PM transition of the system being carried out.
2074  *
2075  * Execute the ->prepare() callback(s) for given device.  No new children of the
2076  * device may be registered after this function has returned.
2077  */
2078 static int device_prepare(struct device *dev, pm_message_t state)
2079 {
2080 	int (*callback)(struct device *) = NULL;
2081 	bool smart_suspend;
2082 	int ret = 0;
2083 
2084 	/*
2085 	 * If a device's parent goes into runtime suspend at the wrong time,
2086 	 * it won't be possible to resume the device.  To prevent this we
2087 	 * block runtime suspend here, during the prepare phase, and allow
2088 	 * it again during the complete phase.
2089 	 */
2090 	pm_runtime_get_noresume(dev);
2091 	/*
2092 	 * If runtime PM is disabled for the device at this point and it has
2093 	 * never been enabled so far, it should not be enabled until this system
2094 	 * suspend-resume cycle is complete, so prepare to trigger a warning on
2095 	 * subsequent attempts to enable it.
2096 	 */
2097 	smart_suspend = !pm_runtime_block_if_disabled(dev);
2098 
2099 	if (dev->power.syscore)
2100 		return 0;
2101 
2102 	device_lock(dev);
2103 
2104 	dev->power.wakeup_path = false;
2105 
2106 	if (dev->power.no_pm_callbacks)
2107 		goto unlock;
2108 
2109 	if (dev->pm_domain)
2110 		callback = dev->pm_domain->ops.prepare;
2111 	else if (dev->type && dev->type->pm)
2112 		callback = dev->type->pm->prepare;
2113 	else if (dev->class && dev->class->pm)
2114 		callback = dev->class->pm->prepare;
2115 	else if (dev->bus && dev->bus->pm)
2116 		callback = dev->bus->pm->prepare;
2117 
2118 	if (!callback && dev->driver && dev->driver->pm)
2119 		callback = dev->driver->pm->prepare;
2120 
2121 	if (callback)
2122 		ret = callback(dev);
2123 
2124 unlock:
2125 	device_unlock(dev);
2126 
2127 	if (ret < 0) {
2128 		suspend_report_result(dev, callback, ret);
2129 		pm_runtime_put(dev);
2130 		return ret;
2131 	}
2132 	/* Do not enable "smart suspend" for devices with disabled runtime PM. */
2133 	if (smart_suspend)
2134 		smart_suspend = device_prepare_smart_suspend(dev);
2135 
2136 	spin_lock_irq(&dev->power.lock);
2137 
2138 	dev->power.smart_suspend = smart_suspend;
2139 	/*
2140 	 * A positive return value from ->prepare() means "this device appears
2141 	 * to be runtime-suspended and its state is fine, so if it really is
2142 	 * runtime-suspended, you can leave it in that state provided that you
2143 	 * will do the same thing with all of its descendants".  This only
2144 	 * applies to suspend transitions, however.
2145 	 */
2146 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2147 		(ret > 0 || dev->power.no_pm_callbacks) &&
2148 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2149 
2150 	spin_unlock_irq(&dev->power.lock);
2151 
2152 	return 0;
2153 }
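
/*
 * Illustrative sketch, hypothetical ->prepare() implementation: a
 * positive return value reports that the device looks runtime-suspended
 * and its state is suitable, so device_prepare() above may set
 * direct_complete for it (during suspend transitions only).
 */
static int foo_prepare(struct device *dev)
{
	/*
	 * If the device is runtime-suspended and can stay that way across
	 * the transition, say so; otherwise request the full set of
	 * suspend callbacks by returning 0.
	 */
	return pm_runtime_suspended(dev);
}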
2154 
2155 /**
2156  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
2157  * @state: PM transition of the system being carried out.
2158  *
2159  * Execute the ->prepare() callback(s) for all devices.
2160  */
2161 int dpm_prepare(pm_message_t state)
2162 {
2163 	int error = 0;
2164 
2165 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
2166 
2167 	/*
2168 	 * Give the known devices a chance to complete their probes before
2169 	 * probing of devices is disabled below. This synchronization point is
2170 	 * important at least at boot time and on restore from hibernation.
2171 	 */
2172 	wait_for_device_probe();
2173 	/*
2174 	 * Probing devices during suspend or hibernation is unsafe and would
2175 	 * make system behavior unpredictable, so prohibit device probing here
2176 	 * and defer any probes instead. The normal behavior will be restored
2177 	 * in dpm_complete().
2178 	 */
2179 	device_block_probing();
2180 
2181 	mutex_lock(&dpm_list_mtx);
2182 	while (!list_empty(&dpm_list) && !error) {
2183 		struct device *dev = to_device(dpm_list.next);
2184 
2185 		get_device(dev);
2186 
2187 		mutex_unlock(&dpm_list_mtx);
2188 
2189 		trace_device_pm_callback_start(dev, "", state.event);
2190 		error = device_prepare(dev, state);
2191 		trace_device_pm_callback_end(dev, error);
2192 
2193 		mutex_lock(&dpm_list_mtx);
2194 
2195 		if (!error) {
2196 			dev->power.is_prepared = true;
2197 			if (!list_empty(&dev->power.entry))
2198 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
2199 		} else if (error == -EAGAIN) {
2200 			error = 0;
2201 		} else {
2202 			dev_info(dev, "not prepared for power transition: code %d\n",
2203 				 error);
2204 		}
2205 
2206 		mutex_unlock(&dpm_list_mtx);
2207 
2208 		put_device(dev);
2209 
2210 		mutex_lock(&dpm_list_mtx);
2211 	}
2212 	mutex_unlock(&dpm_list_mtx);
2213 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2214 	return error;
2215 }
2216 
2217 /**
2218  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2219  * @state: PM transition of the system being carried out.
2220  *
2221  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2222  * callbacks for them.
2223  */
2224 int dpm_suspend_start(pm_message_t state)
2225 {
2226 	ktime_t starttime = ktime_get();
2227 	int error;
2228 
2229 	error = dpm_prepare(state);
2230 	if (error)
2231 		dpm_save_failed_step(SUSPEND_PREPARE);
2232 	else {
2233 		pm_restrict_gfp_mask();
2234 		error = dpm_suspend(state);
2235 	}
2236 
2237 	dpm_show_time(starttime, state, error, "start");
2238 	return error;
2239 }
2240 EXPORT_SYMBOL_GPL(dpm_suspend_start);
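
/*
 * Condensed sketch of the expected calling sequence, modeled loosely on
 * the system sleep core and heavily simplified: dpm_suspend_start() is
 * the first device-related step of entering a sleep state, and its
 * effects are unwound by dpm_resume_end() on the way out, including on
 * failure.
 */
static int example_enter_sleep_state(void)
{
	int error;

	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error)
		goto resume;

	/* ... dpm_suspend_end(), platform hooks, actual sleep entry ... */

resume:
	dpm_resume_end(PMSG_RESUME);
	return error;
}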
2241 
2242 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
2243 {
2244 	if (ret)
2245 		dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
2246 }
2247 EXPORT_SYMBOL_GPL(__suspend_report_result);
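
/*
 * Illustrative sketch, hypothetical middle-layer code: callers normally
 * go through the suspend_report_result() wrapper so that a failing
 * callback is identified by name in the log.
 */
static int example_run_legacy_suspend(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->driver && dev->driver->suspend) {
		error = dev->driver->suspend(dev, state);
		suspend_report_result(dev, dev->driver->suspend, error);
	}

	return error;
}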
2248 
2249 /**
2250  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2251  * @subordinate: Device that needs to wait for @dev.
2252  * @dev: Device to wait for.
2253  */
2254 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2255 {
2256 	dpm_wait(dev, subordinate->power.async_suspend);
2257 	return async_error;
2258 }
2259 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
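
/*
 * Illustrative sketch, hypothetical driver code: a suspend callback for a
 * device that must not be quiesced until another device it cooperates
 * with has finished suspending can wait for it explicitly.  The foo_link
 * structure and its partner_dev field are made up for this example.
 */
struct foo_link {
	struct device *partner_dev;
};

static int foo_link_suspend(struct device *dev)
{
	struct foo_link *link = dev_get_drvdata(dev);
	int error;

	/* Block until the partner device has completed the current phase. */
	error = device_pm_wait_for_dev(dev, link->partner_dev);
	if (error)
		return error;

	/* ... now it is safe to quiesce this device ... */
	return 0;
}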
2260 
2261 /**
2262  * dpm_for_each_dev - device iterator.
2263  * @data: data for the callback.
2264  * @fn: function to be called for each device.
2265  *
2266  * Iterate over devices in dpm_list, and call @fn for each device,
2267  * passing it @data.
2268  */
2269 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2270 {
2271 	struct device *dev;
2272 
2273 	if (!fn)
2274 		return;
2275 
2276 	device_pm_lock();
2277 	list_for_each_entry(dev, &dpm_list, power.entry)
2278 		fn(dev, data);
2279 	device_pm_unlock();
2280 }
2281 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
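
/*
 * Illustrative sketch, hypothetical diagnostic helper: walk every device
 * known to the PM core and count how many are allowed to wake the system.
 */
static void example_count_wakeup_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	if (device_may_wakeup(dev))
		(*count)++;
}

static unsigned int example_count_wakeup_devices(void)
{
	unsigned int count = 0;

	dpm_for_each_dev(&count, example_count_wakeup_dev);

	return count;
}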
2282 
2283 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2284 {
2285 	if (!ops)
2286 		return true;
2287 
2288 	return !ops->prepare &&
2289 	       !ops->suspend &&
2290 	       !ops->suspend_late &&
2291 	       !ops->suspend_noirq &&
2292 	       !ops->resume_noirq &&
2293 	       !ops->resume_early &&
2294 	       !ops->resume &&
2295 	       !ops->complete;
2296 }
2297 
2298 void device_pm_check_callbacks(struct device *dev)
2299 {
2300 	unsigned long flags;
2301 
2302 	spin_lock_irqsave(&dev->power.lock, flags);
2303 	dev->power.no_pm_callbacks =
2304 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2305 		 !dev->bus->suspend && !dev->bus->resume)) &&
2306 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2307 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2308 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2309 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2310 		 !dev->driver->suspend && !dev->driver->resume));
2311 	spin_unlock_irqrestore(&dev->power.lock, flags);
2312 }
2313 
2314 bool dev_pm_skip_suspend(struct device *dev)
2315 {
2316 	return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
2317 }
2318
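
/*
 * Illustrative sketch, hypothetical middle-layer code: subsystems that
 * support DPM_FLAG_SMART_SUSPEND typically check dev_pm_skip_suspend() at
 * the start of their late/noirq suspend callbacks and bail out early when
 * the device is to be left in runtime suspend.
 */
static int example_subsys_suspend_late(struct device *dev)
{
	/* Nothing to do if the device stays runtime-suspended. */
	if (dev_pm_skip_suspend(dev))
		return 0;

	/* Otherwise run the regular late-suspend work. */
	return pm_generic_suspend_late(dev);
}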