1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19 
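/*
 * For reference, a minimal sketch (with hypothetical foo_* names) of the
 * driver-side counterpart of this file: a driver publishes a struct
 * dev_pm_ops, and the PM core (directly or through the device's subsystem)
 * invokes those callbacks during the phases implemented below.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = { .name = "foo", .pm = &foo_pm_ops },
 *	};
 */
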
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36 
37 #include "../base.h"
38 #include "power.h"
39 
40 typedef int (*pm_callback_t)(struct device *);
41 
42 /*
43  * The entries in dpm_list are in a depth-first order, simply
44  * because children are guaranteed to be discovered after parents, and
45  * are inserted at the back of the list on discovery.
46  *
47  * Since device_pm_add() may be called with a device lock held,
48  * we must never try to acquire a device lock while holding
49  * dpm_list_mtx.
50  */
51 
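/*
 * During suspend, devices migrate from dpm_list to dpm_prepared_list
 * (prepare), dpm_suspended_list (suspend), dpm_late_early_list (late suspend)
 * and finally dpm_noirq_list (noirq suspend); the resume path moves them back
 * in the opposite direction.
 */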
52 LIST_HEAD(dpm_list);
53 static LIST_HEAD(dpm_prepared_list);
54 static LIST_HEAD(dpm_suspended_list);
55 static LIST_HEAD(dpm_late_early_list);
56 static LIST_HEAD(dpm_noirq_list);
57 
58 struct suspend_stats suspend_stats;
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61 
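/*
 * Error reported during the current transition phase, possibly by an async
 * thread; a nonzero value makes the suspend loops bail out early.
 */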
62 static int async_error;
63 
64 static char *pm_verb(int event)
65 {
66 	switch (event) {
67 	case PM_EVENT_SUSPEND:
68 		return "suspend";
69 	case PM_EVENT_RESUME:
70 		return "resume";
71 	case PM_EVENT_FREEZE:
72 		return "freeze";
73 	case PM_EVENT_QUIESCE:
74 		return "quiesce";
75 	case PM_EVENT_HIBERNATE:
76 		return "hibernate";
77 	case PM_EVENT_THAW:
78 		return "thaw";
79 	case PM_EVENT_RESTORE:
80 		return "restore";
81 	case PM_EVENT_RECOVER:
82 		return "recover";
83 	default:
84 		return "(unknown PM event)";
85 	}
86 }
87 
88 /**
89  * device_pm_sleep_init - Initialize system suspend-related device fields.
90  * @dev: Device object being initialized.
91  */
92 void device_pm_sleep_init(struct device *dev)
93 {
94 	dev->power.is_prepared = false;
95 	dev->power.is_suspended = false;
96 	dev->power.is_noirq_suspended = false;
97 	dev->power.is_late_suspended = false;
98 	init_completion(&dev->power.completion);
99 	complete_all(&dev->power.completion);
100 	dev->power.wakeup = NULL;
101 	INIT_LIST_HEAD(&dev->power.entry);
102 }
103 
104 /**
105  * device_pm_lock - Lock the list of active devices used by the PM core.
106  */
107 void device_pm_lock(void)
108 {
109 	mutex_lock(&dpm_list_mtx);
110 }
111 
112 /**
113  * device_pm_unlock - Unlock the list of active devices used by the PM core.
114  */
115 void device_pm_unlock(void)
116 {
117 	mutex_unlock(&dpm_list_mtx);
118 }
119 
120 /**
121  * device_pm_add - Add a device to the PM core's list of active devices.
122  * @dev: Device to add to the list.
123  */
124 void device_pm_add(struct device *dev)
125 {
126 	pr_debug("PM: Adding info for %s:%s\n",
127 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
128 	device_pm_check_callbacks(dev);
129 	mutex_lock(&dpm_list_mtx);
130 	if (dev->parent && dev->parent->power.is_prepared)
131 		dev_warn(dev, "parent %s should not be sleeping\n",
132 			dev_name(dev->parent));
133 	list_add_tail(&dev->power.entry, &dpm_list);
134 	dev->power.in_dpm_list = true;
135 	mutex_unlock(&dpm_list_mtx);
136 }
137 
138 /**
139  * device_pm_remove - Remove a device from the PM core's list of active devices.
140  * @dev: Device to be removed from the list.
141  */
142 void device_pm_remove(struct device *dev)
143 {
144 	pr_debug("PM: Removing info for %s:%s\n",
145 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 	complete_all(&dev->power.completion);
147 	mutex_lock(&dpm_list_mtx);
148 	list_del_init(&dev->power.entry);
149 	dev->power.in_dpm_list = false;
150 	mutex_unlock(&dpm_list_mtx);
151 	device_wakeup_disable(dev);
152 	pm_runtime_remove(dev);
153 	device_pm_check_callbacks(dev);
154 }
155 
156 /**
157  * device_pm_move_before - Move device in the PM core's list of active devices.
158  * @deva: Device to move in dpm_list.
159  * @devb: Device @deva should come before.
160  */
161 void device_pm_move_before(struct device *deva, struct device *devb)
162 {
163 	pr_debug("PM: Moving %s:%s before %s:%s\n",
164 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
165 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
166 	/* Delete deva from dpm_list and reinsert before devb. */
167 	list_move_tail(&deva->power.entry, &devb->power.entry);
168 }
169 
170 /**
171  * device_pm_move_after - Move device in the PM core's list of active devices.
172  * @deva: Device to move in dpm_list.
173  * @devb: Device @deva should come after.
174  */
175 void device_pm_move_after(struct device *deva, struct device *devb)
176 {
177 	pr_debug("PM: Moving %s:%s after %s:%s\n",
178 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
179 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
180 	/* Delete deva from dpm_list and reinsert after devb. */
181 	list_move(&deva->power.entry, &devb->power.entry);
182 }
183 
184 /**
185  * device_pm_move_last - Move device to end of the PM core's list of devices.
186  * @dev: Device to move in dpm_list.
187  */
188 void device_pm_move_last(struct device *dev)
189 {
190 	pr_debug("PM: Moving %s:%s to end of list\n",
191 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
192 	list_move_tail(&dev->power.entry, &dpm_list);
193 }
194 
195 static ktime_t initcall_debug_start(struct device *dev)
196 {
197 	ktime_t calltime = ktime_set(0, 0);
198 
199 	if (pm_print_times_enabled) {
200 		pr_info("calling  %s+ @ %i, parent: %s\n",
201 			dev_name(dev), task_pid_nr(current),
202 			dev->parent ? dev_name(dev->parent) : "none");
203 		calltime = ktime_get();
204 	}
205 
206 	return calltime;
207 }
208 
209 static void initcall_debug_report(struct device *dev, ktime_t calltime,
210 				  int error, pm_message_t state, char *info)
211 {
212 	ktime_t rettime;
213 	s64 nsecs;
214 
215 	rettime = ktime_get();
216 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
217 
218 	if (pm_print_times_enabled) {
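		/* The >> 10 shift approximates a ns-to-us conversion (divides by 1024 rather than 1000). */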
219 		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
220 			error, (unsigned long long)nsecs >> 10);
221 	}
222 }
223 
224 /**
225  * dpm_wait - Wait for a PM operation to complete.
226  * @dev: Device to wait for.
227  * @async: If unset, wait only if the device's power.async_suspend flag is set.
228  */
229 static void dpm_wait(struct device *dev, bool async)
230 {
231 	if (!dev)
232 		return;
233 
234 	if (async || (pm_async_enabled && dev->power.async_suspend))
235 		wait_for_completion(&dev->power.completion);
236 }
237 
238 static int dpm_wait_fn(struct device *dev, void *async_ptr)
239 {
240 	dpm_wait(dev, *((bool *)async_ptr));
241 	return 0;
242 }
243 
244 static void dpm_wait_for_children(struct device *dev, bool async)
245 {
246 	device_for_each_child(dev, &async, dpm_wait_fn);
247 }
248 
249 /**
250  * pm_op - Return the PM operation appropriate for given PM event.
251  * @ops: PM operations to choose from.
252  * @state: PM transition of the system being carried out.
253  */
254 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
255 {
256 	switch (state.event) {
257 #ifdef CONFIG_SUSPEND
258 	case PM_EVENT_SUSPEND:
259 		return ops->suspend;
260 	case PM_EVENT_RESUME:
261 		return ops->resume;
262 #endif /* CONFIG_SUSPEND */
263 #ifdef CONFIG_HIBERNATE_CALLBACKS
264 	case PM_EVENT_FREEZE:
265 	case PM_EVENT_QUIESCE:
266 		return ops->freeze;
267 	case PM_EVENT_HIBERNATE:
268 		return ops->poweroff;
269 	case PM_EVENT_THAW:
270 	case PM_EVENT_RECOVER:
271 		return ops->thaw;
273 	case PM_EVENT_RESTORE:
274 		return ops->restore;
275 #endif /* CONFIG_HIBERNATE_CALLBACKS */
276 	}
277 
278 	return NULL;
279 }
280 
281 /**
282  * pm_late_early_op - Return the PM operation appropriate for given PM event.
283  * @ops: PM operations to choose from.
284  * @state: PM transition of the system being carried out.
285  *
286  * The returned callbacks are executed with runtime PM disabled for the device.
287  */
288 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
289 				      pm_message_t state)
290 {
291 	switch (state.event) {
292 #ifdef CONFIG_SUSPEND
293 	case PM_EVENT_SUSPEND:
294 		return ops->suspend_late;
295 	case PM_EVENT_RESUME:
296 		return ops->resume_early;
297 #endif /* CONFIG_SUSPEND */
298 #ifdef CONFIG_HIBERNATE_CALLBACKS
299 	case PM_EVENT_FREEZE:
300 	case PM_EVENT_QUIESCE:
301 		return ops->freeze_late;
302 	case PM_EVENT_HIBERNATE:
303 		return ops->poweroff_late;
304 	case PM_EVENT_THAW:
305 	case PM_EVENT_RECOVER:
306 		return ops->thaw_early;
307 	case PM_EVENT_RESTORE:
308 		return ops->restore_early;
309 #endif /* CONFIG_HIBERNATE_CALLBACKS */
310 	}
311 
312 	return NULL;
313 }
314 
315 /**
316  * pm_noirq_op - Return the PM operation appropriate for given PM event.
317  * @ops: PM operations to choose from.
318  * @state: PM transition of the system being carried out.
319  *
320  * The device's driver will not receive interrupts while the returned
321  * callback is being executed.
322  */
323 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
324 {
325 	switch (state.event) {
326 #ifdef CONFIG_SUSPEND
327 	case PM_EVENT_SUSPEND:
328 		return ops->suspend_noirq;
329 	case PM_EVENT_RESUME:
330 		return ops->resume_noirq;
331 #endif /* CONFIG_SUSPEND */
332 #ifdef CONFIG_HIBERNATE_CALLBACKS
333 	case PM_EVENT_FREEZE:
334 	case PM_EVENT_QUIESCE:
335 		return ops->freeze_noirq;
336 	case PM_EVENT_HIBERNATE:
337 		return ops->poweroff_noirq;
338 	case PM_EVENT_THAW:
339 	case PM_EVENT_RECOVER:
340 		return ops->thaw_noirq;
341 	case PM_EVENT_RESTORE:
342 		return ops->restore_noirq;
343 #endif /* CONFIG_HIBERNATE_CALLBACKS */
344 	}
345 
346 	return NULL;
347 }
348 
349 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
350 {
351 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
352 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
353 		", may wakeup" : "");
354 }
355 
356 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
357 			int error)
358 {
359 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
360 		dev_name(dev), pm_verb(state.event), info, error);
361 }
362 
363 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
364 {
365 	ktime_t calltime;
366 	u64 usecs64;
367 	int usecs;
368 
369 	calltime = ktime_get();
370 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
371 	do_div(usecs64, NSEC_PER_USEC);
372 	usecs = usecs64;
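	/* Report at least 1 us so a very fast phase does not show up as 0.000 msecs. */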
373 	if (usecs == 0)
374 		usecs = 1;
375 	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
376 		info ?: "", info ? " " : "", pm_verb(state.event),
377 		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
378 }
379 
380 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
381 			    pm_message_t state, char *info)
382 {
383 	ktime_t calltime;
384 	int error;
385 
386 	if (!cb)
387 		return 0;
388 
389 	calltime = initcall_debug_start(dev);
390 
391 	pm_dev_dbg(dev, state, info);
392 	trace_device_pm_callback_start(dev, info, state.event);
393 	error = cb(dev);
394 	trace_device_pm_callback_end(dev, error);
395 	suspend_report_result(cb, error);
396 
397 	initcall_debug_report(dev, calltime, error, state, info);
398 
399 	return error;
400 }
401 
402 #ifdef CONFIG_DPM_WATCHDOG
403 struct dpm_watchdog {
404 	struct device		*dev;
405 	struct task_struct	*tsk;
406 	struct timer_list	timer;
407 };
408 
409 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
410 	struct dpm_watchdog wd
411 
412 /**
413  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
414  * @data: Watchdog object address.
415  *
416  * Called when a driver has timed out suspending or resuming.
417  * There's not much we can do here to recover, so panic() to
418  * capture a crash-dump in pstore.
419  */
420 static void dpm_watchdog_handler(unsigned long data)
421 {
422 	struct dpm_watchdog *wd = (void *)data;
423 
424 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
425 	show_stack(wd->tsk, NULL);
426 	panic("%s %s: unrecoverable failure\n",
427 		dev_driver_string(wd->dev), dev_name(wd->dev));
428 }
429 
430 /**
431  * dpm_watchdog_set - Enable pm watchdog for given device.
432  * @wd: Watchdog. Must be allocated on the stack.
433  * @dev: Device to handle.
434  */
435 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
436 {
437 	struct timer_list *timer = &wd->timer;
438 
439 	wd->dev = dev;
440 	wd->tsk = current;
441 
442 	init_timer_on_stack(timer);
443 	/* use same timeout value for both suspend and resume */
444 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
445 	timer->function = dpm_watchdog_handler;
446 	timer->data = (unsigned long)wd;
447 	add_timer(timer);
448 }
449 
450 /**
451  * dpm_watchdog_clear - Disable suspend/resume watchdog.
452  * @wd: Watchdog to disable.
453  */
454 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
455 {
456 	struct timer_list *timer = &wd->timer;
457 
458 	del_timer_sync(timer);
459 	destroy_timer_on_stack(timer);
460 }
461 #else
462 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
463 #define dpm_watchdog_set(x, y)
464 #define dpm_watchdog_clear(x)
465 #endif
466 
467 /*------------------------- Resume routines -------------------------*/
468 
469 /**
470  * device_resume_noirq - Execute a "noirq resume" callback for given device.
471  * @dev: Device to handle.
472  * @state: PM transition of the system being carried out.
473  * @async: If true, the device is being resumed asynchronously.
474  *
475  * The driver of @dev will not receive interrupts while this function is being
476  * executed.
477  */
478 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
479 {
480 	pm_callback_t callback = NULL;
481 	char *info = NULL;
482 	int error = 0;
483 
484 	TRACE_DEVICE(dev);
485 	TRACE_RESUME(0);
486 
487 	if (dev->power.syscore || dev->power.direct_complete)
488 		goto Out;
489 
490 	if (!dev->power.is_noirq_suspended)
491 		goto Out;
492 
493 	dpm_wait(dev->parent, async);
494 
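	/*
	 * Callback lookup order used throughout this file: PM domain, then
	 * device type, class and bus; the driver's own callback is used only
	 * if none of the above provides one.
	 */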
495 	if (dev->pm_domain) {
496 		info = "noirq power domain ";
497 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
498 	} else if (dev->type && dev->type->pm) {
499 		info = "noirq type ";
500 		callback = pm_noirq_op(dev->type->pm, state);
501 	} else if (dev->class && dev->class->pm) {
502 		info = "noirq class ";
503 		callback = pm_noirq_op(dev->class->pm, state);
504 	} else if (dev->bus && dev->bus->pm) {
505 		info = "noirq bus ";
506 		callback = pm_noirq_op(dev->bus->pm, state);
507 	}
508 
509 	if (!callback && dev->driver && dev->driver->pm) {
510 		info = "noirq driver ";
511 		callback = pm_noirq_op(dev->driver->pm, state);
512 	}
513 
514 	error = dpm_run_callback(callback, dev, state, info);
515 	dev->power.is_noirq_suspended = false;
516 
517  Out:
518 	complete_all(&dev->power.completion);
519 	TRACE_RESUME(error);
520 	return error;
521 }
522 
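/*
 * Handle a device asynchronously only if it opted in via power.async_suspend,
 * the global pm_async knob is enabled and PM tracing is off (PM trace
 * presumably depends on a strictly sequential device order).
 */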
523 static bool is_async(struct device *dev)
524 {
525 	return dev->power.async_suspend && pm_async_enabled
526 		&& !pm_trace_is_enabled();
527 }
528 
529 static void async_resume_noirq(void *data, async_cookie_t cookie)
530 {
531 	struct device *dev = (struct device *)data;
532 	int error;
533 
534 	error = device_resume_noirq(dev, pm_transition, true);
535 	if (error)
536 		pm_dev_err(dev, pm_transition, " async", error);
537 
538 	put_device(dev);
539 }
540 
541 /**
542  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
543  * @state: PM transition of the system being carried out.
544  *
545  * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
546  * enable device drivers to receive interrupts.
547  */
548 void dpm_resume_noirq(pm_message_t state)
549 {
550 	struct device *dev;
551 	ktime_t starttime = ktime_get();
552 
553 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
554 	mutex_lock(&dpm_list_mtx);
555 	pm_transition = state;
556 
557 	/*
558 	 * Start the async threads upfront, in case their start would
559 	 * otherwise be delayed by devices that are resumed
560 	 * synchronously.
561 	 */
562 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
563 		reinit_completion(&dev->power.completion);
564 		if (is_async(dev)) {
565 			get_device(dev);
566 			async_schedule(async_resume_noirq, dev);
567 		}
568 	}
569 
570 	while (!list_empty(&dpm_noirq_list)) {
571 		dev = to_device(dpm_noirq_list.next);
572 		get_device(dev);
573 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
574 		mutex_unlock(&dpm_list_mtx);
575 
576 		if (!is_async(dev)) {
577 			int error;
578 
579 			error = device_resume_noirq(dev, state, false);
580 			if (error) {
581 				suspend_stats.failed_resume_noirq++;
582 				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
583 				dpm_save_failed_dev(dev_name(dev));
584 				pm_dev_err(dev, state, " noirq", error);
585 			}
586 		}
587 
588 		mutex_lock(&dpm_list_mtx);
589 		put_device(dev);
590 	}
591 	mutex_unlock(&dpm_list_mtx);
592 	async_synchronize_full();
593 	dpm_show_time(starttime, state, "noirq");
594 	resume_device_irqs();
595 	device_wakeup_disarm_wake_irqs();
596 	cpuidle_resume();
597 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
598 }
599 
600 /**
601  * device_resume_early - Execute an "early resume" callback for given device.
602  * @dev: Device to handle.
603  * @state: PM transition of the system being carried out.
604  * @async: If true, the device is being resumed asynchronously.
605  *
606  * Runtime PM is disabled for @dev while this function is being executed.
607  */
608 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
609 {
610 	pm_callback_t callback = NULL;
611 	char *info = NULL;
612 	int error = 0;
613 
614 	TRACE_DEVICE(dev);
615 	TRACE_RESUME(0);
616 
617 	if (dev->power.syscore || dev->power.direct_complete)
618 		goto Out;
619 
620 	if (!dev->power.is_late_suspended)
621 		goto Out;
622 
623 	dpm_wait(dev->parent, async);
624 
625 	if (dev->pm_domain) {
626 		info = "early power domain ";
627 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
628 	} else if (dev->type && dev->type->pm) {
629 		info = "early type ";
630 		callback = pm_late_early_op(dev->type->pm, state);
631 	} else if (dev->class && dev->class->pm) {
632 		info = "early class ";
633 		callback = pm_late_early_op(dev->class->pm, state);
634 	} else if (dev->bus && dev->bus->pm) {
635 		info = "early bus ";
636 		callback = pm_late_early_op(dev->bus->pm, state);
637 	}
638 
639 	if (!callback && dev->driver && dev->driver->pm) {
640 		info = "early driver ";
641 		callback = pm_late_early_op(dev->driver->pm, state);
642 	}
643 
644 	error = dpm_run_callback(callback, dev, state, info);
645 	dev->power.is_late_suspended = false;
646 
647  Out:
648 	TRACE_RESUME(error);
649 
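	/* Balance the __pm_runtime_disable() done in __device_suspend_late(). */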
650 	pm_runtime_enable(dev);
651 	complete_all(&dev->power.completion);
652 	return error;
653 }
654 
655 static void async_resume_early(void *data, async_cookie_t cookie)
656 {
657 	struct device *dev = (struct device *)data;
658 	int error;
659 
660 	error = device_resume_early(dev, pm_transition, true);
661 	if (error)
662 		pm_dev_err(dev, pm_transition, " async", error);
663 
664 	put_device(dev);
665 }
666 
667 /**
668  * dpm_resume_early - Execute "early resume" callbacks for all devices.
669  * @state: PM transition of the system being carried out.
670  */
671 void dpm_resume_early(pm_message_t state)
672 {
673 	struct device *dev;
674 	ktime_t starttime = ktime_get();
675 
676 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
677 	mutex_lock(&dpm_list_mtx);
678 	pm_transition = state;
679 
680 	/*
681 	 * Start the async threads upfront, in case their start would
682 	 * otherwise be delayed by devices that are resumed
683 	 * synchronously.
684 	 */
685 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
686 		reinit_completion(&dev->power.completion);
687 		if (is_async(dev)) {
688 			get_device(dev);
689 			async_schedule(async_resume_early, dev);
690 		}
691 	}
692 
693 	while (!list_empty(&dpm_late_early_list)) {
694 		dev = to_device(dpm_late_early_list.next);
695 		get_device(dev);
696 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
697 		mutex_unlock(&dpm_list_mtx);
698 
699 		if (!is_async(dev)) {
700 			int error;
701 
702 			error = device_resume_early(dev, state, false);
703 			if (error) {
704 				suspend_stats.failed_resume_early++;
705 				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
706 				dpm_save_failed_dev(dev_name(dev));
707 				pm_dev_err(dev, state, " early", error);
708 			}
709 		}
710 		mutex_lock(&dpm_list_mtx);
711 		put_device(dev);
712 	}
713 	mutex_unlock(&dpm_list_mtx);
714 	async_synchronize_full();
715 	dpm_show_time(starttime, state, "early");
716 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
717 }
718 
719 /**
720  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
721  * @state: PM transition of the system being carried out.
722  */
723 void dpm_resume_start(pm_message_t state)
724 {
725 	dpm_resume_noirq(state);
726 	dpm_resume_early(state);
727 }
728 EXPORT_SYMBOL_GPL(dpm_resume_start);
729 
730 /**
731  * device_resume - Execute "resume" callbacks for given device.
732  * @dev: Device to handle.
733  * @state: PM transition of the system being carried out.
734  * @async: If true, the device is being resumed asynchronously.
735  */
736 static int device_resume(struct device *dev, pm_message_t state, bool async)
737 {
738 	pm_callback_t callback = NULL;
739 	char *info = NULL;
740 	int error = 0;
741 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
742 
743 	TRACE_DEVICE(dev);
744 	TRACE_RESUME(0);
745 
746 	if (dev->power.syscore)
747 		goto Complete;
748 
749 	if (dev->power.direct_complete) {
750 		/* Match the pm_runtime_disable() in __device_suspend(). */
751 		pm_runtime_enable(dev);
752 		goto Complete;
753 	}
754 
755 	dpm_wait(dev->parent, async);
756 	dpm_watchdog_set(&wd, dev);
757 	device_lock(dev);
758 
759 	/*
760 	 * This is a fib.  But we'll allow new children to be added below
761 	 * a resumed device, even if the device hasn't been completed yet.
762 	 */
763 	dev->power.is_prepared = false;
764 
765 	if (!dev->power.is_suspended)
766 		goto Unlock;
767 
768 	if (dev->pm_domain) {
769 		info = "power domain ";
770 		callback = pm_op(&dev->pm_domain->ops, state);
771 		goto Driver;
772 	}
773 
774 	if (dev->type && dev->type->pm) {
775 		info = "type ";
776 		callback = pm_op(dev->type->pm, state);
777 		goto Driver;
778 	}
779 
780 	if (dev->class) {
781 		if (dev->class->pm) {
782 			info = "class ";
783 			callback = pm_op(dev->class->pm, state);
784 			goto Driver;
785 		} else if (dev->class->resume) {
786 			info = "legacy class ";
787 			callback = dev->class->resume;
788 			goto End;
789 		}
790 	}
791 
792 	if (dev->bus) {
793 		if (dev->bus->pm) {
794 			info = "bus ";
795 			callback = pm_op(dev->bus->pm, state);
796 		} else if (dev->bus->resume) {
797 			info = "legacy bus ";
798 			callback = dev->bus->resume;
799 			goto End;
800 		}
801 	}
802 
803  Driver:
804 	if (!callback && dev->driver && dev->driver->pm) {
805 		info = "driver ";
806 		callback = pm_op(dev->driver->pm, state);
807 	}
808 
809  End:
810 	error = dpm_run_callback(callback, dev, state, info);
811 	dev->power.is_suspended = false;
812 
813  Unlock:
814 	device_unlock(dev);
815 	dpm_watchdog_clear(&wd);
816 
817  Complete:
818 	complete_all(&dev->power.completion);
819 
820 	TRACE_RESUME(error);
821 
822 	return error;
823 }
824 
825 static void async_resume(void *data, async_cookie_t cookie)
826 {
827 	struct device *dev = (struct device *)data;
828 	int error;
829 
830 	error = device_resume(dev, pm_transition, true);
831 	if (error)
832 		pm_dev_err(dev, pm_transition, " async", error);
833 	put_device(dev);
834 }
835 
836 /**
837  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
838  * @state: PM transition of the system being carried out.
839  *
840  * Execute the appropriate "resume" callback for all devices whose status
841  * indicates that they are suspended.
842  */
843 void dpm_resume(pm_message_t state)
844 {
845 	struct device *dev;
846 	ktime_t starttime = ktime_get();
847 
848 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
849 	might_sleep();
850 
851 	mutex_lock(&dpm_list_mtx);
852 	pm_transition = state;
853 	async_error = 0;
854 
855 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
856 		reinit_completion(&dev->power.completion);
857 		if (is_async(dev)) {
858 			get_device(dev);
859 			async_schedule(async_resume, dev);
860 		}
861 	}
862 
863 	while (!list_empty(&dpm_suspended_list)) {
864 		dev = to_device(dpm_suspended_list.next);
865 		get_device(dev);
866 		if (!is_async(dev)) {
867 			int error;
868 
869 			mutex_unlock(&dpm_list_mtx);
870 
871 			error = device_resume(dev, state, false);
872 			if (error) {
873 				suspend_stats.failed_resume++;
874 				dpm_save_failed_step(SUSPEND_RESUME);
875 				dpm_save_failed_dev(dev_name(dev));
876 				pm_dev_err(dev, state, "", error);
877 			}
878 
879 			mutex_lock(&dpm_list_mtx);
880 		}
881 		if (!list_empty(&dev->power.entry))
882 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
883 		put_device(dev);
884 	}
885 	mutex_unlock(&dpm_list_mtx);
886 	async_synchronize_full();
887 	dpm_show_time(starttime, state, NULL);
888 
889 	cpufreq_resume();
890 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
891 }
892 
893 /**
894  * device_complete - Complete a PM transition for given device.
895  * @dev: Device to handle.
896  * @state: PM transition of the system being carried out.
897  */
898 static void device_complete(struct device *dev, pm_message_t state)
899 {
900 	void (*callback)(struct device *) = NULL;
901 	char *info = NULL;
902 
903 	if (dev->power.syscore)
904 		return;
905 
906 	device_lock(dev);
907 
908 	if (dev->pm_domain) {
909 		info = "completing power domain ";
910 		callback = dev->pm_domain->ops.complete;
911 	} else if (dev->type && dev->type->pm) {
912 		info = "completing type ";
913 		callback = dev->type->pm->complete;
914 	} else if (dev->class && dev->class->pm) {
915 		info = "completing class ";
916 		callback = dev->class->pm->complete;
917 	} else if (dev->bus && dev->bus->pm) {
918 		info = "completing bus ";
919 		callback = dev->bus->pm->complete;
920 	}
921 
922 	if (!callback && dev->driver && dev->driver->pm) {
923 		info = "completing driver ";
924 		callback = dev->driver->pm->complete;
925 	}
926 
927 	if (callback) {
928 		pm_dev_dbg(dev, state, info);
929 		callback(dev);
930 	}
931 
932 	device_unlock(dev);
933 
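	/* Drop the runtime PM reference taken by device_prepare(). */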
934 	pm_runtime_put(dev);
935 }
936 
937 /**
938  * dpm_complete - Complete a PM transition for all non-sysdev devices.
939  * @state: PM transition of the system being carried out.
940  *
941  * Execute the ->complete() callbacks for all devices whose PM status is not
942  * DPM_ON (this allows new devices to be registered).
943  */
944 void dpm_complete(pm_message_t state)
945 {
946 	struct list_head list;
947 
948 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
949 	might_sleep();
950 
951 	INIT_LIST_HEAD(&list);
952 	mutex_lock(&dpm_list_mtx);
953 	while (!list_empty(&dpm_prepared_list)) {
954 		struct device *dev = to_device(dpm_prepared_list.prev);
955 
956 		get_device(dev);
957 		dev->power.is_prepared = false;
958 		list_move(&dev->power.entry, &list);
959 		mutex_unlock(&dpm_list_mtx);
960 
961 		trace_device_pm_callback_start(dev, "", state.event);
962 		device_complete(dev, state);
963 		trace_device_pm_callback_end(dev, 0);
964 
965 		mutex_lock(&dpm_list_mtx);
966 		put_device(dev);
967 	}
968 	list_splice(&list, &dpm_list);
969 	mutex_unlock(&dpm_list_mtx);
970 
971 	/* Allow device probing and trigger re-probing of deferred devices */
972 	device_unblock_probing();
973 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
974 }
975 
976 /**
977  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
978  * @state: PM transition of the system being carried out.
979  *
980  * Execute "resume" callbacks for all devices and complete the PM transition of
981  * the system.
982  */
983 void dpm_resume_end(pm_message_t state)
984 {
985 	dpm_resume(state);
986 	dpm_complete(state);
987 }
988 EXPORT_SYMBOL_GPL(dpm_resume_end);
989 
990 
991 /*------------------------- Suspend routines -------------------------*/
992 
993 /**
994  * resume_event - Return a "resume" message for given "suspend" sleep state.
995  * @sleep_state: PM message representing a sleep state.
996  *
997  * Return a PM message representing the resume event corresponding to given
998  * sleep state.
999  */
1000 static pm_message_t resume_event(pm_message_t sleep_state)
1001 {
1002 	switch (sleep_state.event) {
1003 	case PM_EVENT_SUSPEND:
1004 		return PMSG_RESUME;
1005 	case PM_EVENT_FREEZE:
1006 	case PM_EVENT_QUIESCE:
1007 		return PMSG_RECOVER;
1008 	case PM_EVENT_HIBERNATE:
1009 		return PMSG_RESTORE;
1010 	}
1011 	return PMSG_ON;
1012 }
1013 
1014 /**
1015  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1016  * @dev: Device to handle.
1017  * @state: PM transition of the system being carried out.
1018  * @async: If true, the device is being suspended asynchronously.
1019  *
1020  * The driver of @dev will not receive interrupts while this function is being
1021  * executed.
1022  */
1023 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1024 {
1025 	pm_callback_t callback = NULL;
1026 	char *info = NULL;
1027 	int error = 0;
1028 
1029 	TRACE_DEVICE(dev);
1030 	TRACE_SUSPEND(0);
1031 
1032 	if (async_error)
1033 		goto Complete;
1034 
1035 	if (pm_wakeup_pending()) {
1036 		async_error = -EBUSY;
1037 		goto Complete;
1038 	}
1039 
1040 	if (dev->power.syscore || dev->power.direct_complete)
1041 		goto Complete;
1042 
1043 	dpm_wait_for_children(dev, async);
1044 
1045 	if (dev->pm_domain) {
1046 		info = "noirq power domain ";
1047 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1048 	} else if (dev->type && dev->type->pm) {
1049 		info = "noirq type ";
1050 		callback = pm_noirq_op(dev->type->pm, state);
1051 	} else if (dev->class && dev->class->pm) {
1052 		info = "noirq class ";
1053 		callback = pm_noirq_op(dev->class->pm, state);
1054 	} else if (dev->bus && dev->bus->pm) {
1055 		info = "noirq bus ";
1056 		callback = pm_noirq_op(dev->bus->pm, state);
1057 	}
1058 
1059 	if (!callback && dev->driver && dev->driver->pm) {
1060 		info = "noirq driver ";
1061 		callback = pm_noirq_op(dev->driver->pm, state);
1062 	}
1063 
1064 	error = dpm_run_callback(callback, dev, state, info);
1065 	if (!error)
1066 		dev->power.is_noirq_suspended = true;
1067 	else
1068 		async_error = error;
1069 
1070 Complete:
1071 	complete_all(&dev->power.completion);
1072 	TRACE_SUSPEND(error);
1073 	return error;
1074 }
1075 
1076 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1077 {
1078 	struct device *dev = (struct device *)data;
1079 	int error;
1080 
1081 	error = __device_suspend_noirq(dev, pm_transition, true);
1082 	if (error) {
1083 		dpm_save_failed_dev(dev_name(dev));
1084 		pm_dev_err(dev, pm_transition, " async", error);
1085 	}
1086 
1087 	put_device(dev);
1088 }
1089 
1090 static int device_suspend_noirq(struct device *dev)
1091 {
1092 	reinit_completion(&dev->power.completion);
1093 
1094 	if (is_async(dev)) {
1095 		get_device(dev);
1096 		async_schedule(async_suspend_noirq, dev);
1097 		return 0;
1098 	}
1099 	return __device_suspend_noirq(dev, pm_transition, false);
1100 }
1101 
1102 /**
1103  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1104  * @state: PM transition of the system being carried out.
1105  *
1106  * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1107  * handlers for all non-sysdev devices.
1108  */
1109 int dpm_suspend_noirq(pm_message_t state)
1110 {
1111 	ktime_t starttime = ktime_get();
1112 	int error = 0;
1113 
1114 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
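	/*
	 * cpuidle is paused, wake IRQs are armed and device interrupts are
	 * disabled before the noirq callbacks run; dpm_resume_noirq() undoes
	 * these steps in the reverse order.
	 */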
1115 	cpuidle_pause();
1116 	device_wakeup_arm_wake_irqs();
1117 	suspend_device_irqs();
1118 	mutex_lock(&dpm_list_mtx);
1119 	pm_transition = state;
1120 	async_error = 0;
1121 
1122 	while (!list_empty(&dpm_late_early_list)) {
1123 		struct device *dev = to_device(dpm_late_early_list.prev);
1124 
1125 		get_device(dev);
1126 		mutex_unlock(&dpm_list_mtx);
1127 
1128 		error = device_suspend_noirq(dev);
1129 
1130 		mutex_lock(&dpm_list_mtx);
1131 		if (error) {
1132 			pm_dev_err(dev, state, " noirq", error);
1133 			dpm_save_failed_dev(dev_name(dev));
1134 			put_device(dev);
1135 			break;
1136 		}
1137 		if (!list_empty(&dev->power.entry))
1138 			list_move(&dev->power.entry, &dpm_noirq_list);
1139 		put_device(dev);
1140 
1141 		if (async_error)
1142 			break;
1143 	}
1144 	mutex_unlock(&dpm_list_mtx);
1145 	async_synchronize_full();
1146 	if (!error)
1147 		error = async_error;
1148 
1149 	if (error) {
1150 		suspend_stats.failed_suspend_noirq++;
1151 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1152 		dpm_resume_noirq(resume_event(state));
1153 	} else {
1154 		dpm_show_time(starttime, state, "noirq");
1155 	}
1156 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1157 	return error;
1158 }
1159 
1160 /**
1161  * __device_suspend_late - Execute a "late suspend" callback for given device.
1162  * @dev: Device to handle.
1163  * @state: PM transition of the system being carried out.
1164  * @async: If true, the device is being suspended asynchronously.
1165  *
1166  * Runtime PM is disabled for @dev while this function is being executed.
1167  */
1168 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1169 {
1170 	pm_callback_t callback = NULL;
1171 	char *info = NULL;
1172 	int error = 0;
1173 
1174 	TRACE_DEVICE(dev);
1175 	TRACE_SUSPEND(0);
1176 
1177 	__pm_runtime_disable(dev, false);
1178 
1179 	if (async_error)
1180 		goto Complete;
1181 
1182 	if (pm_wakeup_pending()) {
1183 		async_error = -EBUSY;
1184 		goto Complete;
1185 	}
1186 
1187 	if (dev->power.syscore || dev->power.direct_complete)
1188 		goto Complete;
1189 
1190 	dpm_wait_for_children(dev, async);
1191 
1192 	if (dev->pm_domain) {
1193 		info = "late power domain ";
1194 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1195 	} else if (dev->type && dev->type->pm) {
1196 		info = "late type ";
1197 		callback = pm_late_early_op(dev->type->pm, state);
1198 	} else if (dev->class && dev->class->pm) {
1199 		info = "late class ";
1200 		callback = pm_late_early_op(dev->class->pm, state);
1201 	} else if (dev->bus && dev->bus->pm) {
1202 		info = "late bus ";
1203 		callback = pm_late_early_op(dev->bus->pm, state);
1204 	}
1205 
1206 	if (!callback && dev->driver && dev->driver->pm) {
1207 		info = "late driver ";
1208 		callback = pm_late_early_op(dev->driver->pm, state);
1209 	}
1210 
1211 	error = dpm_run_callback(callback, dev, state, info);
1212 	if (!error)
1213 		dev->power.is_late_suspended = true;
1214 	else
1215 		async_error = error;
1216 
1217 Complete:
1218 	TRACE_SUSPEND(error);
1219 	complete_all(&dev->power.completion);
1220 	return error;
1221 }
1222 
1223 static void async_suspend_late(void *data, async_cookie_t cookie)
1224 {
1225 	struct device *dev = (struct device *)data;
1226 	int error;
1227 
1228 	error = __device_suspend_late(dev, pm_transition, true);
1229 	if (error) {
1230 		dpm_save_failed_dev(dev_name(dev));
1231 		pm_dev_err(dev, pm_transition, " async", error);
1232 	}
1233 	put_device(dev);
1234 }
1235 
1236 static int device_suspend_late(struct device *dev)
1237 {
1238 	reinit_completion(&dev->power.completion);
1239 
1240 	if (is_async(dev)) {
1241 		get_device(dev);
1242 		async_schedule(async_suspend_late, dev);
1243 		return 0;
1244 	}
1245 
1246 	return __device_suspend_late(dev, pm_transition, false);
1247 }
1248 
1249 /**
1250  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1251  * @state: PM transition of the system being carried out.
1252  */
1253 int dpm_suspend_late(pm_message_t state)
1254 {
1255 	ktime_t starttime = ktime_get();
1256 	int error = 0;
1257 
1258 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1259 	mutex_lock(&dpm_list_mtx);
1260 	pm_transition = state;
1261 	async_error = 0;
1262 
1263 	while (!list_empty(&dpm_suspended_list)) {
1264 		struct device *dev = to_device(dpm_suspended_list.prev);
1265 
1266 		get_device(dev);
1267 		mutex_unlock(&dpm_list_mtx);
1268 
1269 		error = device_suspend_late(dev);
1270 
1271 		mutex_lock(&dpm_list_mtx);
1272 		if (!list_empty(&dev->power.entry))
1273 			list_move(&dev->power.entry, &dpm_late_early_list);
1274 
1275 		if (error) {
1276 			pm_dev_err(dev, state, " late", error);
1277 			dpm_save_failed_dev(dev_name(dev));
1278 			put_device(dev);
1279 			break;
1280 		}
1281 		put_device(dev);
1282 
1283 		if (async_error)
1284 			break;
1285 	}
1286 	mutex_unlock(&dpm_list_mtx);
1287 	async_synchronize_full();
1288 	if (!error)
1289 		error = async_error;
1290 	if (error) {
1291 		suspend_stats.failed_suspend_late++;
1292 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1293 		dpm_resume_early(resume_event(state));
1294 	} else {
1295 		dpm_show_time(starttime, state, "late");
1296 	}
1297 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1298 	return error;
1299 }
1300 
1301 /**
1302  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1303  * @state: PM transition of the system being carried out.
1304  */
1305 int dpm_suspend_end(pm_message_t state)
1306 {
1307 	int error = dpm_suspend_late(state);
1308 	if (error)
1309 		return error;
1310 
1311 	error = dpm_suspend_noirq(state);
1312 	if (error) {
1313 		dpm_resume_early(resume_event(state));
1314 		return error;
1315 	}
1316 
1317 	return 0;
1318 }
1319 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1320 
1321 /**
1322  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1323  * @dev: Device to suspend.
1324  * @state: PM transition of the system being carried out.
1325  * @cb: Suspend callback to execute.
1326  * @info: string description of caller.
1327  */
1328 static int legacy_suspend(struct device *dev, pm_message_t state,
1329 			  int (*cb)(struct device *dev, pm_message_t state),
1330 			  char *info)
1331 {
1332 	int error;
1333 	ktime_t calltime;
1334 
1335 	calltime = initcall_debug_start(dev);
1336 
1337 	trace_device_pm_callback_start(dev, info, state.event);
1338 	error = cb(dev, state);
1339 	trace_device_pm_callback_end(dev, error);
1340 	suspend_report_result(cb, error);
1341 
1342 	initcall_debug_report(dev, calltime, error, state, info);
1343 
1344 	return error;
1345 }
1346 
1347 /**
1348  * __device_suspend - Execute "suspend" callbacks for given device.
1349  * @dev: Device to handle.
1350  * @state: PM transition of the system being carried out.
1351  * @async: If true, the device is being suspended asynchronously.
1352  */
1353 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1354 {
1355 	pm_callback_t callback = NULL;
1356 	char *info = NULL;
1357 	int error = 0;
1358 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1359 
1360 	TRACE_DEVICE(dev);
1361 	TRACE_SUSPEND(0);
1362 
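	/*
	 * A device may be suspended only after all of its children, so wait
	 * for any children whose suspend is still in progress.
	 */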
1363 	dpm_wait_for_children(dev, async);
1364 
1365 	if (async_error)
1366 		goto Complete;
1367 
1368 	/*
1369 	 * If a device configured to wake up the system from sleep states
1370 	 * has been suspended at run time and there's a resume request pending
1371 	 * for it, this is equivalent to the device signaling wakeup, so the
1372 	 * system suspend operation should be aborted.
1373 	 */
1374 	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1375 		pm_wakeup_event(dev, 0);
1376 
1377 	if (pm_wakeup_pending()) {
1378 		async_error = -EBUSY;
1379 		goto Complete;
1380 	}
1381 
1382 	if (dev->power.syscore)
1383 		goto Complete;
1384 
1385 	if (dev->power.direct_complete) {
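		/*
		 * Check the runtime PM status again after disabling runtime
		 * PM: the device may have been runtime-resumed between the
		 * first check and pm_runtime_disable(), in which case the
		 * direct_complete optimization must not be used.
		 */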
1386 		if (pm_runtime_status_suspended(dev)) {
1387 			pm_runtime_disable(dev);
1388 			if (pm_runtime_status_suspended(dev))
1389 				goto Complete;
1390 
1391 			pm_runtime_enable(dev);
1392 		}
1393 		dev->power.direct_complete = false;
1394 	}
1395 
1396 	dpm_watchdog_set(&wd, dev);
1397 	device_lock(dev);
1398 
1399 	if (dev->pm_domain) {
1400 		info = "power domain ";
1401 		callback = pm_op(&dev->pm_domain->ops, state);
1402 		goto Run;
1403 	}
1404 
1405 	if (dev->type && dev->type->pm) {
1406 		info = "type ";
1407 		callback = pm_op(dev->type->pm, state);
1408 		goto Run;
1409 	}
1410 
1411 	if (dev->class) {
1412 		if (dev->class->pm) {
1413 			info = "class ";
1414 			callback = pm_op(dev->class->pm, state);
1415 			goto Run;
1416 		} else if (dev->class->suspend) {
1417 			pm_dev_dbg(dev, state, "legacy class ");
1418 			error = legacy_suspend(dev, state, dev->class->suspend,
1419 						"legacy class ");
1420 			goto End;
1421 		}
1422 	}
1423 
1424 	if (dev->bus) {
1425 		if (dev->bus->pm) {
1426 			info = "bus ";
1427 			callback = pm_op(dev->bus->pm, state);
1428 		} else if (dev->bus->suspend) {
1429 			pm_dev_dbg(dev, state, "legacy bus ");
1430 			error = legacy_suspend(dev, state, dev->bus->suspend,
1431 						"legacy bus ");
1432 			goto End;
1433 		}
1434 	}
1435 
1436  Run:
1437 	if (!callback && dev->driver && dev->driver->pm) {
1438 		info = "driver ";
1439 		callback = pm_op(dev->driver->pm, state);
1440 	}
1441 
1442 	error = dpm_run_callback(callback, dev, state, info);
1443 
1444  End:
1445 	if (!error) {
1446 		struct device *parent = dev->parent;
1447 
1448 		dev->power.is_suspended = true;
1449 		if (parent) {
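			/*
			 * A suspended child disqualifies its parent from the
			 * direct_complete optimization; if the child is on a
			 * wakeup path and the parent does not ignore its
			 * children, the parent is marked as being on a wakeup
			 * path too.
			 */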
1450 			spin_lock_irq(&parent->power.lock);
1451 
1452 			dev->parent->power.direct_complete = false;
1453 			if (dev->power.wakeup_path
1454 			    && !dev->parent->power.ignore_children)
1455 				dev->parent->power.wakeup_path = true;
1456 
1457 			spin_unlock_irq(&parent->power.lock);
1458 		}
1459 	}
1460 
1461 	device_unlock(dev);
1462 	dpm_watchdog_clear(&wd);
1463 
1464  Complete:
1465 	complete_all(&dev->power.completion);
1466 	if (error)
1467 		async_error = error;
1468 
1469 	TRACE_SUSPEND(error);
1470 	return error;
1471 }
1472 
1473 static void async_suspend(void *data, async_cookie_t cookie)
1474 {
1475 	struct device *dev = (struct device *)data;
1476 	int error;
1477 
1478 	error = __device_suspend(dev, pm_transition, true);
1479 	if (error) {
1480 		dpm_save_failed_dev(dev_name(dev));
1481 		pm_dev_err(dev, pm_transition, " async", error);
1482 	}
1483 
1484 	put_device(dev);
1485 }
1486 
1487 static int device_suspend(struct device *dev)
1488 {
1489 	reinit_completion(&dev->power.completion);
1490 
1491 	if (is_async(dev)) {
1492 		get_device(dev);
1493 		async_schedule(async_suspend, dev);
1494 		return 0;
1495 	}
1496 
1497 	return __device_suspend(dev, pm_transition, false);
1498 }
1499 
1500 /**
1501  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1502  * @state: PM transition of the system being carried out.
1503  */
1504 int dpm_suspend(pm_message_t state)
1505 {
1506 	ktime_t starttime = ktime_get();
1507 	int error = 0;
1508 
1509 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1510 	might_sleep();
1511 
1512 	cpufreq_suspend();
1513 
1514 	mutex_lock(&dpm_list_mtx);
1515 	pm_transition = state;
1516 	async_error = 0;
1517 	while (!list_empty(&dpm_prepared_list)) {
1518 		struct device *dev = to_device(dpm_prepared_list.prev);
1519 
1520 		get_device(dev);
1521 		mutex_unlock(&dpm_list_mtx);
1522 
1523 		error = device_suspend(dev);
1524 
1525 		mutex_lock(&dpm_list_mtx);
1526 		if (error) {
1527 			pm_dev_err(dev, state, "", error);
1528 			dpm_save_failed_dev(dev_name(dev));
1529 			put_device(dev);
1530 			break;
1531 		}
1532 		if (!list_empty(&dev->power.entry))
1533 			list_move(&dev->power.entry, &dpm_suspended_list);
1534 		put_device(dev);
1535 		if (async_error)
1536 			break;
1537 	}
1538 	mutex_unlock(&dpm_list_mtx);
1539 	async_synchronize_full();
1540 	if (!error)
1541 		error = async_error;
1542 	if (error) {
1543 		suspend_stats.failed_suspend++;
1544 		dpm_save_failed_step(SUSPEND_SUSPEND);
1545 	} else
1546 		dpm_show_time(starttime, state, NULL);
1547 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1548 	return error;
1549 }
1550 
1551 /**
1552  * device_prepare - Prepare a device for system power transition.
1553  * @dev: Device to handle.
1554  * @state: PM transition of the system being carried out.
1555  *
1556  * Execute the ->prepare() callback(s) for given device.  No new children of the
1557  * device may be registered after this function has returned.
1558  */
1559 static int device_prepare(struct device *dev, pm_message_t state)
1560 {
1561 	int (*callback)(struct device *) = NULL;
1562 	int ret = 0;
1563 
1564 	if (dev->power.syscore)
1565 		return 0;
1566 
1567 	/*
1568 	 * If a device's parent goes into runtime suspend at the wrong time,
1569 	 * it won't be possible to resume the device.  To prevent this we
1570 	 * block runtime suspend here, during the prepare phase, and allow
1571 	 * it again during the complete phase.
1572 	 */
1573 	pm_runtime_get_noresume(dev);
1574 
1575 	device_lock(dev);
1576 
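	/*
	 * Seed wakeup_path from the device's own wakeup setting;
	 * __device_suspend() may propagate it up to the parent later.
	 */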
1577 	dev->power.wakeup_path = device_may_wakeup(dev);
1578 
1579 	if (dev->power.no_pm_callbacks) {
1580 		ret = 1;	/* Let device go direct_complete */
1581 		goto unlock;
1582 	}
1583 
1584 	if (dev->pm_domain)
1585 		callback = dev->pm_domain->ops.prepare;
1586 	else if (dev->type && dev->type->pm)
1587 		callback = dev->type->pm->prepare;
1588 	else if (dev->class && dev->class->pm)
1589 		callback = dev->class->pm->prepare;
1590 	else if (dev->bus && dev->bus->pm)
1591 		callback = dev->bus->pm->prepare;
1592 
1593 	if (!callback && dev->driver && dev->driver->pm)
1594 		callback = dev->driver->pm->prepare;
1595 
1596 	if (callback)
1597 		ret = callback(dev);
1598 
1599 unlock:
1600 	device_unlock(dev);
1601 
1602 	if (ret < 0) {
1603 		suspend_report_result(callback, ret);
1604 		pm_runtime_put(dev);
1605 		return ret;
1606 	}
1607 	/*
1608 	 * A positive return value from ->prepare() means "this device appears
1609 	 * to be runtime-suspended and its state is fine, so if it really is
1610 	 * runtime-suspended, you can leave it in that state provided that you
1611 	 * will do the same thing with all of its descendants".  This only
1612 	 * applies to suspend transitions, however.
1613 	 */
1614 	spin_lock_irq(&dev->power.lock);
1615 	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1616 	spin_unlock_irq(&dev->power.lock);
1617 	return 0;
1618 }
1619 
1620 /**
1621  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1622  * @state: PM transition of the system being carried out.
1623  *
1624  * Execute the ->prepare() callback(s) for all devices.
1625  */
1626 int dpm_prepare(pm_message_t state)
1627 {
1628 	int error = 0;
1629 
1630 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1631 	might_sleep();
1632 
1633 	/*
1634 	 * Give the known devices a chance to complete their probes before
1635 	 * device probing is disabled below. This synchronization point is
1636 	 * important at least at boot time and during hibernation restore.
1637 	 */
1638 	wait_for_device_probe();
1639 	/*
1640 	 * Probing a device during suspend or hibernation is unsafe and makes
1641 	 * system behavior unpredictable, so prohibit device probing here and
1642 	 * defer the probes instead. The normal behavior is restored in
1643 	 * dpm_complete().
1644 	 */
1645 	device_block_probing();
1646 
1647 	mutex_lock(&dpm_list_mtx);
1648 	while (!list_empty(&dpm_list)) {
1649 		struct device *dev = to_device(dpm_list.next);
1650 
1651 		get_device(dev);
1652 		mutex_unlock(&dpm_list_mtx);
1653 
1654 		trace_device_pm_callback_start(dev, "", state.event);
1655 		error = device_prepare(dev, state);
1656 		trace_device_pm_callback_end(dev, error);
1657 
1658 		mutex_lock(&dpm_list_mtx);
1659 		if (error) {
1660 			if (error == -EAGAIN) {
1661 				put_device(dev);
1662 				error = 0;
1663 				continue;
1664 			}
1665 			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
1667 				dev_name(dev), error);
1668 			put_device(dev);
1669 			break;
1670 		}
1671 		dev->power.is_prepared = true;
1672 		if (!list_empty(&dev->power.entry))
1673 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1674 		put_device(dev);
1675 	}
1676 	mutex_unlock(&dpm_list_mtx);
1677 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1678 	return error;
1679 }
1680 
1681 /**
1682  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1683  * @state: PM transition of the system being carried out.
1684  *
1685  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1686  * callbacks for them.
1687  */
1688 int dpm_suspend_start(pm_message_t state)
1689 {
1690 	int error;
1691 
1692 	error = dpm_prepare(state);
1693 	if (error) {
1694 		suspend_stats.failed_prepare++;
1695 		dpm_save_failed_step(SUSPEND_PREPARE);
1696 	} else
1697 		error = dpm_suspend(state);
1698 	return error;
1699 }
1700 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1701 
1702 void __suspend_report_result(const char *function, void *fn, int ret)
1703 {
1704 	if (ret)
1705 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1706 }
1707 EXPORT_SYMBOL_GPL(__suspend_report_result);
1708 
1709 /**
1710  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1711  * @dev: Device to wait for.
1712  * @subordinate: Device that needs to wait for @dev.
1713  */
1714 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1715 {
1716 	dpm_wait(dev, subordinate->power.async_suspend);
1717 	return async_error;
1718 }
1719 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1720 
1721 /**
1722  * dpm_for_each_dev - device iterator.
1723  * @data: data for the callback.
1724  * @fn: function to be called for each device.
1725  *
1726  * Iterate over devices in dpm_list, and call @fn for each device,
1727  * passing it @data.
1728  */
1729 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1730 {
1731 	struct device *dev;
1732 
1733 	if (!fn)
1734 		return;
1735 
1736 	device_pm_lock();
1737 	list_for_each_entry(dev, &dpm_list, power.entry)
1738 		fn(dev, data);
1739 	device_pm_unlock();
1740 }
1741 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1742 
1743 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1744 {
1745 	if (!ops)
1746 		return true;
1747 
1748 	return !ops->prepare &&
1749 	       !ops->suspend &&
1750 	       !ops->suspend_late &&
1751 	       !ops->suspend_noirq &&
1752 	       !ops->resume_noirq &&
1753 	       !ops->resume_early &&
1754 	       !ops->resume &&
1755 	       !ops->complete;
1756 }
1757 
1758 void device_pm_check_callbacks(struct device *dev)
1759 {
1760 	spin_lock_irq(&dev->power.lock);
1761 	dev->power.no_pm_callbacks =
1762 		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
1763 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1764 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1765 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1766 		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
1767 	spin_unlock_irq(&dev->power.lock);
1768 }
1769