/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.in_suspend = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.in_suspend)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
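
/*
 * Illustration only (hypothetical driver code, not part of this file):
 * a driver opts in to asynchronous suspend/resume by setting the
 * power.async_suspend flag, typically once at probe time:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		...
 *	}
 *
 * dpm_wait() and dpm_wait_for_children() above are what keep such
 * asynchronously handled devices ordered with respect to their parents
 * and children.
 */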

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}
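
/*
 * Illustration only (hypothetical driver code): pm_op() dispatches to the
 * dev_pm_ops callbacks provided by a device's power domain, type, class or
 * bus, for example:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,	(PM_EVENT_SUSPEND)
 *		.resume   = foo_resume,		(PM_EVENT_RESUME)
 *		.freeze   = foo_freeze,		(PM_EVENT_FREEZE/QUIESCE)
 *		.thaw     = foo_thaw,		(PM_EVENT_THAW/RECOVER)
 *		.poweroff = foo_poweroff,	(PM_EVENT_HIBERNATE)
 *		.restore  = foo_restore,	(PM_EVENT_RESTORE)
 *	};
 *
 * pm_noirq_op() below dispatches to the *_noirq variants of the same
 * callbacks (e.g. .suspend_noirq), which run after interrupts have been
 * disabled for the devices' drivers.
 */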

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.in_suspend = false;

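	/*
	 * Callback lookup order: a power domain takes precedence over the
	 * device type, which takes precedence over the class, which takes
	 * precedence over the bus.
	 */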
	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

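	/*
	 * First, schedule the "resume" callbacks of all devices that may be
	 * handled asynchronously, so that they can run in parallel with the
	 * synchronous pass below.
	 */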
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

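	/*
	 * Now resume the remaining devices synchronously, in
	 * dpm_suspended_list order; device_resume() waits for a device's
	 * parent if the parent is being handled asynchronously.
	 */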
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pwr_domain->ops.complete)
			dev->pwr_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices in dpm_prepared_list and
 * move them back to dpm_list (clearing power.in_suspend allows new children
 * of those devices to be registered again).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.in_suspend = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error;

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "LATE power domain ");
		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
		if (error)
			return error;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			return error;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			return error;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			return error;
	}

	return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

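	/* A parent must not suspend before all of its children have done so. */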
	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pwr_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

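/**
 * device_suspend - Execute "suspend" callbacks for given device, possibly async.
 * @dev: Device to handle.
 */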
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->pwr_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pwr_domain->ops.prepare)
			error = dev->pwr_domain->ops.prepare(dev);
		suspend_report_result(dev->pwr_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

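		/*
		 * Prevent runtime suspend from racing with this transition and
		 * flush any pending runtime PM requests; a runtime resume that
		 * was about to run is treated as a wakeup event for
		 * wakeup-capable devices.
		 */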
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		pm_runtime_put_sync(dev);
		error = pm_wakeup_pending() ?
				-EBUSY : device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.in_suspend = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

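/*
 * Illustration only (hypothetical driver code; "struct foo" and its
 * "companion" pointer are made up for this sketch): a device that depends on
 * another device outside its ancestor chain can enforce ordering from its own
 * callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *		...
 *	}
 */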