xref: /linux/drivers/base/power/main.c (revision 7025bec9125b0a02edcaf22c2dce753bf2c95480)
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19 
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/resume-trace.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 
29 #include "../base.h"
30 #include "power.h"
31 
32 /*
33  * The entries in the dpm_list list are in a depth first order, simply
34  * because children are guaranteed to be discovered after parents, and
35  * are inserted at the back of the list on discovery.
36  *
37  * Since device_pm_add() may be called with a device semaphore held,
38  * we must never try to acquire a device semaphore while holding
39  * dpm_list_mutex.
40  */
41 
/* All devices known to the PM core, parents before children; guarded by
 * dpm_list_mtx. */
LIST_HEAD(dpm_list);

/* Serializes access to dpm_list and transition_started. */
static DEFINE_MUTEX(dpm_list_mtx);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;
51 
/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 *
 * Marks the device as fully powered-on from the PM core's point of view
 * and sets up its runtime PM state.
 */
void device_pm_init(struct device *dev)
{
	/* New devices start out in the DPM_ON state. */
	dev->power.status = DPM_ON;
	pm_runtime_init(dev);
}
61 
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 *
 * Acquires dpm_list_mtx; pairs with device_pm_unlock().
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
69 
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 *
 * Releases dpm_list_mtx; pairs with device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
77 
78 /**
79  * device_pm_add - Add a device to the PM core's list of active devices.
80  * @dev: Device to add to the list.
81  */
82 void device_pm_add(struct device *dev)
83 {
84 	pr_debug("PM: Adding info for %s:%s\n",
85 		 dev->bus ? dev->bus->name : "No Bus",
86 		 kobject_name(&dev->kobj));
87 	mutex_lock(&dpm_list_mtx);
88 	if (dev->parent) {
89 		if (dev->parent->power.status >= DPM_SUSPENDING)
90 			dev_warn(dev, "parent %s should not be sleeping\n",
91 				 dev_name(dev->parent));
92 	} else if (transition_started) {
93 		/*
94 		 * We refuse to register parentless devices while a PM
95 		 * transition is in progress in order to avoid leaving them
96 		 * unhandled down the road
97 		 */
98 		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
99 	}
100 
101 	list_add_tail(&dev->power.entry, &dpm_list);
102 	mutex_unlock(&dpm_list_mtx);
103 }
104 
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 *
 * Also tears down the device's runtime PM state.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	/* list_del_init() so later list_empty() checks on the entry work. */
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	/* Runtime PM teardown happens outside dpm_list_mtx. */
	pm_runtime_remove(dev);
}
119 
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): dpm_list_mtx is not taken here; presumably the caller is
 * expected to hold it (or otherwise keep the list stable) -- confirm at the
 * call sites.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
135 
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): dpm_list_mtx is not taken here; presumably the caller is
 * expected to hold it (or otherwise keep the list stable) -- confirm at the
 * call sites.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
151 
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 *
 * NOTE(review): dpm_list_mtx is not taken here; presumably the caller is
 * expected to hold it (or otherwise keep the list stable) -- confirm at the
 * call sites.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}
163 
/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Dispatch on @state.event and invoke the matching callback from @ops, if
 * implemented.  Returns 0 if the callback is absent, the callback's return
 * value if it is present, or -EINVAL for an unrecognized event.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	/* Optionally trace callback timing, initcall_debug style. */
	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* ">> 10" approximates ns -> us (divides by 1024). */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}
239 
240 /**
241  * pm_noirq_op - Execute the PM operation appropriate for given PM event.
242  * @dev: Device to handle.
243  * @ops: PM operations to choose from.
244  * @state: PM transition of the system being carried out.
245  *
246  * The driver of @dev will not receive interrupts while this function is being
247  * executed.
248  */
249 static int pm_noirq_op(struct device *dev,
250 			const struct dev_pm_ops *ops,
251 			pm_message_t state)
252 {
253 	int error = 0;
254 	ktime_t calltime, delta, rettime;
255 
256 	if (initcall_debug) {
257 		pr_info("calling  %s_i+ @ %i\n",
258 				dev_name(dev), task_pid_nr(current));
259 		calltime = ktime_get();
260 	}
261 
262 	switch (state.event) {
263 #ifdef CONFIG_SUSPEND
264 	case PM_EVENT_SUSPEND:
265 		if (ops->suspend_noirq) {
266 			error = ops->suspend_noirq(dev);
267 			suspend_report_result(ops->suspend_noirq, error);
268 		}
269 		break;
270 	case PM_EVENT_RESUME:
271 		if (ops->resume_noirq) {
272 			error = ops->resume_noirq(dev);
273 			suspend_report_result(ops->resume_noirq, error);
274 		}
275 		break;
276 #endif /* CONFIG_SUSPEND */
277 #ifdef CONFIG_HIBERNATION
278 	case PM_EVENT_FREEZE:
279 	case PM_EVENT_QUIESCE:
280 		if (ops->freeze_noirq) {
281 			error = ops->freeze_noirq(dev);
282 			suspend_report_result(ops->freeze_noirq, error);
283 		}
284 		break;
285 	case PM_EVENT_HIBERNATE:
286 		if (ops->poweroff_noirq) {
287 			error = ops->poweroff_noirq(dev);
288 			suspend_report_result(ops->poweroff_noirq, error);
289 		}
290 		break;
291 	case PM_EVENT_THAW:
292 	case PM_EVENT_RECOVER:
293 		if (ops->thaw_noirq) {
294 			error = ops->thaw_noirq(dev);
295 			suspend_report_result(ops->thaw_noirq, error);
296 		}
297 		break;
298 	case PM_EVENT_RESTORE:
299 		if (ops->restore_noirq) {
300 			error = ops->restore_noirq(dev);
301 			suspend_report_result(ops->restore_noirq, error);
302 		}
303 		break;
304 #endif /* CONFIG_HIBERNATION */
305 	default:
306 		error = -EINVAL;
307 	}
308 
309 	if (initcall_debug) {
310 		rettime = ktime_get();
311 		delta = ktime_sub(rettime, calltime);
312 		printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev),
313 			error, (unsigned long long)ktime_to_ns(delta) >> 10);
314 	}
315 
316 	return error;
317 }
318 
319 static char *pm_verb(int event)
320 {
321 	switch (event) {
322 	case PM_EVENT_SUSPEND:
323 		return "suspend";
324 	case PM_EVENT_RESUME:
325 		return "resume";
326 	case PM_EVENT_FREEZE:
327 		return "freeze";
328 	case PM_EVENT_QUIESCE:
329 		return "quiesce";
330 	case PM_EVENT_HIBERNATE:
331 		return "hibernate";
332 	case PM_EVENT_THAW:
333 		return "thaw";
334 	case PM_EVENT_RESTORE:
335 		return "restore";
336 	case PM_EVENT_RECOVER:
337 		return "recover";
338 	default:
339 		return "(unknown PM event)";
340 	}
341 }
342 
343 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
344 {
345 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
346 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
347 		", may wakeup" : "");
348 }
349 
350 static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
351 			int error)
352 {
353 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
354 		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
355 }
356 
357 /*------------------------- Resume routines -------------------------*/
358 
/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 *
 * Returns 0 if no bus-level dev_pm_ops callback exists, otherwise the
 * callback's return value.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Only bus-level callbacks run during the "noirq" phase. */
	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}
382 
/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;

	mutex_lock(&dpm_list_mtx);
	/* Resuming: device registration is allowed again (see dpm_prepare()). */
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	/* Re-enable interrupt delivery to device drivers. */
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
409 
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Callbacks run in bus, type, class order; at the bus and class levels the
 * dev_pm_ops-based callback takes precedence over the legacy one.  The first
 * error aborts the remaining levels.
 */
static int device_resume(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Serialize against other operations on this device. */
	down(&dev->sem);

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			/* Legacy bus callback, used only without dev_pm_ops. */
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->resume(dev);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			/* Legacy class callback, used only without dev_pm_ops. */
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->resume(dev);
		}
	}
 End:
	up(&dev->sem);

	TRACE_RESUME(error);
	return error;
}
460 
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	/*
	 * Processed devices are moved onto a private list so the loop makes
	 * progress even though dpm_list_mtx is dropped around each callback
	 * (devices may be registered or removed in the meantime).
	 */
	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		/* Keep the device alive while the list lock is dropped. */
		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Put the processed devices back onto dpm_list, order preserved. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
500 
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The ->complete() callbacks run in class, type, bus order -- the reverse of
 * the ->prepare() order used by device_prepare().
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}
527 
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	/*
	 * As in dpm_resume(), a private list keeps the walk safe while
	 * dpm_list_mtx is dropped around each callback.
	 */
	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		/* Walk from the tail, completing children before parents. */
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			/* Drop the runtime PM ref taken in dpm_prepare(). */
			pm_runtime_put_noidle(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
562 
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.  Must be called in process context (may sleep).
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
577 
578 
579 /*------------------------- Suspend routines -------------------------*/
580 
581 /**
582  * resume_event - Return a "resume" message for given "suspend" sleep state.
583  * @sleep_state: PM message representing a sleep state.
584  *
585  * Return a PM message representing the resume event corresponding to given
586  * sleep state.
587  */
588 static pm_message_t resume_event(pm_message_t sleep_state)
589 {
590 	switch (sleep_state.event) {
591 	case PM_EVENT_SUSPEND:
592 		return PMSG_RESUME;
593 	case PM_EVENT_FREEZE:
594 	case PM_EVENT_QUIESCE:
595 		return PMSG_RECOVER;
596 	case PM_EVENT_HIBERNATE:
597 		return PMSG_RESTORE;
598 	}
599 	return PMSG_ON;
600 }
601 
602 /**
603  * device_suspend_noirq - Execute a "late suspend" callback for given device.
604  * @dev: Device to handle.
605  * @state: PM transition of the system being carried out.
606  *
607  * The driver of @dev will not receive interrupts while this function is being
608  * executed.
609  */
610 static int device_suspend_noirq(struct device *dev, pm_message_t state)
611 {
612 	int error = 0;
613 
614 	if (dev->bus && dev->bus->pm) {
615 		pm_dev_dbg(dev, state, "LATE ");
616 		error = pm_noirq_op(dev, dev->bus->pm, state);
617 	}
618 	return error;
619 }
620 
/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 *
 * On failure, devices suspended so far are resumed and interrupts re-enabled
 * via dpm_resume_noirq().
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Reverse order: children are suspended before their parents. */
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
649 
/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Callbacks run in class, type, bus order -- the reverse of device_resume().
 * At the class and bus levels the dev_pm_ops-based callback takes precedence
 * over the legacy one.  The first error aborts the remaining levels.
 */
static int device_suspend(struct device *dev, pm_message_t state)
{
	int error = 0;

	/* Serialize against other operations on this device. */
	down(&dev->sem);

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			/* Legacy class callback, used only without dev_pm_ops. */
			pm_dev_dbg(dev, state, "legacy class ");
			error = dev->class->suspend(dev, state);
			suspend_report_result(dev->class->suspend, error);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			/* Legacy bus callback, used only without dev_pm_ops. */
			pm_dev_dbg(dev, state, "legacy ");
			error = dev->bus->suspend(dev, state);
			suspend_report_result(dev->bus->suspend, error);
		}
	}
 End:
	up(&dev->sem);

	return error;
}
698 
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Devices are taken from the tail of dpm_list, so children are suspended
 * before their parents.  Stops at the first error and returns it.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	/*
	 * Suspended devices are collected on a private list because
	 * dpm_list_mtx is dropped around each callback; see dpm_resume().
	 */
	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		/* Keep the device alive while the list lock is dropped. */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Reinsert the suspended devices at the tail of dpm_list. */
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
733 
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 *
 * Callbacks run in bus, type, class order; the first error aborts the rest.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	/* Serialize against other operations on this device. */
	down(&dev->sem);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	up(&dev->sem);

	return error;
}
774 
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* From here on, device_pm_add() warns about parentless devices. */
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		/*
		 * Block runtime suspend for the duration of the transition;
		 * the matching pm_runtime_put_noidle() is in dpm_complete().
		 */
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_noidle(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				/* Transient failure: skip this device. */
				put_device(dev);
				error = 0;
				continue;
			}
			/*
			 * NOTE(review): on the device_prepare() failure paths
			 * (including -EAGAIN above) the pm_runtime_get_noresume()
			 * reference taken earlier does not appear to be dropped
			 * -- verify against dpm_complete().
			 */
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
828 
829 /**
830  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
831  * @state: PM transition of the system being carried out.
832  *
833  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
834  * callbacks for them.
835  */
836 int dpm_suspend_start(pm_message_t state)
837 {
838 	int error;
839 
840 	might_sleep();
841 	error = dpm_prepare(state);
842 	if (!error)
843 		error = dpm_suspend(state);
844 	return error;
845 }
846 EXPORT_SYMBOL_GPL(dpm_suspend_start);
847 
848 void __suspend_report_result(const char *function, void *fn, int ret)
849 {
850 	if (ret)
851 		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
852 }
853 EXPORT_SYMBOL_GPL(__suspend_report_result);
854